/* Mirror provenance: tmk_keyboard.git — tmk_core/tool/mbed/mbed-sdk/libraries/dsp/cmsis_dsp/FilteringFunctions/arm_conv_fast_opt_q15.c */
1 /* ----------------------------------------------------------------------
2 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
4 * $Date: 17. January 2013
7 * Project: CMSIS DSP Library
8 * Title: arm_conv_fast_opt_q15.c
10 * Description: Fast Q15 Convolution.
12 * Target Processor: Cortex-M4/Cortex-M3
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * - Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
23 * - Neither the name of ARM LIMITED nor the names of its contributors
24 * may be used to endorse or promote products derived from this
25 * software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
34 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
35 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
37 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 * -------------------------------------------------------------------- */
44 * @ingroup groupFilters
53 * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
54 * @param[in] *pSrcA points to the first input sequence.
55 * @param[in] srcALen length of the first input sequence.
56 * @param[in] *pSrcB points to the second input sequence.
57 * @param[in] srcBLen length of the second input sequence.
58 * @param[out] *pDst points to the location where the output result is written. Length srcALen+srcBLen-1.
59 * @param[in] *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
60 * @param[in] *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
64 * If the silicon does not support unaligned memory access, define the macro UNALIGNED_SUPPORT_DISABLE.
65 * In this case the input, output, scratch1 and scratch2 buffers should be 32-bit aligned.
67 * <b>Scaling and Overflow Behavior:</b>
70 * This fast version uses a 32-bit accumulator with 2.30 format.
71 * The accumulator maintains full precision of the intermediate multiplication results
72 * but provides only a single guard bit. There is no saturation on intermediate additions.
73 * Thus, if the accumulator overflows it wraps around and distorts the result.
74 * The input signals should be scaled down to avoid intermediate overflows.
75 * Scale down the inputs by log2(min(srcALen, srcBLen)) (log2 is read as log to the base 2) times to avoid overflows,
76 * as maximum of min(srcALen, srcBLen) number of additions are carried internally.
77 * The 2.30 accumulator is right shifted by 15 bits and then saturated to 1.15 format to yield the final result.
80 * See <code>arm_conv_q15()</code> for a slower implementation of this function which uses 64-bit accumulation to avoid wrap around distortion.
83 void arm_conv_fast_opt_q15(
92 q31_t acc0
, acc1
, acc2
, acc3
; /* Accumulators */
93 q31_t x1
, x2
, x3
; /* Temporary variables to hold state and coefficient values */
94 q31_t y1
, y2
; /* State variables */
95 q15_t
*pOut
= pDst
; /* output pointer */
96 q15_t
*pScr1
= pScratch1
; /* Temporary pointer for scratch1 */
97 q15_t
*pScr2
= pScratch2
; /* Temporary pointer for scratch1 */
98 q15_t
*pIn1
; /* inputA pointer */
99 q15_t
*pIn2
; /* inputB pointer */
100 q15_t
*px
; /* Intermediate inputA pointer */
101 q15_t
*py
; /* Intermediate inputB pointer */
102 uint32_t j
, k
, blkCnt
; /* loop counter */
103 uint32_t tapCnt
; /* loop count */
104 #ifdef UNALIGNED_SUPPORT_DISABLE
108 #endif /* #ifdef UNALIGNED_SUPPORT_DISABLE */
110 /* The algorithm implementation is based on the lengths of the inputs. */
111 /* srcB is always made to slide across srcA. */
112 /* So srcBLen is always considered as shorter or equal to srcALen */
113 if(srcALen
>= srcBLen
)
115 /* Initialization of inputA pointer */
118 /* Initialization of inputB pointer */
123 /* Initialization of inputA pointer */
126 /* Initialization of inputB pointer */
129 /* srcBLen is always considered as shorter or equal to srcALen */
135 /* Pointer to take end of scratch2 buffer */
136 pScr2
= pScratch2
+ srcBLen
- 1;
138 /* points to smaller length sequence */
141 /* Apply loop unrolling and do 4 Copies simultaneously. */
144 /* First part of the processing with loop unrolling copies 4 data points at a time.
145 ** a second loop below copies for the remaining 1 to 3 samples. */
147 /* Copy smaller length input sequence in reverse order into second scratch buffer */
150 /* copy second buffer in reversal manner */
156 /* Decrement the loop counter */
160 /* If the count is not a multiple of 4, copy remaining samples here.
161 ** No loop unrolling is used. */
166 /* copy second buffer in reversal manner for remaining samples */
169 /* Decrement the loop counter */
173 /* Initialze temporary scratch pointer */
176 /* Assuming scratch1 buffer is aligned by 32-bit */
177 /* Fill (srcBLen - 1u) zeros in scratch1 buffer */
178 arm_fill_q15(0, pScr1
, (srcBLen
- 1u));
180 /* Update temporary scratch pointer */
181 pScr1
+= (srcBLen
- 1u);
183 /* Copy bigger length sequence(srcALen) samples in scratch1 buffer */
185 #ifndef UNALIGNED_SUPPORT_DISABLE
187 /* Copy (srcALen) samples in scratch buffer */
188 arm_copy_q15(pIn1
, pScr1
, srcALen
);
190 /* Update pointers */
195 /* Apply loop unrolling and do 4 Copies simultaneously. */
198 /* First part of the processing with loop unrolling copies 4 data points at a time.
199 ** a second loop below copies for the remaining 1 to 3 samples. */
202 /* copy second buffer in reversal manner */
208 /* Decrement the loop counter */
212 /* If the count is not a multiple of 4, copy remaining samples here.
213 ** No loop unrolling is used. */
218 /* copy second buffer in reversal manner for remaining samples */
221 /* Decrement the loop counter */
225 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
228 #ifndef UNALIGNED_SUPPORT_DISABLE
230 /* Fill (srcBLen - 1u) zeros at end of scratch buffer */
231 arm_fill_q15(0, pScr1
, (srcBLen
- 1u));
234 pScr1
+= (srcBLen
- 1u);
238 /* Apply loop unrolling and do 4 Copies simultaneously. */
239 k
= (srcBLen
- 1u) >> 2u;
241 /* First part of the processing with loop unrolling copies 4 data points at a time.
242 ** a second loop below copies for the remaining 1 to 3 samples. */
245 /* copy second buffer in reversal manner */
251 /* Decrement the loop counter */
255 /* If the count is not a multiple of 4, copy remaining samples here.
256 ** No loop unrolling is used. */
257 k
= (srcBLen
- 1u) % 0x4u
;
261 /* copy second buffer in reversal manner for remaining samples */
264 /* Decrement the loop counter */
268 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
270 /* Temporary pointer for scratch2 */
274 /* Initialization of pIn2 pointer */
277 /* First part of the processing with loop unrolling process 4 data points at a time.
278 ** a second loop below process for the remaining 1 to 3 samples. */
280 /* Actual convolution process starts here */
281 blkCnt
= (srcALen
+ srcBLen
- 1u) >> 2;
285 /* Initialze temporary scratch pointer as scratch1 */
288 /* Clear Accumlators */
294 /* Read two samples from scratch1 buffer */
295 x1
= *__SIMD32(pScr1
)++;
297 /* Read next two samples from scratch1 buffer */
298 x2
= *__SIMD32(pScr1
)++;
300 tapCnt
= (srcBLen
) >> 2u;
305 #ifndef UNALIGNED_SUPPORT_DISABLE
307 /* Read four samples from smaller buffer */
308 y1
= _SIMD32_OFFSET(pIn2
);
309 y2
= _SIMD32_OFFSET(pIn2
+ 2u);
311 /* multiply and accumlate */
312 acc0
= __SMLAD(x1
, y1
, acc0
);
313 acc2
= __SMLAD(x2
, y1
, acc2
);
315 /* pack input data */
316 #ifndef ARM_MATH_BIG_ENDIAN
317 x3
= __PKHBT(x2
, x1
, 0);
319 x3
= __PKHBT(x1
, x2
, 0);
322 /* multiply and accumlate */
323 acc1
= __SMLADX(x3
, y1
, acc1
);
325 /* Read next two samples from scratch1 buffer */
326 x1
= _SIMD32_OFFSET(pScr1
);
328 /* multiply and accumlate */
329 acc0
= __SMLAD(x2
, y2
, acc0
);
330 acc2
= __SMLAD(x1
, y2
, acc2
);
332 /* pack input data */
333 #ifndef ARM_MATH_BIG_ENDIAN
334 x3
= __PKHBT(x1
, x2
, 0);
336 x3
= __PKHBT(x2
, x1
, 0);
339 acc3
= __SMLADX(x3
, y1
, acc3
);
340 acc1
= __SMLADX(x3
, y2
, acc1
);
342 x2
= _SIMD32_OFFSET(pScr1
+ 2u);
344 #ifndef ARM_MATH_BIG_ENDIAN
345 x3
= __PKHBT(x2
, x1
, 0);
347 x3
= __PKHBT(x1
, x2
, 0);
350 acc3
= __SMLADX(x3
, y2
, acc3
);
354 /* Read four samples from smaller buffer */
358 #ifndef ARM_MATH_BIG_ENDIAN
359 y1
= __PKHBT(a
, b
, 16);
361 y1
= __PKHBT(b
, a
, 16);
366 #ifndef ARM_MATH_BIG_ENDIAN
367 y2
= __PKHBT(a
, b
, 16);
369 y2
= __PKHBT(b
, a
, 16);
372 acc0
= __SMLAD(x1
, y1
, acc0
);
374 acc2
= __SMLAD(x2
, y1
, acc2
);
376 #ifndef ARM_MATH_BIG_ENDIAN
377 x3
= __PKHBT(x2
, x1
, 0);
379 x3
= __PKHBT(x1
, x2
, 0);
382 acc1
= __SMLADX(x3
, y1
, acc1
);
387 #ifndef ARM_MATH_BIG_ENDIAN
388 x1
= __PKHBT(a
, b
, 16);
390 x1
= __PKHBT(b
, a
, 16);
393 acc0
= __SMLAD(x2
, y2
, acc0
);
395 acc2
= __SMLAD(x1
, y2
, acc2
);
397 #ifndef ARM_MATH_BIG_ENDIAN
398 x3
= __PKHBT(x1
, x2
, 0);
400 x3
= __PKHBT(x2
, x1
, 0);
403 acc3
= __SMLADX(x3
, y1
, acc3
);
405 acc1
= __SMLADX(x3
, y2
, acc1
);
410 #ifndef ARM_MATH_BIG_ENDIAN
411 x2
= __PKHBT(a
, b
, 16);
413 x2
= __PKHBT(b
, a
, 16);
416 #ifndef ARM_MATH_BIG_ENDIAN
417 x3
= __PKHBT(x2
, x1
, 0);
419 x3
= __PKHBT(x1
, x2
, 0);
422 acc3
= __SMLADX(x3
, y2
, acc3
);
424 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
426 /* update scratch pointers */
431 /* Decrement the loop counter */
435 /* Update scratch pointer for remaining samples of smaller length sequence */
438 /* apply same above for remaining samples of smaller length sequence */
439 tapCnt
= (srcBLen
) & 3u;
444 /* accumlate the results */
445 acc0
+= (*pScr1
++ * *pIn2
);
446 acc1
+= (*pScr1
++ * *pIn2
);
447 acc2
+= (*pScr1
++ * *pIn2
);
448 acc3
+= (*pScr1
++ * *pIn2
++);
452 /* Decrement the loop counter */
459 /* Store the results in the accumulators in the destination buffer. */
461 #ifndef ARM_MATH_BIG_ENDIAN
464 __PKHBT(__SSAT((acc0
>> 15), 16), __SSAT((acc1
>> 15), 16), 16);
467 __PKHBT(__SSAT((acc2
>> 15), 16), __SSAT((acc3
>> 15), 16), 16);
473 __PKHBT(__SSAT((acc1
>> 15), 16), __SSAT((acc0
>> 15), 16), 16);
476 __PKHBT(__SSAT((acc3
>> 15), 16), __SSAT((acc2
>> 15), 16), 16);
480 #endif /* #ifndef ARM_MATH_BIG_ENDIAN */
482 /* Initialization of inputB pointer */
490 blkCnt
= (srcALen
+ srcBLen
- 1u) & 0x3;
492 /* Calculate convolution for remaining samples of Bigger length sequence */
495 /* Initialze temporary scratch pointer as scratch1 */
498 /* Clear Accumlators */
501 tapCnt
= (srcBLen
) >> 1u;
506 acc0
+= (*pScr1
++ * *pIn2
++);
507 acc0
+= (*pScr1
++ * *pIn2
++);
509 /* Decrement the loop counter */
513 tapCnt
= (srcBLen
) & 1u;
515 /* apply same above for remaining samples of smaller length sequence */
519 /* accumlate the results */
520 acc0
+= (*pScr1
++ * *pIn2
++);
522 /* Decrement the loop counter */
528 /* The result is in 2.30 format. Convert to 1.15 with saturation.
529 ** Then store the output in the destination buffer. */
530 *pOut
++ = (q15_t
) (__SSAT((acc0
>> 15), 16));
532 /* Initialization of inputB pointer */
542 * @} end of Conv group