/* tmk_core/tool/mbed/mbed-sdk/libraries/dsp/cmsis_dsp/FilteringFunctions/arm_correlate_fast_opt_q15.c */
/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
 *
 * $Date:        17. January 2013
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_correlate_fast_opt_q15.c
 *
 * Description:  Fast Q15 Correlation.
 *
 * Target Processor: Cortex-M4/Cortex-M3
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "arm_math.h"
/**
 * @ingroup groupFilters
 */

/**
 * @addtogroup Corr
 * @{
 */

/**
 * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
 * @param[in] *pSrcA points to the first input sequence.
 * @param[in] srcALen length of the first input sequence.
 * @param[in] *pSrcB points to the second input sequence.
 * @param[in] srcBLen length of the second input sequence.
 * @param[out] *pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1.
 * @param[in] *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
 * @return none.
 *
 * \par Restrictions
 * If the silicon does not support unaligned memory access enable the macro UNALIGNED_SUPPORT_DISABLE.
 * In this case input, output, scratch buffers should be aligned by 32-bit.
 *
 * <b>Scaling and Overflow Behavior:</b>
 *
 * \par
 * This fast version uses a 32-bit accumulator with 2.30 format.
 * The accumulator maintains full precision of the intermediate multiplication results but provides only a single guard bit.
 * There is no saturation on intermediate additions.
 * Thus, if the accumulator overflows it wraps around and distorts the result.
 * The input signals should be scaled down to avoid intermediate overflows.
 * Scale down one of the inputs by 1/min(srcALen, srcBLen) to avoid overflow since a
 * maximum of min(srcALen, srcBLen) number of additions is carried internally.
 * The 2.30 accumulator is right shifted by 15 bits and then saturated to 1.15 format to yield the final result.
 *
 * \par
 * See <code>arm_correlate_q15()</code> for a slower implementation of this function which uses a 64-bit accumulator to avoid wrap around distortion.
 */
84 void arm_correlate_fast_opt_q15 (
92 q15_t
* pIn1
; /* inputA pointer */
93 q15_t
* pIn2
; /* inputB pointer */
94 q31_t acc0
, acc1
, acc2
, acc3
; /* Accumulators */
95 q15_t
* py
; /* Intermediate inputB pointer */
96 q31_t x1
, x2
, x3
; /* temporary variables for holding input and coefficient values */
97 uint32_t j
, blkCnt
, outBlockSize
; /* loop counter */
98 int32_t inc
= 1 ; /* Destination address modifier */
101 q15_t
* pScr
; /* Intermediate pointers */
102 q15_t
* pOut
= pDst
; /* output pointer */
103 #ifdef UNALIGNED_SUPPORT_DISABLE
107 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
109 /* The algorithm implementation is based on the lengths of the inputs. */
110 /* srcB is always made to slide across srcA. */
111 /* So srcBLen is always considered as shorter or equal to srcALen */
112 /* But CORR(x, y) is reverse of CORR(y, x) */
113 /* So, when srcBLen > srcALen, output pointer is made to point to the end of the output buffer */
114 /* and the destination pointer modifier, inc is set to -1 */
115 /* If srcALen > srcBLen, zero pad has to be done to srcB to make the two inputs of same length */
116 /* But to improve the performance,
117 * we include zeroes in the output instead of zero padding either of the the inputs*/
118 /* If srcALen > srcBLen,
119 * (srcALen - srcBLen) zeroes has to included in the starting of the output buffer */
120 /* If srcALen < srcBLen,
121 * (srcALen - srcBLen) zeroes has to included in the ending of the output buffer */
122 if ( srcALen
>= srcBLen
)
124 /* Initialization of inputA pointer */
127 /* Initialization of inputB pointer */
130 /* Number of output samples is calculated */
131 outBlockSize
= ( 2u * srcALen
) - 1u ;
133 /* When srcALen > srcBLen, zero padding is done to srcB
134 * to make their lengths equal.
135 * Instead, (outBlockSize - (srcALen + srcBLen - 1))
136 * number of output samples are made zero */
137 j
= outBlockSize
- ( srcALen
+ ( srcBLen
- 1u ));
139 /* Updating the pointer position to non zero value */
145 /* Initialization of inputA pointer */
148 /* Initialization of inputB pointer */
151 /* srcBLen is always considered as shorter or equal to srcALen */
156 /* CORR(x, y) = Reverse order(CORR(y, x)) */
157 /* Hence set the destination pointer to point to the last output sample */
158 pOut
= pDst
+ (( srcALen
+ srcBLen
) - 2u );
160 /* Destination address modifier is set to -1 */
167 /* Fill (srcBLen - 1u) zeros in scratch buffer */
168 arm_fill_q15 ( 0 , pScr
, ( srcBLen
- 1u ));
170 /* Update temporary scratch pointer */
171 pScr
+= ( srcBLen
- 1u );
173 #ifndef UNALIGNED_SUPPORT_DISABLE
175 /* Copy (srcALen) samples in scratch buffer */
176 arm_copy_q15 ( pIn1
, pScr
, srcALen
);
178 /* Update pointers */
183 /* Apply loop unrolling and do 4 Copies simultaneously. */
186 /* First part of the processing with loop unrolling copies 4 data points at a time.
187 ** a second loop below copies for the remaining 1 to 3 samples. */
190 /* copy second buffer in reversal manner */
196 /* Decrement the loop counter */
200 /* If the count is not a multiple of 4, copy remaining samples here.
201 ** No loop unrolling is used. */
206 /* copy second buffer in reversal manner for remaining samples */
209 /* Decrement the loop counter */
213 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
215 #ifndef UNALIGNED_SUPPORT_DISABLE
217 /* Fill (srcBLen - 1u) zeros at end of scratch buffer */
218 arm_fill_q15 ( 0 , pScr
, ( srcBLen
- 1u ));
221 pScr
+= ( srcBLen
- 1u );
225 /* Apply loop unrolling and do 4 Copies simultaneously. */
226 j
= ( srcBLen
- 1u ) >> 2u ;
228 /* First part of the processing with loop unrolling copies 4 data points at a time.
229 ** a second loop below copies for the remaining 1 to 3 samples. */
232 /* copy second buffer in reversal manner */
238 /* Decrement the loop counter */
242 /* If the count is not a multiple of 4, copy remaining samples here.
243 ** No loop unrolling is used. */
244 j
= ( srcBLen
- 1u ) % 0x4 u
;
248 /* copy second buffer in reversal manner for remaining samples */
251 /* Decrement the loop counter */
255 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
257 /* Temporary pointer for scratch2 */
261 /* Actual correlation process starts here */
262 blkCnt
= ( srcALen
+ srcBLen
- 1u ) >> 2 ;
266 /* Initialze temporary scratch pointer as scratch1 */
269 /* Clear Accumlators */
275 /* Read four samples from scratch1 buffer */
276 x1
= * __SIMD32 ( pScr
)++;
278 /* Read next four samples from scratch1 buffer */
279 x2
= * __SIMD32 ( pScr
)++;
281 tapCnt
= ( srcBLen
) >> 2u ;
286 #ifndef UNALIGNED_SUPPORT_DISABLE
288 /* Read four samples from smaller buffer */
289 y1
= _SIMD32_OFFSET ( pIn2
);
290 y2
= _SIMD32_OFFSET ( pIn2
+ 2u );
292 acc0
= __SMLAD ( x1
, y1
, acc0
);
294 acc2
= __SMLAD ( x2
, y1
, acc2
);
296 #ifndef ARM_MATH_BIG_ENDIAN
297 x3
= __PKHBT ( x2
, x1
, 0 );
299 x3
= __PKHBT ( x1
, x2
, 0 );
302 acc1
= __SMLADX ( x3
, y1
, acc1
);
304 x1
= _SIMD32_OFFSET ( pScr
);
306 acc0
= __SMLAD ( x2
, y2
, acc0
);
308 acc2
= __SMLAD ( x1
, y2
, acc2
);
310 #ifndef ARM_MATH_BIG_ENDIAN
311 x3
= __PKHBT ( x1
, x2
, 0 );
313 x3
= __PKHBT ( x2
, x1
, 0 );
316 acc3
= __SMLADX ( x3
, y1
, acc3
);
318 acc1
= __SMLADX ( x3
, y2
, acc1
);
320 x2
= _SIMD32_OFFSET ( pScr
+ 2u );
322 #ifndef ARM_MATH_BIG_ENDIAN
323 x3
= __PKHBT ( x2
, x1
, 0 );
325 x3
= __PKHBT ( x1
, x2
, 0 );
328 acc3
= __SMLADX ( x3
, y2
, acc3
);
331 /* Read four samples from smaller buffer */
335 #ifndef ARM_MATH_BIG_ENDIAN
336 y1
= __PKHBT ( a
, b
, 16 );
338 y1
= __PKHBT ( b
, a
, 16 );
343 #ifndef ARM_MATH_BIG_ENDIAN
344 y2
= __PKHBT ( a
, b
, 16 );
346 y2
= __PKHBT ( b
, a
, 16 );
349 acc0
= __SMLAD ( x1
, y1
, acc0
);
351 acc2
= __SMLAD ( x2
, y1
, acc2
);
353 #ifndef ARM_MATH_BIG_ENDIAN
354 x3
= __PKHBT ( x2
, x1
, 0 );
356 x3
= __PKHBT ( x1
, x2
, 0 );
359 acc1
= __SMLADX ( x3
, y1
, acc1
);
364 #ifndef ARM_MATH_BIG_ENDIAN
365 x1
= __PKHBT ( a
, b
, 16 );
367 x1
= __PKHBT ( b
, a
, 16 );
370 acc0
= __SMLAD ( x2
, y2
, acc0
);
372 acc2
= __SMLAD ( x1
, y2
, acc2
);
374 #ifndef ARM_MATH_BIG_ENDIAN
375 x3
= __PKHBT ( x1
, x2
, 0 );
377 x3
= __PKHBT ( x2
, x1
, 0 );
380 acc3
= __SMLADX ( x3
, y1
, acc3
);
382 acc1
= __SMLADX ( x3
, y2
, acc1
);
387 #ifndef ARM_MATH_BIG_ENDIAN
388 x2
= __PKHBT ( a
, b
, 16 );
390 x2
= __PKHBT ( b
, a
, 16 );
393 #ifndef ARM_MATH_BIG_ENDIAN
394 x3
= __PKHBT ( x2
, x1
, 0 );
396 x3
= __PKHBT ( x1
, x2
, 0 );
399 acc3
= __SMLADX ( x3
, y2
, acc3
);
401 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
408 /* Decrement the loop counter */
414 /* Update scratch pointer for remaining samples of smaller length sequence */
418 /* apply same above for remaining samples of smaller length sequence */
419 tapCnt
= ( srcBLen
) & 3u ;
424 /* accumlate the results */
425 acc0
+= (* pScr
++ * * pIn2
);
426 acc1
+= (* pScr
++ * * pIn2
);
427 acc2
+= (* pScr
++ * * pIn2
);
428 acc3
+= (* pScr
++ * * pIn2
++);
432 /* Decrement the loop counter */
439 /* Store the results in the accumulators in the destination buffer. */
440 * pOut
= ( __SSAT ( acc0
>> 15u , 16 ));
442 * pOut
= ( __SSAT ( acc1
>> 15u , 16 ));
444 * pOut
= ( __SSAT ( acc2
>> 15u , 16 ));
446 * pOut
= ( __SSAT ( acc3
>> 15u , 16 ));
450 /* Initialization of inputB pointer */
458 blkCnt
= ( srcALen
+ srcBLen
- 1u ) & 0x3 ;
460 /* Calculate correlation for remaining samples of Bigger length sequence */
463 /* Initialze temporary scratch pointer as scratch1 */
466 /* Clear Accumlators */
469 tapCnt
= ( srcBLen
) >> 1u ;
474 acc0
+= (* pScr
++ * * pIn2
++);
475 acc0
+= (* pScr
++ * * pIn2
++);
477 /* Decrement the loop counter */
481 tapCnt
= ( srcBLen
) & 1u ;
483 /* apply same above for remaining samples of smaller length sequence */
487 /* accumlate the results */
488 acc0
+= (* pScr
++ * * pIn2
++);
490 /* Decrement the loop counter */
496 /* Store the result in the accumulator in the destination buffer. */
498 * pOut
= ( q15_t
) ( __SSAT (( acc0
>> 15 ), 16 ));
502 /* Initialization of inputB pointer */
511 * @} end of Corr group