/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
 *
 * $Date:        17. January 2013
 * $Revision:    V1.4.1
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_correlate_fast_opt_q15.c
 *
 * Description:  Fast Q15 Correlation.
 *
 * Target Processor: Cortex-M4/Cortex-M3
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "arm_math.h"

/**
 * @ingroup groupFilters
 */

/**
 * @addtogroup Corr
 * @{
 */
/**
 * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
 * @param[in]  *pSrcA    points to the first input sequence.
 * @param[in]  srcALen   length of the first input sequence.
 * @param[in]  *pSrcB    points to the second input sequence.
 * @param[in]  srcBLen   length of the second input sequence.
 * @param[out] *pDst     points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1.
 * @param[in]  *pScratch points to a scratch buffer of size max(srcALen, srcBLen) + 2 * min(srcALen, srcBLen) - 2.
 * @return none.
 *
 * \par Restrictions
 * If the silicon does not support unaligned memory access, enable the macro UNALIGNED_SUPPORT_DISABLE.
 * In that case the input, output and scratch buffers must be 32-bit aligned.
 *
 * <b>Scaling and Overflow Behavior:</b>
 *
 * \par
 * This fast version uses a 32-bit accumulator in 2.30 format.
 * The accumulator maintains full precision of the intermediate multiplication results but provides only a single guard bit.
 * There is no saturation on intermediate additions.
 * Thus, if the accumulator overflows it wraps around and distorts the result.
 * The input signals should be scaled down to avoid intermediate overflows.
 * Scale down one of the inputs by 1/min(srcALen, srcBLen) to avoid overflow, since a
 * maximum of min(srcALen, srcBLen) additions is carried out internally.
 * The 2.30 accumulator is right-shifted by 15 bits and then saturated to 1.15 format to yield the final result.
 *
 * \par
 * See <code>arm_correlate_q15()</code> for a slower implementation of this function which uses a 64-bit accumulator to avoid wrap-around distortion.
 */
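
/* A minimal usage sketch (illustrative only, not part of the library).
 * The names SRCA_LEN, SRCB_LEN and example_correlate are hypothetical;
 * the buffer sizes follow the requirements documented above, assuming
 * srcALen >= srcBLen. Kept under "#if 0" so it never enters a build. */
#if 0
#define SRCA_LEN 64u                                 /* longer sequence  */
#define SRCB_LEN 16u                                 /* shorter sequence */

static q15_t srcA[SRCA_LEN];                         /* first input      */
static q15_t srcB[SRCB_LEN];                         /* second input     */
static q15_t dst[2u * SRCA_LEN - 1u];                /* 2*max - 1        */
static q15_t scratch[SRCA_LEN + 2u * SRCB_LEN - 2u]; /* max + 2*min - 2  */

static void example_correlate(void)
{
  /* Inputs are assumed pre-scaled by 1/min(srcALen, srcBLen) so the
   * 2.30 accumulator cannot wrap (see the note above). */
  arm_correlate_fast_opt_q15(srcA, SRCA_LEN, srcB, SRCB_LEN, dst, scratch);
}
#endif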

void arm_correlate_fast_opt_q15(
  q15_t * pSrcA,
  uint32_t srcALen,
  q15_t * pSrcB,
  uint32_t srcBLen,
  q15_t * pDst,
  q15_t * pScratch)
{
  q15_t *pIn1;                      /* inputA pointer */
  q15_t *pIn2;                      /* inputB pointer */
  q31_t acc0, acc1, acc2, acc3;     /* Accumulators */
  q15_t *py;                        /* Intermediate inputB pointer */
  q31_t x1, x2, x3;                 /* temporary variables for holding input and coefficient values */
  uint32_t j, blkCnt, outBlockSize; /* loop counters */
  int32_t inc = 1;                  /* Destination address modifier */
  uint32_t tapCnt;
  q31_t y1, y2;
  q15_t *pScr;                      /* Intermediate scratch pointer */
  q15_t *pOut = pDst;               /* output pointer */

#ifdef UNALIGNED_SUPPORT_DISABLE

  q15_t a, b;

#endif /* #ifdef UNALIGNED_SUPPORT_DISABLE */

  /* The algorithm implementation is based on the lengths of the inputs. */
  /* srcB is always made to slide across srcA. */
  /* So srcBLen is always considered as shorter or equal to srcALen. */
  /* But CORR(x, y) is the reverse of CORR(y, x). */
  /* So, when srcBLen > srcALen, the output pointer is made to point to the end of the output buffer */
  /* and the destination pointer modifier, inc, is set to -1. */
  /* If srcALen > srcBLen, srcB would have to be zero padded to make the two inputs the same length. */
  /* But to improve performance,
   * we include zeros in the output instead of zero padding either of the inputs. */
  /* If srcALen > srcBLen,
   * (srcALen - srcBLen) zeros have to be included at the start of the output buffer. */
  /* If srcALen < srcBLen,
   * (srcBLen - srcALen) zeros have to be included at the end of the output buffer. */
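  /* Worked example (illustrative): srcALen = 5, srcBLen = 3 gives
   * outBlockSize = 2*5 - 1 = 9, of which srcALen + srcBLen - 1 = 7
   * samples are computed; j = 9 - 7 = 2 zeros lead the output, so
   * pOut starts writing at pDst + 2. */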
  if(srcALen >= srcBLen)
  {
    /* Initialization of inputA pointer */
    pIn1 = (pSrcA);

    /* Initialization of inputB pointer */
    pIn2 = (pSrcB);

    /* Number of output samples is calculated */
    outBlockSize = (2u * srcALen) - 1u;

    /* When srcALen > srcBLen, zero padding would be done to srcB
     * to make their lengths equal.
     * Instead, (outBlockSize - (srcALen + srcBLen - 1))
     * output samples are made zero. */
    j = outBlockSize - (srcALen + (srcBLen - 1u));

    /* Update the pointer position to skip the leading zero outputs */
    pOut += j;

  }
  else
  {
    /* Initialization of inputA pointer */
    pIn1 = (pSrcB);

    /* Initialization of inputB pointer */
    pIn2 = (pSrcA);

    /* srcBLen is always considered as shorter or equal to srcALen */
    j = srcBLen;
    srcBLen = srcALen;
    srcALen = j;

    /* CORR(x, y) = Reverse order(CORR(y, x)) */
    /* Hence set the destination pointer to point to the last output sample */
    pOut = pDst + ((srcALen + srcBLen) - 2u);

    /* Destination address modifier is set to -1 */
    inc = -1;

  }

  pScr = pScratch;

  /* Fill (srcBLen - 1u) zeros in the scratch buffer */
  arm_fill_q15(0, pScr, (srcBLen - 1u));

  /* Update temporary scratch pointer */
  pScr += (srcBLen - 1u);

#ifndef UNALIGNED_SUPPORT_DISABLE

  /* Copy (srcALen) samples into the scratch buffer */
  arm_copy_q15(pIn1, pScr, srcALen);

  /* Update pointers */
  pScr += srcALen;

#else

  /* Apply loop unrolling and do 4 copies simultaneously. */
  j = srcALen >> 2u;

  /* First part of the processing with loop unrolling copies 4 data points at a time.
  ** a second loop below copies the remaining 1 to 3 samples. */
  while(j > 0u)
  {
    /* copy inputA samples into the scratch buffer */
    *pScr++ = *pIn1++;
    *pScr++ = *pIn1++;
    *pScr++ = *pIn1++;
    *pScr++ = *pIn1++;

    /* Decrement the loop counter */
    j--;
  }

  /* If the count is not a multiple of 4, copy the remaining samples here.
  ** No loop unrolling is used. */
  j = srcALen % 0x4u;

  while(j > 0u)
  {
    /* copy the remaining inputA samples */
    *pScr++ = *pIn1++;

    /* Decrement the loop counter */
    j--;
  }

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

#ifndef UNALIGNED_SUPPORT_DISABLE

  /* Fill (srcBLen - 1u) zeros at the end of the scratch buffer */
  arm_fill_q15(0, pScr, (srcBLen - 1u));

  /* Update pointer */
  pScr += (srcBLen - 1u);

#else

  /* Apply loop unrolling and do 4 stores simultaneously. */
  j = (srcBLen - 1u) >> 2u;

  /* First part of the processing with loop unrolling stores 4 zeros at a time.
  ** a second loop below stores the remaining 1 to 3 zeros. */
  while(j > 0u)
  {
    /* fill the end of the scratch buffer with zeros */
    *pScr++ = 0;
    *pScr++ = 0;
    *pScr++ = 0;
    *pScr++ = 0;

    /* Decrement the loop counter */
    j--;
  }

  /* If the count is not a multiple of 4, store the remaining zeros here.
  ** No loop unrolling is used. */
  j = (srcBLen - 1u) % 0x4u;

  while(j > 0u)
  {
    /* fill the remaining zeros */
    *pScr++ = 0;

    /* Decrement the loop counter */
    j--;
  }

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
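
  /* The scratch buffer now holds the longer sequence bordered by zeros:
   *   [ srcBLen-1 zeros | srcALen input samples | srcBLen-1 zeros ]
   * Sliding the shorter sequence across this padded copy produces all
   * srcALen + srcBLen - 1 correlation outputs without edge special-casing. */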

  /* Save the inputB pointer so it can be restored for each output block */
  py = pIn2;

  /* Actual correlation process starts here */
  blkCnt = (srcALen + srcBLen - 1u) >> 2;
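  /* Each pass of the loop below computes four consecutive output samples
   * (acc0..acc3); the remaining 1 to 3 outputs are handled by the
   * single-sample loop further down. */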

  while(blkCnt > 0)
  {
    /* Initialize temporary scratch pointer as scratch1 */
    pScr = pScratch;

    /* Clear Accumulators */
    acc0 = 0;
    acc1 = 0;
    acc2 = 0;
    acc3 = 0;

    /* Read two samples from scratch1 buffer */
    x1 = *__SIMD32(pScr)++;

    /* Read next two samples from scratch1 buffer */
    x2 = *__SIMD32(pScr)++;

    tapCnt = (srcBLen) >> 2u;
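
    /* __SMLAD is a dual 16-bit multiply-accumulate on packed halfwords:
     *   acc += (x.lo * y.lo) + (x.hi * y.hi).
     * __SMLADX does the same with the halfwords of the second operand
     * exchanged; combined with the __PKHBT-packed words below it covers
     * the odd-offset output lags without reloading the data. */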

    while(tapCnt > 0u)
    {

#ifndef UNALIGNED_SUPPORT_DISABLE

      /* Read four samples from smaller buffer */
      y1 = _SIMD32_OFFSET(pIn2);
      y2 = _SIMD32_OFFSET(pIn2 + 2u);

      acc0 = __SMLAD(x1, y1, acc0);

      acc2 = __SMLAD(x2, y1, acc2);

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x2, x1, 0);
#else
      x3 = __PKHBT(x1, x2, 0);
#endif

      acc1 = __SMLADX(x3, y1, acc1);

      x1 = _SIMD32_OFFSET(pScr);

      acc0 = __SMLAD(x2, y2, acc0);

      acc2 = __SMLAD(x1, y2, acc2);

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x1, x2, 0);
#else
      x3 = __PKHBT(x2, x1, 0);
#endif

      acc3 = __SMLADX(x3, y1, acc3);

      acc1 = __SMLADX(x3, y2, acc1);

      x2 = _SIMD32_OFFSET(pScr + 2u);

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x2, x1, 0);
#else
      x3 = __PKHBT(x1, x2, 0);
#endif

      acc3 = __SMLADX(x3, y2, acc3);
#else

      /* Read four samples from smaller buffer */
      a = *pIn2;
      b = *(pIn2 + 1);

#ifndef ARM_MATH_BIG_ENDIAN
      y1 = __PKHBT(a, b, 16);
#else
      y1 = __PKHBT(b, a, 16);
#endif

      a = *(pIn2 + 2);
      b = *(pIn2 + 3);
#ifndef ARM_MATH_BIG_ENDIAN
      y2 = __PKHBT(a, b, 16);
#else
      y2 = __PKHBT(b, a, 16);
#endif

      acc0 = __SMLAD(x1, y1, acc0);

      acc2 = __SMLAD(x2, y1, acc2);

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x2, x1, 0);
#else
      x3 = __PKHBT(x1, x2, 0);
#endif

      acc1 = __SMLADX(x3, y1, acc1);

      a = *pScr;
      b = *(pScr + 1);

#ifndef ARM_MATH_BIG_ENDIAN
      x1 = __PKHBT(a, b, 16);
#else
      x1 = __PKHBT(b, a, 16);
#endif

      acc0 = __SMLAD(x2, y2, acc0);

      acc2 = __SMLAD(x1, y2, acc2);

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x1, x2, 0);
#else
      x3 = __PKHBT(x2, x1, 0);
#endif

      acc3 = __SMLADX(x3, y1, acc3);

      acc1 = __SMLADX(x3, y2, acc1);

      a = *(pScr + 2);
      b = *(pScr + 3);

#ifndef ARM_MATH_BIG_ENDIAN
      x2 = __PKHBT(a, b, 16);
#else
      x2 = __PKHBT(b, a, 16);
#endif

#ifndef ARM_MATH_BIG_ENDIAN
      x3 = __PKHBT(x2, x1, 0);
#else
      x3 = __PKHBT(x1, x2, 0);
#endif

      acc3 = __SMLADX(x3, y2, acc3);

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

      pIn2 += 4u;

      pScr += 4u;

      /* Decrement the loop counter */
      tapCnt--;
    }

    /* Update scratch pointer for the remaining samples of the smaller length sequence */
    pScr -= 4u;

    /* Apply the same computation for the remaining samples of the smaller length sequence */
    tapCnt = (srcBLen) & 3u;

    while(tapCnt > 0u)
    {
      /* accumulate the results */
      acc0 += (*pScr++ * *pIn2);
      acc1 += (*pScr++ * *pIn2);
      acc2 += (*pScr++ * *pIn2);
      acc3 += (*pScr++ * *pIn2++);

      pScr -= 3u;

      /* Decrement the loop counter */
      tapCnt--;
    }

    blkCnt--;

    /* Shift the 2.30 accumulators right by 15 bits, saturate to 1.15 and
     * store the results in the destination buffer. */
    *pOut = (__SSAT(acc0 >> 15u, 16));
    pOut += inc;
    *pOut = (__SSAT(acc1 >> 15u, 16));
    pOut += inc;
    *pOut = (__SSAT(acc2 >> 15u, 16));
    pOut += inc;
    *pOut = (__SSAT(acc3 >> 15u, 16));
    pOut += inc;

    /* Reset inputB pointer for the next output block */
    pIn2 = py;

    /* Advance the scratch window by the four outputs just computed */
    pScratch += 4u;

  }

  /* Number of remaining output samples (1 to 3) */
  blkCnt = (srcALen + srcBLen - 1u) & 0x3;

  /* Calculate correlation for the remaining samples of the longer sequence */
  while(blkCnt > 0)
  {
    /* Initialize temporary scratch pointer as scratch1 */
    pScr = pScratch;

    /* Clear Accumulator */
    acc0 = 0;

    tapCnt = (srcBLen) >> 1u;

    while(tapCnt > 0u)
    {
      acc0 += (*pScr++ * *pIn2++);
      acc0 += (*pScr++ * *pIn2++);

      /* Decrement the loop counter */
      tapCnt--;
    }

    tapCnt = (srcBLen) & 1u;

    /* Apply the same computation for the remaining sample of the smaller length sequence */
    while(tapCnt > 0u)
    {
      /* accumulate the result */
      acc0 += (*pScr++ * *pIn2++);

      /* Decrement the loop counter */
      tapCnt--;
    }

    blkCnt--;

    /* Store the result from the accumulator in the destination buffer. */
    *pOut = (q15_t) (__SSAT((acc0 >> 15), 16));

    pOut += inc;

    /* Reset inputB pointer for the next output sample */
    pIn2 = py;

    pScratch += 1u;

  }
}

/**
 * @} end of Corr group
 */