/* tmk_core/tool/mbed/mbed-sdk/libraries/dsp/cmsis_dsp/TransformFunctions/arm_cfft_radix2_q15.c */
1 /* ----------------------------------------------------------------------
2 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
3 *
4 * $Date: 17. January 2013
5 * $Revision: V1.4.1
6 *
7 * Project: CMSIS DSP Library
8 * Title: arm_cfft_radix2_q15.c
9 *
10 * Description: Radix-2 Decimation in Frequency CFFT & CIFFT Fixed point processing function
11 *
12 *
13 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * - Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * - Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in
22 * the documentation and/or other materials provided with the
23 * distribution.
24 * - Neither the name of ARM LIMITED nor the names of its contributors
25 * may be used to endorse or promote products derived from this
26 * software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
31 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
32 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
34 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
35 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
36 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
38 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 * -------------------------------------------------------------------- */
41
42 #include "arm_math.h"
43
/* Forward (CFFT) radix-2 decimation-in-frequency butterfly kernel; defined below. */
void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

/* Inverse (CIFFT) radix-2 decimation-in-frequency butterfly kernel; defined below. */
void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

/* In-place bit-reversal reordering of the complex output buffer;
   defined elsewhere in the library. */
void arm_bitreversal_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  uint16_t bitRevFactor,
  uint16_t * pBitRevTab);
61
62 /**
63 * @ingroup groupTransforms
64 */
65
66 /**
67 * @addtogroup ComplexFFT
68 * @{
69 */
70
71 /**
72 * @details
73 * @brief Processing function for the fixed-point CFFT/CIFFT.
74 * @param[in] *S points to an instance of the fixed-point CFFT/CIFFT structure.
75 * @param[in, out] *pSrc points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place.
76 * @return none.
77 */
78
79 void arm_cfft_radix2_q15(
80 const arm_cfft_radix2_instance_q15 * S,
81 q15_t * pSrc)
82 {
83
84 if(S->ifftFlag == 1u)
85 {
86 arm_radix2_butterfly_inverse_q15(pSrc, S->fftLen,
87 S->pTwiddle, S->twidCoefModifier);
88 }
89 else
90 {
91 arm_radix2_butterfly_q15(pSrc, S->fftLen,
92 S->pTwiddle, S->twidCoefModifier);
93 }
94
95 arm_bitreversal_q15(pSrc, S->fftLen, S->bitRevFactor, S->pBitRevTable);
96 }
97
98 /**
99 * @} end of ComplexFFT group
100 */
101
/**
 * @brief  Core forward radix-2 decimation-in-frequency butterfly pass (Q15, in-place).
 * @param[in,out] pSrc             interleaved complex buffer of size 2*fftLen.
 * @param[in]     fftLen           transform length (power of two).
 * @param[in]     pCoef            interleaved (cos, sin) twiddle table.
 * @param[in]     twidCoefModifier table stride for the first stage; doubled per stage.
 *
 * Two implementations are selected at compile time: a SIMD path using
 * 32-bit packed halfword intrinsics, and a plain scalar path for cores
 * without those intrinsics (ARM_MATH_CM0_FAMILY).
 */
void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  /* T, S, R and coeff each hold a packed pair of Q15 halfwords
     (real in one half, imaginary in the other). */
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  /* ---- First stage: butterflies span fftLen/2, input scaled down. ---- */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  /* Each iteration processes TWO butterflies: note the in-loop i++/l++
     below, which manually unrolls the loop by a factor of two. */
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    /* Load packed (re, im) and arithmetic-shift BOTH halves right by 2;
       the low half is extracted as int16_t first so its sign bits are
       preserved by the shift. This pre-scales the input to avoid
       overflow in the saturating adds below. */
    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 2;
    T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 2;
    S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    /* Packed saturating subtract: R = T - S per 16-bit lane. */
    R = __QSUB16(T, S);

    /* Packed halving add: pSrc[i] = (T + S) / 2 per lane. */
    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    /* Complex multiply of R by the twiddle using dual 16x16 MACs:
       out1 = real part (top 16 bits kept), out2 holds the imaginary
       part in its high halfword. */
    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif /* ARM_MATH_BIG_ENDIAN */

    /* Repack: high halfword from out2, low halfword from out1. */
    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    /* Second butterfly of the unrolled pair. */
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 2;
    T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 2;
    S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif /* ARM_MATH_BIG_ENDIAN */

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  /* ---- Middle stages: no extra input scaling (inputs were pre-scaled
     in stage 1; __SHADD16 halves on every stage). ---- */
  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      /* Two butterflies per iteration via the in-loop i += n1 below. */
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif /* ARM_MATH_BIG_ENDIAN */

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif /* ARM_MATH_BIG_ENDIAN */

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      }                         // butterfly loop end

    }                           // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end

  /* ---- Last stage: no twiddle multiply (coeff is loaded but not used
     below) and no halving — __QADD16 saturating add instead. ---- */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    /* Second butterfly of the unrolled pair. */
    i += n1;
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));

    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

  }                             // groups loop end


#else

  /* ---- Scalar path (ARM_MATH_CM0_FAMILY): same algorithm without
     packed-halfword intrinsics. ---- */
  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;


  //N = fftLen;
  n2 = fftLen;

  /* First stage: inputs pre-scaled by >> 2, sums halved by >> 1. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 2u) - (pSrc[2 * l] >> 2u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 2u) + (pSrc[2 * l] >> 2u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 2u) - (pSrc[2 * l + 1] >> 2u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 2u) + (pSrc[2 * i + 1] >> 2u)) >> 1u;

      /* (xt + j*yt) * (cosVal - j*sinVal), keeping the top 16 bits
         of each 16x16 product. */
      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    }                           // butterfly loop end

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  /* Middle stages: sums halved by >> 1, no extra input scaling. */
  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      }                         // butterfly loop end

    }                           // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end

  /* Last stage: no twiddle multiply, no halving. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    /* cosVal/sinVal fetched here but not used by this final stage. */
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = pSrc[2 * i] - pSrc[2 * l];
      pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

      yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
      pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

      pSrc[2u * l] = xt;

      pSrc[2u * l + 1u] = yt;

    }                           // butterfly loop end

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

#endif /* ARM_MATH_CM0_FAMILY */

}
429
430
/**
 * @brief  Core inverse radix-2 decimation-in-frequency butterfly pass (Q15, in-place).
 * @param[in,out] pSrc             interleaved complex buffer of size 2*fftLen.
 * @param[in]     fftLen           transform length (power of two).
 * @param[in]     pCoef            interleaved (cos, sin) twiddle table.
 * @param[in]     twidCoefModifier table stride for the first stage; doubled per stage.
 *
 * Mirrors arm_radix2_butterfly_q15 with the twiddle conjugated: the SIMD
 * path swaps __SMUAD/__SMUSDX for __SMUSD/__SMUADX, and the scalar path
 * flips the +/- signs in the complex multiply.
 */
void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  /* Packed pairs of Q15 halfwords (real/imaginary). */
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  /* ---- First stage: input pre-scaled down by 2 bits. ---- */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  /* Two butterflies per iteration via the in-loop i++/l++ below. */
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    /* Arithmetic >> 2 on both packed halves; the low half goes through
       int16_t so its sign is preserved. */
    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 2;
    T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 2;
    S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    /* Packed saturating subtract and halving add. */
    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    /* Conjugate complex multiply of R by the twiddle. */
    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    /* Big-endian variant negates coeff with a saturating subtract
       from zero to get the same conjugate product. */
    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif /* ARM_MATH_BIG_ENDIAN */

    /* Repack: high halfword from out2, low halfword from out1. */
    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    /* Second butterfly of the unrolled pair. */
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 2;
    T = ((T >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 2;
    S = ((S >> 2) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif /* ARM_MATH_BIG_ENDIAN */

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  /* ---- Middle stages. ---- */
  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      /* Two butterflies per iteration via the in-loop i += n1 below. */
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);

#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif /* ARM_MATH_BIG_ENDIAN */

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));

        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);
#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif /* ARM_MATH_BIG_ENDIAN */

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      }                         // butterfly loop end

    }                           // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end

  /* ---- Last stage: coeff is loaded but not used below; saturating
     add instead of halving add. ---- */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;

      T = _SIMD32_OFFSET(pSrc + (2 * i));

      S = _SIMD32_OFFSET(pSrc + (2 * l));

      R = __QSUB16(T, S);

      _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

      _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    }                           // butterfly loop end

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

#else

  /* ---- Scalar path (ARM_MATH_CM0_FAMILY). ---- */

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;

  //N = fftLen;
  n2 = fftLen;

  /* First stage: inputs pre-scaled by >> 2, sums halved by >> 1. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 2u) - (pSrc[2 * l] >> 2u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 2u) + (pSrc[2 * l] >> 2u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 2u) - (pSrc[2 * l + 1] >> 2u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 2u) + (pSrc[2 * i + 1] >> 2u)) >> 1u;

      /* (xt + j*yt) * (cosVal + j*sinVal): signs mirrored relative to
         the forward kernel. */
      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    }                           // butterfly loop end

  }                             // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  /* Middle stages. */
  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      }                         // butterfly loop end

    }                           // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  }                             // stages loop end

  /* Last stage: no twiddle multiply, no halving.
     cosVal/sinVal fetched below but not used by this stage. */
  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  cosVal = pCoef[ia * 2];
  sinVal = pCoef[(ia * 2) + 1];

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;
    xt = pSrc[2 * i] - pSrc[2 * l];
    pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

    yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
    pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

    pSrc[2u * l] = xt;

    pSrc[2u * l + 1u] = yt;

  }                             // groups loop end


#endif /* ARM_MATH_CM0_FAMILY */

}
/* end of file */