/* tmk_keyboard.git — tmk_core/tool/mbed/mbed-sdk/libraries/dsp/cmsis_dsp/FilteringFunctions/arm_conv_partial_q15.c */
/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
 *
 * $Date:        17. January 2013
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_conv_partial_q15.c
 *
 * Description:  Partial convolution of Q15 sequences.
 *
 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */
44 * @ingroup groupFilters
48 * @addtogroup PartialConv
53 * @brief Partial convolution of Q15 sequences.
54 * @param[in] *pSrcA points to the first input sequence.
55 * @param[in] srcALen length of the first input sequence.
56 * @param[in] *pSrcB points to the second input sequence.
57 * @param[in] srcBLen length of the second input sequence.
58 * @param[out] *pDst points to the location where the output result is written.
59 * @param[in] firstIndex is the first output sample to start with.
60 * @param[in] numPoints is the number of output points to be computed.
61 * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
63 * Refer to <code>arm_conv_partial_fast_q15()</code> for a faster but less precise version of this function for Cortex-M3 and Cortex-M4.
66 * Refer the function <code>arm_conv_partial_opt_q15()</code> for a faster implementation of this function using scratch buffers.
71 arm_status
arm_conv_partial_q15 (
81 #if (defined(ARM_MATH_CM4) || defined(ARM_MATH_CM3)) && !defined(UNALIGNED_SUPPORT_DISABLE)
83 /* Run the below code for Cortex-M4 and Cortex-M3 */
85 q15_t
* pIn1
; /* inputA pointer */
86 q15_t
* pIn2
; /* inputB pointer */
87 q15_t
* pOut
= pDst
; /* output pointer */
88 q63_t sum
, acc0
, acc1
, acc2
, acc3
; /* Accumulator */
89 q15_t
* px
; /* Intermediate inputA pointer */
90 q15_t
* py
; /* Intermediate inputB pointer */
91 q15_t
* pSrc1
, * pSrc2
; /* Intermediate pointers */
92 q31_t x0
, x1
, x2
, x3
, c0
; /* Temporary input variables */
93 uint32_t j
, k
, count
, check
, blkCnt
;
94 int32_t blockSize1
, blockSize2
, blockSize3
; /* loop counter */
95 arm_status status
; /* status of Partial convolution */
97 /* Check for range of output samples to be calculated */
98 if (( firstIndex
+ numPoints
) > (( srcALen
+ ( srcBLen
- 1u ))))
100 /* Set status as ARM_MATH_ARGUMENT_ERROR */
101 status
= ARM_MATH_ARGUMENT_ERROR
;
106 /* The algorithm implementation is based on the lengths of the inputs. */
107 /* srcB is always made to slide across srcA. */
108 /* So srcBLen is always considered as shorter or equal to srcALen */
109 if ( srcALen
>= srcBLen
)
111 /* Initialization of inputA pointer */
114 /* Initialization of inputB pointer */
119 /* Initialization of inputA pointer */
122 /* Initialization of inputB pointer */
125 /* srcBLen is always considered as shorter or equal to srcALen */
131 /* Conditions to check which loopCounter holds
132 * the first and last indices of the output samples to be calculated. */
133 check
= firstIndex
+ numPoints
;
134 blockSize3
= (( int32_t ) check
- ( int32_t ) srcALen
);
135 blockSize3
= ( blockSize3
> 0 ) ? blockSize3
: 0 ;
136 blockSize1
= ((( int32_t ) srcBLen
- 1 ) - ( int32_t ) firstIndex
);
137 blockSize1
= ( blockSize1
> 0 ) ? (( check
> ( srcBLen
- 1u )) ? blockSize1
:
138 ( int32_t ) numPoints
) : 0 ;
139 blockSize2
= ( int32_t ) check
- (( blockSize3
+ blockSize1
) +
140 ( int32_t ) firstIndex
);
141 blockSize2
= ( blockSize2
> 0 ) ? blockSize2
: 0 ;
143 /* conv(x,y) at n = x[n] * y[0] + x[n-1] * y[1] + x[n-2] * y[2] + ...+ x[n-N+1] * y[N -1] */
144 /* The function is internally
145 * divided into three stages according to the number of multiplications that has to be
146 * taken place between inputA samples and inputB samples. In the first stage of the
147 * algorithm, the multiplications increase by one for every iteration.
148 * In the second stage of the algorithm, srcBLen number of multiplications are done.
149 * In the third stage of the algorithm, the multiplications decrease by one
150 * for every iteration. */
152 /* Set the output pointer to point to the firstIndex
153 * of the output sample to be calculated. */
154 pOut
= pDst
+ firstIndex
;
156 /* --------------------------
157 * Initializations of stage1
158 * -------------------------*/
161 * sum = x[0] * y[1] + x[1] * y[0]
163 * sum = x[0] * y[srcBlen - 1] + x[1] * y[srcBlen - 2] +...+ x[srcBLen - 1] * y[0]
166 /* In this stage the MAC operations are increased by 1 for every iteration.
167 The count variable holds the number of MAC operations performed.
168 Since the partial convolution starts from firstIndex
169 Number of Macs to be performed is firstIndex + 1 */
170 count
= 1u + firstIndex
;
172 /* Working pointer of inputA */
175 /* Working pointer of inputB */
176 pSrc2
= pIn2
+ firstIndex
;
179 /* ------------------------
181 * ----------------------*/
183 /* For loop unrolling by 4, this stage is divided into two. */
184 /* First part of this stage computes the MAC operations less than 4 */
185 /* Second part of this stage computes the MAC operations greater than or equal to 4 */
187 /* The first part of the stage starts here */
188 while (( count
< 4u ) && ( blockSize1
> 0 ))
190 /* Accumulator is made zero for every iteration */
193 /* Loop over number of MAC operations between
194 * inputA samples and inputB samples */
199 /* Perform the multiply-accumulates */
200 sum
= __SMLALD (* px
++, * py
--, sum
);
202 /* Decrement the loop counter */
206 /* Store the result in the accumulator in the destination buffer. */
207 * pOut
++ = ( q15_t
) ( __SSAT (( sum
>> 15 ), 16 ));
209 /* Update the inputA and inputB pointers for next MAC calculation */
213 /* Increment the MAC count */
216 /* Decrement the loop counter */
220 /* The second part of the stage starts here */
221 /* The internal loop, over count, is unrolled by 4 */
222 /* To, read the last two inputB samples using SIMD:
223 * y[srcBLen] and y[srcBLen-1] coefficients, py is decremented by 1 */
226 while ( blockSize1
> 0 )
228 /* Accumulator is made zero for every iteration */
231 /* Apply loop unrolling and compute 4 MACs simultaneously. */
234 /* First part of the processing with loop unrolling. Compute 4 MACs at a time.
235 ** a second loop below computes MACs for the remaining 1 to 3 samples. */
238 /* Perform the multiply-accumulates */
239 /* x[0], x[1] are multiplied with y[srcBLen - 1], y[srcBLen - 2] respectively */
240 sum
= __SMLALDX (* __SIMD32 ( px
)++, * __SIMD32 ( py
)--, sum
);
241 /* x[2], x[3] are multiplied with y[srcBLen - 3], y[srcBLen - 4] respectively */
242 sum
= __SMLALDX (* __SIMD32 ( px
)++, * __SIMD32 ( py
)--, sum
);
244 /* Decrement the loop counter */
248 /* For the next MAC operations, the pointer py is used without SIMD
249 * So, py is incremented by 1 */
252 /* If the count is not a multiple of 4, compute any remaining MACs here.
253 ** No loop unrolling is used. */
258 /* Perform the multiply-accumulates */
259 sum
= __SMLALD (* px
++, * py
--, sum
);
261 /* Decrement the loop counter */
265 /* Store the result in the accumulator in the destination buffer. */
266 * pOut
++ = ( q15_t
) ( __SSAT (( sum
>> 15 ), 16 ));
268 /* Update the inputA and inputB pointers for next MAC calculation */
272 /* Increment the MAC count */
275 /* Decrement the loop counter */
279 /* --------------------------
280 * Initializations of stage2
281 * ------------------------*/
283 /* sum = x[0] * y[srcBLen-1] + x[1] * y[srcBLen-2] +...+ x[srcBLen-1] * y[0]
284 * sum = x[1] * y[srcBLen-1] + x[2] * y[srcBLen-2] +...+ x[srcBLen] * y[0]
286 * sum = x[srcALen-srcBLen-2] * y[srcBLen-1] + x[srcALen] * y[srcBLen-2] +...+ x[srcALen-1] * y[0]
289 /* Working pointer of inputA */
292 /* Working pointer of inputB */
293 pSrc2
= pIn2
+ ( srcBLen
- 1u );
296 /* count is the index by which the pointer pIn1 to be incremented */
300 /* --------------------
302 * -------------------*/
304 /* Stage2 depends on srcBLen as in this stage srcBLen number of MACS are performed.
305 * So, to loop unroll over blockSize2,
306 * srcBLen should be greater than or equal to 4 */
309 /* Loop unroll over blockSize2, by 4 */
310 blkCnt
= blockSize2
>> 2u ;
316 /* Set all accumulators to zero */
323 /* read x[0], x[1] samples */
325 /* read x[1], x[2] samples */
326 x1
= _SIMD32_OFFSET ( px
+ 1 );
330 /* Apply loop unrolling and compute 4 MACs simultaneously. */
333 /* First part of the processing with loop unrolling. Compute 4 MACs at a time.
334 ** a second loop below computes MACs for the remaining 1 to 3 samples. */
337 /* Read the last two inputB samples using SIMD:
338 * y[srcBLen - 1] and y[srcBLen - 2] */
339 c0
= * __SIMD32 ( py
)--;
341 /* acc0 += x[0] * y[srcBLen - 1] + x[1] * y[srcBLen - 2] */
342 acc0
= __SMLALDX ( x0
, c0
, acc0
);
344 /* acc1 += x[1] * y[srcBLen - 1] + x[2] * y[srcBLen - 2] */
345 acc1
= __SMLALDX ( x1
, c0
, acc1
);
347 /* Read x[2], x[3] */
350 /* Read x[3], x[4] */
351 x3
= _SIMD32_OFFSET ( px
+ 1 );
353 /* acc2 += x[2] * y[srcBLen - 1] + x[3] * y[srcBLen - 2] */
354 acc2
= __SMLALDX ( x2
, c0
, acc2
);
356 /* acc3 += x[3] * y[srcBLen - 1] + x[4] * y[srcBLen - 2] */
357 acc3
= __SMLALDX ( x3
, c0
, acc3
);
359 /* Read y[srcBLen - 3] and y[srcBLen - 4] */
360 c0
= * __SIMD32 ( py
)--;
362 /* acc0 += x[2] * y[srcBLen - 3] + x[3] * y[srcBLen - 4] */
363 acc0
= __SMLALDX ( x2
, c0
, acc0
);
365 /* acc1 += x[3] * y[srcBLen - 3] + x[4] * y[srcBLen - 4] */
366 acc1
= __SMLALDX ( x3
, c0
, acc1
);
368 /* Read x[4], x[5] */
369 x0
= _SIMD32_OFFSET ( px
+ 2 );
371 /* Read x[5], x[6] */
372 x1
= _SIMD32_OFFSET ( px
+ 3 );
375 /* acc2 += x[4] * y[srcBLen - 3] + x[5] * y[srcBLen - 4] */
376 acc2
= __SMLALDX ( x0
, c0
, acc2
);
378 /* acc3 += x[5] * y[srcBLen - 3] + x[6] * y[srcBLen - 4] */
379 acc3
= __SMLALDX ( x1
, c0
, acc3
);
383 /* For the next MAC operations, SIMD is not used
384 * So, the 16 bit pointer if inputB, py is updated */
386 /* If the srcBLen is not a multiple of 4, compute any remaining MACs here.
387 ** No loop unrolling is used. */
392 /* Read y[srcBLen - 5] */
395 #ifdef ARM_MATH_BIG_ENDIAN
401 c0
= c0
& 0x0000FFFF ;
403 #endif /* #ifdef ARM_MATH_BIG_ENDIAN */
409 /* Perform the multiply-accumulates */
410 acc0
= __SMLALD ( x0
, c0
, acc0
);
411 acc1
= __SMLALD ( x1
, c0
, acc1
);
412 acc2
= __SMLALDX ( x1
, c0
, acc2
);
413 acc3
= __SMLALDX ( x3
, c0
, acc3
);
418 /* Read y[srcBLen - 5], y[srcBLen - 6] */
419 c0
= _SIMD32_OFFSET ( py
);
421 /* Read x[7], x[8] */
425 x2
= _SIMD32_OFFSET ( px
+ 1 );
428 /* Perform the multiply-accumulates */
429 acc0
= __SMLALDX ( x0
, c0
, acc0
);
430 acc1
= __SMLALDX ( x1
, c0
, acc1
);
431 acc2
= __SMLALDX ( x3
, c0
, acc2
);
432 acc3
= __SMLALDX ( x2
, c0
, acc3
);
437 /* Read y[srcBLen - 5], y[srcBLen - 6] */
438 c0
= _SIMD32_OFFSET ( py
);
440 /* Read x[7], x[8] */
444 x2
= _SIMD32_OFFSET ( px
+ 1 );
446 /* Perform the multiply-accumulates */
447 acc0
= __SMLALDX ( x0
, c0
, acc0
);
448 acc1
= __SMLALDX ( x1
, c0
, acc1
);
449 acc2
= __SMLALDX ( x3
, c0
, acc2
);
450 acc3
= __SMLALDX ( x2
, c0
, acc3
);
454 #ifdef ARM_MATH_BIG_ENDIAN
459 c0
= c0
& 0x0000FFFF ;
460 #endif /* #ifdef ARM_MATH_BIG_ENDIAN */
463 x3
= _SIMD32_OFFSET ( px
+ 2 );
466 /* Perform the multiply-accumulates */
467 acc0
= __SMLALDX ( x1
, c0
, acc0
);
468 acc1
= __SMLALD ( x2
, c0
, acc1
);
469 acc2
= __SMLALDX ( x2
, c0
, acc2
);
470 acc3
= __SMLALDX ( x3
, c0
, acc3
);
474 /* Store the results in the accumulators in the destination buffer. */
476 #ifndef ARM_MATH_BIG_ENDIAN
479 __PKHBT ( __SSAT (( acc0
>> 15 ), 16 ), __SSAT (( acc1
>> 15 ), 16 ), 16 );
481 __PKHBT ( __SSAT (( acc2
>> 15 ), 16 ), __SSAT (( acc3
>> 15 ), 16 ), 16 );
486 __PKHBT ( __SSAT (( acc1
>> 15 ), 16 ), __SSAT (( acc0
>> 15 ), 16 ), 16 );
488 __PKHBT ( __SSAT (( acc3
>> 15 ), 16 ), __SSAT (( acc2
>> 15 ), 16 ), 16 );
490 #endif /* #ifndef ARM_MATH_BIG_ENDIAN */
492 /* Increment the pointer pIn1 index, count by 4 */
495 /* Update the inputA and inputB pointers for next MAC calculation */
499 /* Decrement the loop counter */
503 /* If the blockSize2 is not a multiple of 4, compute any remaining output samples here.
504 ** No loop unrolling is used. */
505 blkCnt
= ( uint32_t ) blockSize2
% 0x4 u
;
509 /* Accumulator is made zero for every iteration */
512 /* Apply loop unrolling and compute 4 MACs simultaneously. */
515 /* First part of the processing with loop unrolling. Compute 4 MACs at a time.
516 ** a second loop below computes MACs for the remaining 1 to 3 samples. */
519 /* Perform the multiply-accumulates */
520 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
521 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
522 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
523 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
525 /* Decrement the loop counter */
529 /* If the srcBLen is not a multiple of 4, compute any remaining MACs here.
530 ** No loop unrolling is used. */
535 /* Perform the multiply-accumulates */
536 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
538 /* Decrement the loop counter */
542 /* Store the result in the accumulator in the destination buffer. */
543 * pOut
++ = ( q15_t
) ( __SSAT ( sum
>> 15 , 16 ));
545 /* Increment the pointer pIn1 index, count by 1 */
548 /* Update the inputA and inputB pointers for next MAC calculation */
552 /* Decrement the loop counter */
558 /* If the srcBLen is not a multiple of 4,
559 * the blockSize2 loop cannot be unrolled by 4 */
560 blkCnt
= ( uint32_t ) blockSize2
;
564 /* Accumulator is made zero for every iteration */
567 /* srcBLen number of MACS should be performed */
572 /* Perform the multiply-accumulate */
573 sum
+= ( q63_t
) (( q31_t
) * px
++ * * py
--);
575 /* Decrement the loop counter */
579 /* Store the result in the accumulator in the destination buffer. */
580 * pOut
++ = ( q15_t
) ( __SSAT ( sum
>> 15 , 16 ));
582 /* Increment the MAC count */
585 /* Update the inputA and inputB pointers for next MAC calculation */
589 /* Decrement the loop counter */
595 /* --------------------------
596 * Initializations of stage3
597 * -------------------------*/
599 /* sum += x[srcALen-srcBLen+1] * y[srcBLen-1] + x[srcALen-srcBLen+2] * y[srcBLen-2] +...+ x[srcALen-1] * y[1]
600 * sum += x[srcALen-srcBLen+2] * y[srcBLen-1] + x[srcALen-srcBLen+3] * y[srcBLen-2] +...+ x[srcALen-1] * y[2]
602 * sum += x[srcALen-2] * y[srcBLen-1] + x[srcALen-1] * y[srcBLen-2]
603 * sum += x[srcALen-1] * y[srcBLen-1]
606 /* In this stage the MAC operations are decreased by 1 for every iteration.
607 The count variable holds the number of MAC operations performed */
608 count
= srcBLen
- 1u ;
610 /* Working pointer of inputA */
611 pSrc1
= ( pIn1
+ srcALen
) - ( srcBLen
- 1u );
614 /* Working pointer of inputB */
615 pSrc2
= pIn2
+ ( srcBLen
- 1u );
619 /* -------------------
621 * ------------------*/
623 /* For loop unrolling by 4, this stage is divided into two. */
624 /* First part of this stage computes the MAC operations greater than 4 */
625 /* Second part of this stage computes the MAC operations less than or equal to 4 */
627 /* The first part of the stage starts here */
630 while (( j
> 0u ) && ( blockSize3
> 0 ))
632 /* Accumulator is made zero for every iteration */
635 /* Apply loop unrolling and compute 4 MACs simultaneously. */
638 /* First part of the processing with loop unrolling. Compute 4 MACs at a time.
639 ** a second loop below computes MACs for the remaining 1 to 3 samples. */
642 /* x[srcALen - srcBLen + 1], x[srcALen - srcBLen + 2] are multiplied
643 * with y[srcBLen - 1], y[srcBLen - 2] respectively */
644 sum
= __SMLALDX (* __SIMD32 ( px
)++, * __SIMD32 ( py
)--, sum
);
645 /* x[srcALen - srcBLen + 3], x[srcALen - srcBLen + 4] are multiplied
646 * with y[srcBLen - 3], y[srcBLen - 4] respectively */
647 sum
= __SMLALDX (* __SIMD32 ( px
)++, * __SIMD32 ( py
)--, sum
);
649 /* Decrement the loop counter */
653 /* For the next MAC operations, the pointer py is used without SIMD
654 * So, py is incremented by 1 */
657 /* If the count is not a multiple of 4, compute any remaining MACs here.
658 ** No loop unrolling is used. */
663 /* sum += x[srcALen - srcBLen + 5] * y[srcBLen - 5] */
664 sum
= __SMLALD (* px
++, * py
--, sum
);
666 /* Decrement the loop counter */
670 /* Store the result in the accumulator in the destination buffer. */
671 * pOut
++ = ( q15_t
) ( __SSAT (( sum
>> 15 ), 16 ));
673 /* Update the inputA and inputB pointers for next MAC calculation */
677 /* Decrement the MAC count */
680 /* Decrement the loop counter */
686 /* The second part of the stage starts here */
687 /* SIMD is not used for the next MAC operations,
688 * so pointer py is updated to read only one sample at a time */
691 while ( blockSize3
> 0 )
693 /* Accumulator is made zero for every iteration */
696 /* Apply loop unrolling and compute 4 MACs simultaneously. */
701 /* Perform the multiply-accumulates */
702 /* sum += x[srcALen-1] * y[srcBLen-1] */
703 sum
= __SMLALD (* px
++, * py
--, sum
);
705 /* Decrement the loop counter */
709 /* Store the result in the accumulator in the destination buffer. */
710 * pOut
++ = ( q15_t
) ( __SSAT (( sum
>> 15 ), 16 ));
712 /* Update the inputA and inputB pointers for next MAC calculation */
716 /* Decrement the MAC count */
719 /* Decrement the loop counter */
723 /* set status as ARM_MATH_SUCCESS */
724 status
= ARM_MATH_SUCCESS
;
727 /* Return to application */
732 /* Run the below code for Cortex-M0 */
734 q15_t
* pIn1
= pSrcA
; /* inputA pointer */
735 q15_t
* pIn2
= pSrcB
; /* inputB pointer */
736 q63_t sum
; /* Accumulator */
737 uint32_t i
, j
; /* loop counters */
738 arm_status status
; /* status of Partial convolution */
740 /* Check for range of output samples to be calculated */
741 if (( firstIndex
+ numPoints
) > (( srcALen
+ ( srcBLen
- 1u ))))
743 /* Set status as ARM_ARGUMENT_ERROR */
744 status
= ARM_MATH_ARGUMENT_ERROR
;
748 /* Loop to calculate convolution for output length number of values */
749 for ( i
= firstIndex
; i
<= ( firstIndex
+ numPoints
- 1 ); i
++)
751 /* Initialize sum with zero to carry on MAC operations */
754 /* Loop to perform MAC operations according to convolution equation */
755 for ( j
= 0 ; j
<= i
; j
++)
757 /* Check the array limitations */
758 if ((( i
- j
) < srcBLen
) && ( j
< srcALen
))
760 /* z[i] += x[i-j] * y[j] */
761 sum
+= (( q31_t
) pIn1
[ j
] * ( pIn2
[ i
- j
]));
765 /* Store the output in the destination buffer */
766 pDst
[ i
] = ( q15_t
) __SSAT (( sum
>> 15u ), 16u );
768 /* set status as ARM_SUCCESS as there are no argument errors */
769 status
= ARM_MATH_SUCCESS
;
773 #endif /* #if (defined(ARM_MATH_CM4) || defined(ARM_MATH_CM3)) && !defined(UNALIGNED_SUPPORT_DISABLE) */
778 * @} end of PartialConv group