/**************************************************************************//**
 * @file     core_caFunc.h
 * @brief    CMSIS Cortex-A Core Function Access Header File
 * @version  V3.10
 * @date     9 May 2013
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2012 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifndef __CORE_CAFUNC_H__
#define __CORE_CAFUNC_H__


/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

#if (__ARMCC_VERSION < 400677)
  #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
#endif

#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_HYP 0x1A
#define MODE_UND 0x1B
#define MODE_SYS 0x1F

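/* Illustrative sketch (not part of the original header): the MODE_x values
   match the CPSR.M mode field, so privileged code can test the current
   mode, e.g.:

       if ((__get_CPSR() & 0x1F) == MODE_SVC) {
           // running in Supervisor mode
       }
*/
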
/** \brief  Get APSR Register

    This function returns the content of the APSR Register.

    \return               APSR Register value
 */
__STATIC_INLINE uint32_t __get_APSR(void)
{
  register uint32_t __regAPSR          __ASM("apsr");
  return(__regAPSR);
}


/** \brief  Get CPSR Register

    This function returns the content of the CPSR Register.

    \return               CPSR Register value
 */
__STATIC_INLINE uint32_t __get_CPSR(void)
{
  register uint32_t __regCPSR          __ASM("cpsr");
  return(__regCPSR);
}

/** \brief  Set Stack Pointer

    This function assigns the given value to the current stack pointer.

    \param [in]    topOfStack  Stack Pointer value to set
 */
register uint32_t __regSP              __ASM("sp");
__STATIC_INLINE void __set_SP(uint32_t topOfStack)
{
  __regSP = topOfStack;
}


/** \brief  Get link register

    This function returns the value of the link register

    \return    Value of link register
 */
register uint32_t __reglr              __ASM("lr");
__STATIC_INLINE uint32_t __get_LR(void)
{
  return(__reglr);
}

/** \brief  Set link register

    This function sets the value of the link register

    \param [in]    lr  LR value to set
 */
__STATIC_INLINE void __set_LR(uint32_t lr)
{
  __reglr = lr;
}

/** \brief  Set Process Stack Pointer

    This function assigns the given value to the USR/SYS Stack Pointer (PSP).

    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
__STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
{
    ARM
    PRESERVE8

    BIC     R0, R0, #7  ;ensure stack is 8-byte aligned
    MRS     R1, CPSR
    CPS     #MODE_SYS   ;no effect in USR mode
    MOV     SP, R0
    MSR     CPSR_c, R1  ;no effect in USR mode
    ISB
    BX      LR

}

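/* Usage sketch with a hypothetical stack array: hand the USR/SYS stack to
   __set_PSP before dropping to User mode. AAPCS requires 8-byte stack
   alignment, which __set_PSP enforces by clearing the low three bits.

       static uint64_t usr_stack[256];           // hypothetical, 8-byte aligned
       __set_PSP((uint32_t)&usr_stack[256]);     // top of full-descending stack
       __set_CPS_USR();                          // switch to User mode
*/
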
/** \brief  Set User Mode

    This function changes the processor state to User Mode
 */
__STATIC_ASM void __set_CPS_USR(void)
{
    ARM

    CPS  #MODE_USR
    BX   LR
}


/** \brief  Enable FIQ

    This function enables FIQ interrupts by clearing the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
#define __enable_fault_irq                __enable_fiq


/** \brief  Disable FIQ

    This function disables FIQ interrupts by setting the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
#define __disable_fault_irq               __disable_fiq


/** \brief  Get FPSCR

    This function returns the current value of the Floating Point Status/Control register.

    \return               Floating Point Status/Control register value
 */
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
  register uint32_t __regfpscr          __ASM("fpscr");
  return(__regfpscr);
#else
  return(0);
#endif
}


/** \brief  Set FPSCR

    This function assigns the given value to the Floating Point Status/Control register.

    \param [in]    fpscr  Floating Point Status/Control value to set
 */
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
  register uint32_t __regfpscr          __ASM("fpscr");
  __regfpscr = (fpscr);
#endif
}

/** \brief  Get FPEXC

    This function returns the current value of the Floating Point Exception Control register.

    \return               Floating Point Exception Control register value
 */
__STATIC_INLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
  register uint32_t __regfpexc          __ASM("fpexc");
  return(__regfpexc);
#else
  return(0);
#endif
}


/** \brief  Set FPEXC

    This function assigns the given value to the Floating Point Exception Control register.

    \param [in]    fpexc  Floating Point Exception Control value to set
 */
__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
  register uint32_t __regfpexc          __ASM("fpexc");
  __regfpexc = (fpexc);
#endif
}

/** \brief  Get CPACR

    This function returns the current value of the Coprocessor Access Control register.

    \return               Coprocessor Access Control register value
 */
__STATIC_INLINE uint32_t __get_CPACR(void)
{
  register uint32_t __regCPACR          __ASM("cp15:0:c1:c0:2");
  return __regCPACR;
}

/** \brief  Set CPACR

    This function assigns the given value to the Coprocessor Access Control register.

    \param [in]    cpacr  Coprocessor Access Control value to set
 */
__STATIC_INLINE void __set_CPACR(uint32_t cpacr)
{
  register uint32_t __regCPACR          __ASM("cp15:0:c1:c0:2");
  __regCPACR = cpacr;
  __ISB();
}

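/* Usage sketch: before FPSCR/FPEXC can be touched, access to coprocessors
   CP10/CP11 must be granted in CPACR (bits [21:20] and [23:22]), then the
   FPU enabled through FPEXC.EN (bit 30). A minimal sequence:

       __set_CPACR(__get_CPACR() | (0xFU << 20));   // CP10/CP11: full access
       __set_FPEXC(__get_FPEXC() | 0x40000000U);    // set FPEXC.EN
*/
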
/** \brief  Get CBAR

    This function returns the value of the Configuration Base Address register.

    \return               Configuration Base Address register value
 */
__STATIC_INLINE uint32_t __get_CBAR() {
  register uint32_t __regCBAR           __ASM("cp15:4:c15:c0:0");
  return(__regCBAR);
}

/** \brief  Get TTBR0

    This function returns the value of Translation Table Base Register 0.

    \return               Translation Table Base Register 0 value
 */
__STATIC_INLINE uint32_t __get_TTBR0() {
  register uint32_t __regTTBR0          __ASM("cp15:0:c2:c0:0");
  return(__regTTBR0);
}

/** \brief  Set TTBR0

    This function assigns the given value to Translation Table Base Register 0.

    \param [in]    ttbr0  Translation Table Base Register 0 value to set
 */
__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
  register uint32_t __regTTBR0          __ASM("cp15:0:c2:c0:0");
  __regTTBR0 = ttbr0;
  __ISB();
}

/** \brief  Get DACR

    This function returns the value of the Domain Access Control Register.

    \return               Domain Access Control Register value
 */
__STATIC_INLINE uint32_t __get_DACR() {
  register uint32_t __regDACR           __ASM("cp15:0:c3:c0:0");
  return(__regDACR);
}

/** \brief  Set DACR

    This function assigns the given value to the Domain Access Control Register.

    \param [in]    dacr   Domain Access Control Register value to set
 */
__STATIC_INLINE void __set_DACR(uint32_t dacr) {
  register uint32_t __regDACR           __ASM("cp15:0:c3:c0:0");
  __regDACR = dacr;
  __ISB();
}

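/* Usage sketch: a common minimal DACR setting is "client" access (0b01)
   for all 16 domains, so the access permissions in the translation tables
   are actually enforced:

       __set_DACR(0x55555555U);   // every domain: client
*/
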
/******************************** Cache and BTAC enable  ****************************************************/

/** \brief  Set SCTLR

    This function assigns the given value to the System Control Register.

    \param [in]    sctlr  System Control Register value to set
 */
__STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
{
  register uint32_t __regSCTLR          __ASM("cp15:0:c1:c0:0");
  __regSCTLR = sctlr;
}

/** \brief  Get SCTLR

    This function returns the value of the System Control Register.

    \return               System Control Register value
 */
__STATIC_INLINE uint32_t __get_SCTLR() {
  register uint32_t __regSCTLR          __ASM("cp15:0:c1:c0:0");
  return(__regSCTLR);
}

/** \brief  Enable Caches

    Enable Caches
 */
__STATIC_INLINE void __enable_caches(void) {
  // Set I bit 12 to enable I Cache
  // Set C bit  2 to enable D Cache
  __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
}

/** \brief  Disable Caches

    Disable Caches
 */
__STATIC_INLINE void __disable_caches(void) {
  // Clear I bit 12 to disable I Cache
  // Clear C bit  2 to disable D Cache
  __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
  __ISB();
}

/** \brief  Enable BTAC

    Enable BTAC
 */
__STATIC_INLINE void __enable_btac(void) {
  // Set Z bit 11 to enable branch prediction
  __set_SCTLR( __get_SCTLR() | (1 << 11));
  __ISB();
}

/** \brief  Disable BTAC

    Disable BTAC
 */
__STATIC_INLINE void __disable_btac(void) {
  // Clear Z bit 11 to disable branch prediction
  __set_SCTLR( __get_SCTLR() & ~(1 << 11));
}


/** \brief  Enable MMU

    Enable MMU
 */
__STATIC_INLINE void __enable_mmu(void) {
  // Set M bit 0 to enable the MMU
  // Set AFE bit to enable simplified access permissions model
  // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
  __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
  __ISB();
}

/** \brief  Disable MMU

    Disable MMU
 */
__STATIC_INLINE void __disable_mmu(void) {
  // Clear M bit 0 to disable the MMU
  __set_SCTLR( __get_SCTLR() & ~1);
  __ISB();
}

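/* Usage sketch (hypothetical l1_table, simplified ordering): a minimal MMU
   bring-up, assuming the translation table has already been written out to
   memory:

       __set_TTBR0((uint32_t)l1_table);   // 16KB-aligned level-1 table
       __set_DACR(0x55555555U);           // all domains: client
       __ca9u_inv_tlb_all();              // drop stale translations
       __enable_mmu();
*/
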
/******************************** TLB maintenance operations ************************************************/
/** \brief  Invalidate the whole TLB

    TLBIALL. Invalidate the whole TLB
 */

__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
  register uint32_t __TLBIALL           __ASM("cp15:0:c8:c7:0");
  __TLBIALL = 0;
  __DSB();
  __ISB();
}

/******************************** BTB maintenance operations ************************************************/
/** \brief  Invalidate entire branch predictor array

    BPIALL. Branch Predictor Invalidate All.
 */

__STATIC_INLINE void __v7_inv_btac(void) {
  register uint32_t __BPIALL            __ASM("cp15:0:c7:c5:6");
  __BPIALL = 0;
  __DSB();     //ensure completion of the invalidation
  __ISB();     //ensure instruction fetch path sees new state
}


/******************************** L1 cache operations ******************************************************/

/** \brief  Invalidate the whole I$

    ICIALLU. Instruction Cache Invalidate All to PoU
 */
__STATIC_INLINE void __v7_inv_icache_all(void) {
  register uint32_t __ICIALLU           __ASM("cp15:0:c7:c5:0");
  __ICIALLU = 0;
  __DSB();     //ensure completion of the invalidation
  __ISB();     //ensure instruction fetch path sees new I cache state
}

/** \brief  Clean D$ by MVA

    DCCMVAC. Data cache clean by MVA to PoC
 */
__STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
  register uint32_t __DCCMVAC           __ASM("cp15:0:c7:c10:1");
  __DCCMVAC = (uint32_t)va;
  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

/** \brief  Invalidate D$ by MVA

    DCIMVAC. Data cache invalidate by MVA to PoC
 */
__STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
  register uint32_t __DCIMVAC           __ASM("cp15:0:c7:c6:1");
  __DCIMVAC = (uint32_t)va;
  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

/** \brief  Clean and Invalidate D$ by MVA

    DCCIMVAC. Data cache clean and invalidate by MVA to PoC
 */
__STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
  register uint32_t __DCCIMVAC          __ASM("cp15:0:c7:c14:1");
  __DCCIMVAC = (uint32_t)va;
  __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

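/* Usage sketch (hypothetical buf/len): the MVA operations act on a single
   cache line, so a buffer is maintained in a loop. The 32-byte line size
   assumed here matches the Cortex-A9; portable code should derive it from
   CCSIDR instead.

       for (uint32_t a = (uint32_t)buf & ~31U;
            a < (uint32_t)buf + len; a += 32U) {
           __v7_clean_dcache_mva((void *)a);   // e.g. before a peripheral reads buf
       }
       __DSB();                                // wait for the clean to complete
*/
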
/** \brief
 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
 */
#pragma push
#pragma arm
__STATIC_ASM void __v7_all_cache(uint32_t op) {
        ARM

        PUSH    {R4-R11}

        MRC     p15, 1, R6, c0, c0, 1      // Read CLIDR
        ANDS    R3, R6, #0x07000000        // Extract coherency level
        MOV     R3, R3, LSR #23            // Total cache levels << 1
        BEQ     Finished                   // If 0, no need to clean

        MOV     R10, #0                    // R10 holds current cache level << 1
Loop1   ADD     R2, R10, R10, LSR #1       // R2 holds cache "Set" position
        MOV     R1, R6, LSR R2             // Bottom 3 bits are the Cache-type for this level
        AND     R1, R1, #7                 // Isolate those lower 3 bits
        CMP     R1, #2
        BLT     Skip                       // No cache or only instruction cache at this level

        MCR     p15, 2, R10, c0, c0, 0     // Write the Cache Size selection register
        ISB                                // ISB to sync the change to the CacheSizeID reg
        MRC     p15, 1, R1, c0, c0, 0      // Read current Cache Size ID register
        AND     R2, R1, #7                 // Extract the line length field
        ADD     R2, R2, #4                 // Add 4 for the line length offset (log2 16 bytes)
        LDR     R4, =0x3FF
        ANDS    R4, R4, R1, LSR #3         // R4 is the max number of the way size (right aligned)
        CLZ     R5, R4                     // R5 is the bit position of the way size increment
        LDR     R7, =0x7FFF
        ANDS    R7, R7, R1, LSR #13        // R7 is the max number of the index size (right aligned)

Loop2   MOV     R9, R4                     // R9 working copy of the max way size (right aligned)

Loop3   ORR     R11, R10, R9, LSL R5       // Factor in the Way number and cache number into R11
        ORR     R11, R11, R7, LSL R2       // Factor in the Set number
        CMP     R0, #0
        BNE     Dccsw
        MCR     p15, 0, R11, c7, c6, 2     // DCISW. Invalidate by Set/Way
        B       cont
Dccsw   CMP     R0, #1
        BNE     Dccisw
        MCR     p15, 0, R11, c7, c10, 2    // DCCSW. Clean by Set/Way
        B       cont
Dccisw  MCR     p15, 0, R11, c7, c14, 2    // DCCISW. Clean and Invalidate by Set/Way
cont    SUBS    R9, R9, #1                 // Decrement the Way number
        BGE     Loop3
        SUBS    R7, R7, #1                 // Decrement the Set number
        BGE     Loop2
Skip    ADD     R10, R10, #2               // Increment the cache number
        CMP     R3, R10
        BGT     Loop1

Finished
        DSB
        POP    {R4-R11}
        BX     lr

}
#pragma pop

/** \brief  Invalidate the whole D$

    DCISW. Invalidate by Set/Way
 */

__STATIC_INLINE void __v7_inv_dcache_all(void) {
  __v7_all_cache(0);
}

/** \brief  Clean the whole D$

    DCCSW. Clean by Set/Way
 */

__STATIC_INLINE void __v7_clean_dcache_all(void) {
  __v7_all_cache(1);
}

/** \brief  Clean and invalidate the whole D$

    DCCISW. Clean and Invalidate by Set/Way
 */

__STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
  __v7_all_cache(2);
}

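/* Usage sketch: when turning the data cache off, dirty lines must still be
   written back, otherwise their contents are lost. One conventional order:

       __disable_caches();            // stop further allocations
       __v7_clean_inv_dcache_all();   // write back and discard dirty lines
*/
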
#include "core_ca_mmu.h"

#elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/

#error IAR Compiler support not implemented for Cortex-A

#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/

/* GNU gcc specific functions */

#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_HYP 0x1A
#define MODE_UND 0x1B
#define MODE_SYS 0x1F


/** \brief  Enable IRQ Interrupts

    This function enables IRQ interrupts by clearing the I-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
    __ASM volatile ("cpsie i");
}

/** \brief  Disable IRQ Interrupts

    This function disables IRQ interrupts by setting the I-bit in the CPSR.
    Can only be executed in Privileged modes.

    \return               Previous state of the CPSR I-bit (nonzero if IRQs were already masked)
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __disable_irq(void)
{
    uint32_t result;

    __ASM volatile ("mrs %0, cpsr" : "=r" (result));
    __ASM volatile ("cpsid i");
    return(result & 0x80);
}

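/* Usage sketch: because __disable_irq() returns the previous I-bit, a
   critical section can restore the caller's interrupt state instead of
   re-enabling unconditionally:

       uint32_t was_masked = __disable_irq();
       // ... touch data shared with IRQ handlers ...
       if (!was_masked) {
           __enable_irq();
       }
*/
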

/** \brief  Get APSR Register

    This function returns the content of the APSR Register.

    \return               APSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
#if 1
    uint32_t result;

    __ASM volatile ("mrs %0, apsr" : "=r" (result) );
    return (result);
#else
    register uint32_t __regAPSR         __ASM("apsr");
    return(__regAPSR);
#endif
}


/** \brief  Get CPSR Register

    This function returns the content of the CPSR Register.

    \return               CPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPSR(void)
{
#if 1
    register uint32_t __regCPSR;
    __ASM volatile ("mrs %0, cpsr" : "=r" (__regCPSR));
#else
    register uint32_t __regCPSR         __ASM("cpsr");
#endif
    return(__regCPSR);
}

#if 0
/** \brief  Set Stack Pointer

    This function assigns the given value to the current stack pointer.

    \param [in]    topOfStack  Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SP(uint32_t topOfStack)
{
    register uint32_t __regSP           __ASM("sp");
    __regSP = topOfStack;
}
#endif

/** \brief  Get link register

    This function returns the value of the link register

    \return    Value of link register
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_LR(void)
{
    register uint32_t __reglr           __ASM("lr");
    return(__reglr);
}

#if 0
/** \brief  Set link register

    This function sets the value of the link register

    \param [in]    lr  LR value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_LR(uint32_t lr)
{
    register uint32_t __reglr           __ASM("lr");
    __reglr = lr;
}
#endif

/** \brief  Set Process Stack Pointer

    This function assigns the given value to the USR/SYS Stack Pointer (PSP).

    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
extern void __set_PSP(uint32_t topOfProcStack);

/** \brief  Set User Mode

    This function changes the processor state to User Mode
 */
extern void __set_CPS_USR(void);

/** \brief  Enable FIQ

    This function enables FIQ interrupts by clearing the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
#define __enable_fault_irq                __enable_fiq


/** \brief  Disable FIQ

    This function disables FIQ interrupts by setting the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
#define __disable_fault_irq               __disable_fiq


/** \brief  Get FPSCR

    This function returns the current value of the Floating Point Status/Control register.

    \return               Floating Point Status/Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
#if 1
    uint32_t result;

    __ASM volatile ("vmrs %0, fpscr" : "=r" (result) );
    return (result);
#else
    register uint32_t __regfpscr        __ASM("fpscr");
    return(__regfpscr);
#endif
#else
    return(0);
#endif
}


/** \brief  Set FPSCR

    This function assigns the given value to the Floating Point Status/Control register.

    \param [in]    fpscr  Floating Point Status/Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
#if 1
    __ASM volatile ("vmsr fpscr, %0" : : "r" (fpscr) );
#else
    register uint32_t __regfpscr        __ASM("fpscr");
    __regfpscr = (fpscr);
#endif
#endif
}

/** \brief  Get FPEXC

    This function returns the current value of the Floating Point Exception Control register.

    \return               Floating Point Exception Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
#if 1
    uint32_t result;

    __ASM volatile ("vmrs %0, fpexc" : "=r" (result));
    return (result);
#else
    register uint32_t __regfpexc        __ASM("fpexc");
    return(__regfpexc);
#endif
#else
    return(0);
#endif
}


/** \brief  Set FPEXC

    This function assigns the given value to the Floating Point Exception Control register.

    \param [in]    fpexc  Floating Point Exception Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
#if 1
    __ASM volatile ("vmsr fpexc, %0" : : "r" (fpexc));
#else
    register uint32_t __regfpexc        __ASM("fpexc");
    __regfpexc = (fpexc);
#endif
#endif
}

/** \brief  Get CPACR

    This function returns the current value of the Coprocessor Access Control register.

    \return               Coprocessor Access Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPACR(void)
{
#if 1
    register uint32_t __regCPACR;
    __ASM volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (__regCPACR));
#else
    register uint32_t __regCPACR        __ASM("cp15:0:c1:c0:2");
#endif
    return __regCPACR;
}

/** \brief  Set CPACR

    This function assigns the given value to the Coprocessor Access Control register.

    \param [in]    cpacr  Coprocessor Access Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
{
#if 1
    __ASM volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r" (cpacr));
#else
    register uint32_t __regCPACR        __ASM("cp15:0:c1:c0:2");
    __regCPACR = cpacr;
#endif
    __ISB();
}

/** \brief  Get CBAR

    This function returns the value of the Configuration Base Address register.

    \return               Configuration Base Address register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CBAR() {
#if 1
    register uint32_t __regCBAR;
    __ASM volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (__regCBAR));
#else
    register uint32_t __regCBAR         __ASM("cp15:4:c15:c0:0");
#endif
    return(__regCBAR);
}

/** \brief  Get TTBR0

    This function returns the value of Translation Table Base Register 0.

    \return               Translation Table Base Register 0 value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_TTBR0() {
#if 1
    register uint32_t __regTTBR0;
    __ASM volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (__regTTBR0));
#else
    register uint32_t __regTTBR0        __ASM("cp15:0:c2:c0:0");
#endif
    return(__regTTBR0);
}

/** \brief  Set TTBR0

    This function assigns the given value to Translation Table Base Register 0.

    \param [in]    ttbr0  Translation Table Base Register 0 value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr0));
#else
    register uint32_t __regTTBR0        __ASM("cp15:0:c2:c0:0");
    __regTTBR0 = ttbr0;
#endif
    __ISB();
}

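/* Usage sketch (hypothetical l1_table): with TTBCR.N == 0 the level-1
   translation table has 4096 word entries covering 4GB and must be 16KB
   aligned; the low bits of TTBR0 hold table-walk attributes and are left
   zero here:

       static uint32_t l1_table[4096] __attribute__((aligned(0x4000)));
       __set_TTBR0((uint32_t)l1_table);
*/
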
/** \brief  Get DACR

    This function returns the value of the Domain Access Control Register.

    \return               Domain Access Control Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_DACR() {
#if 1
    register uint32_t __regDACR;
    __ASM volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (__regDACR));
#else
    register uint32_t __regDACR         __ASM("cp15:0:c3:c0:0");
#endif
    return(__regDACR);
}

/** \brief  Set DACR

    This function assigns the given value to the Domain Access Control Register.

    \param [in]    dacr   Domain Access Control Register value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_DACR(uint32_t dacr) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
#else
    register uint32_t __regDACR         __ASM("cp15:0:c3:c0:0");
    __regDACR = dacr;
#endif
    __ISB();
}

/******************************** Cache and BTAC enable  ****************************************************/

/** \brief  Set SCTLR

    This function assigns the given value to the System Control Register.

    \param [in]    sctlr  System Control Register value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
{
#if 1
    __ASM volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
#else
    register uint32_t __regSCTLR        __ASM("cp15:0:c1:c0:0");
    __regSCTLR = sctlr;
#endif
}

/** \brief  Get SCTLR

    This function returns the value of the System Control Register.

    \return               System Control Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_SCTLR() {
#if 1
    register uint32_t __regSCTLR;
    __ASM volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (__regSCTLR));
#else
    register uint32_t __regSCTLR        __ASM("cp15:0:c1:c0:0");
#endif
    return(__regSCTLR);
}

/** \brief  Enable Caches

    Enable Caches
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_caches(void) {
    // Set I bit 12 to enable I Cache
    // Set C bit  2 to enable D Cache
    __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
}

/** \brief  Disable Caches

    Disable Caches
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_caches(void) {
    // Clear I bit 12 to disable I Cache
    // Clear C bit  2 to disable D Cache
    __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
    __ISB();
}

/** \brief  Enable BTAC

    Enable BTAC
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_btac(void) {
    // Set Z bit 11 to enable branch prediction
    __set_SCTLR( __get_SCTLR() | (1 << 11));
    __ISB();
}

/** \brief  Disable BTAC

    Disable BTAC
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_btac(void) {
    // Clear Z bit 11 to disable branch prediction
    __set_SCTLR( __get_SCTLR() & ~(1 << 11));
}


/** \brief  Enable MMU

    Enable MMU
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_mmu(void) {
    // Set M bit 0 to enable the MMU
    // Set AFE bit to enable simplified access permissions model
    // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
    __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
    __ISB();
}

/** \brief  Disable MMU

    Disable MMU
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_mmu(void) {
    // Clear M bit 0 to disable the MMU
    __set_SCTLR( __get_SCTLR() & ~1);
    __ISB();
}

/******************************** TLB maintenance operations ************************************************/
/** \brief  Invalidate the whole TLB

    TLBIALL. Invalidate the whole TLB
 */

__attribute__( ( always_inline ) ) __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
#else
    register uint32_t __TLBIALL         __ASM("cp15:0:c8:c7:0");
    __TLBIALL = 0;
#endif
    __DSB();
    __ISB();
}

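/* Usage sketch (hypothetical entry/idx): after rewriting a descriptor in a
   translation table, the write must be made visible to the table walker
   before stale TLB contents are discarded:

       entry[idx] = new_descriptor;          // update the descriptor in memory
       __v7_clean_dcache_mva(&entry[idx]);   // push it past the D-cache
       __ca9u_inv_tlb_all();                 // then invalidate the TLB
*/
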
/******************************** BTB maintenance operations ************************************************/
/** \brief  Invalidate entire branch predictor array

    BPIALL. Branch Predictor Invalidate All.
 */

__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_btac(void) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
#else
    register uint32_t __BPIALL          __ASM("cp15:0:c7:c5:6");
    __BPIALL = 0;
#endif
    __DSB();     //ensure completion of the invalidation
    __ISB();     //ensure instruction fetch path sees new state
}


/******************************** L1 cache operations ******************************************************/

/** \brief  Invalidate the whole I$

    ICIALLU. Instruction Cache Invalidate All to PoU
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_icache_all(void) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
#else
    register uint32_t __ICIALLU         __ASM("cp15:0:c7:c5:0");
    __ICIALLU = 0;
#endif
    __DSB();     //ensure completion of the invalidation
    __ISB();     //ensure instruction fetch path sees new I cache state
}

/** \brief  Clean D$ by MVA

    DCCMVAC. Data cache clean by MVA to PoC
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((uint32_t)va));
#else
    register uint32_t __DCCMVAC         __ASM("cp15:0:c7:c10:1");
    __DCCMVAC = (uint32_t)va;
#endif
    __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

/** \brief  Invalidate D$ by MVA

    DCIMVAC. Data cache invalidate by MVA to PoC
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" ((uint32_t)va));
#else
    register uint32_t __DCIMVAC         __ASM("cp15:0:c7:c6:1");
    __DCIMVAC = (uint32_t)va;
#endif
    __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

/** \brief  Clean and Invalidate D$ by MVA

    DCCIMVAC. Data cache clean and invalidate by MVA to PoC
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
#if 1
    __ASM volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" ((uint32_t)va));
#else
    register uint32_t __DCCIMVAC        __ASM("cp15:0:c7:c14:1");
    __DCCIMVAC = (uint32_t)va;
#endif
    __DMB();     //ensure the ordering of data cache maintenance operations and their effects
}

/** \brief
 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
 */

/** \brief  __v7_all_cache - helper function
 */

extern void __v7_all_cache(uint32_t op);


/** \brief  Invalidate the whole D$

    DCISW. Invalidate by Set/Way
 */

__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_all(void) {
    __v7_all_cache(0);
}

/** \brief  Clean the whole D$

    DCCSW. Clean by Set/Way
 */

__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_all(void) {
    __v7_all_cache(1);
}

/** \brief  Clean and invalidate the whole D$

    DCCISW. Clean and Invalidate by Set/Way
 */

__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
    __v7_all_cache(2);
}

#include "core_ca_mmu.h"

#elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/

#error TASKING Compiler support not implemented for Cortex-A

#endif

/*@} end of CMSIS_Core_RegAccFunctions */


#endif /* __CORE_CAFUNC_H__ */