/* tmk_core/tool/mbed/mbed-sdk/libraries/rtos/rtx/TARGET_CORTEX_M/rt_HAL_CM.h */
/*----------------------------------------------------------------------------
 *      RL-ARM - RTX
 *----------------------------------------------------------------------------
 *      Name:    RT_HAL_CM.H
 *      Purpose: Hardware Abstraction Layer for Cortex-M definitions
 *----------------------------------------------------------------------------
 *
 * Copyright (c) 1999-2009 KEIL, 2009-2012 ARM Germany GmbH
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of ARM  nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *---------------------------------------------------------------------------*/
/* Definitions */
/* Initial xPSR for a new task: only the Thumb bit (bit 24) set. */
#define INITIAL_xPSR  0x01000000
/* DEMCR.TRCENA: global enable for DWT/ITM trace (bit 24). */
#define DEMCR_TRCENA  0x01000000
/* ITM control register: ITMENA enable bit (bit 0). */
#define ITM_ITMENA    0x00000001
/* Watermark pattern written to task stacks for overflow/usage checks. */
#define MAGIC_WORD    0xE25A2EA5
41 #if defined (__CC_ARM) /* ARM Compiler */
43 #if ((__TARGET_ARCH_7_M || __TARGET_ARCH_7E_M) && !NO_EXCLUSIVE_ACCESS)
44 #define __USE_EXCLUSIVE_ACCESS
46 #undef __USE_EXCLUSIVE_ACCESS
49 #elif defined (__GNUC__) /* GNU Compiler */
51 #undef __USE_EXCLUSIVE_ACCESS
53 #if defined (__CORTEX_M0) || defined (__CORTEX_M0PLUS)
54 #define __TARGET_ARCH_6S_M 1
56 #define __TARGET_ARCH_6S_M 0
59 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
60 #define __TARGET_FPU_VFP 1
62 #define __TARGET_FPU_VFP 0
65 #define __inline inline
66 #define __weak __attribute__((weak))
68 #ifndef __CMSIS_GENERIC
70 __attribute__((always_inline
)) static inline void __enable_irq(void)
72 __asm
volatile ("cpsie i");
75 __attribute__((always_inline
)) static inline U32
__disable_irq(void)
79 __asm
volatile ("mrs %0, primask" : "=r" (result
));
80 __asm
volatile ("cpsid i");
86 __attribute__(( always_inline
)) static inline U8
__clz(U32 value
)
90 __asm
volatile ("clz %0, %1" : "=r" (result
) : "r" (value
));
94 #elif defined (__ICCARM__) /* IAR Compiler */
96 #undef __USE_EXCLUSIVE_ACCESS
98 #if (__CORE__ == __ARM6M__)
99 #define __TARGET_ARCH_6S_M 1
101 #define __TARGET_ARCH_6S_M 0
104 #if defined __ARMVFP__
105 #define __TARGET_FPU_VFP 1
107 #define __TARGET_FPU_VFP 0
110 #define __inline inline
112 #ifndef __CMSIS_GENERIC
114 static inline void __enable_irq(void)
116 __asm
volatile ("cpsie i");
119 static inline U32
__disable_irq(void)
123 __asm
volatile ("mrs %0, primask" : "=r" (result
));
124 __asm
volatile ("cpsid i");
130 static inline U8
__clz(U32 value
)
134 __asm
volatile ("clz %0, %1" : "=r" (result
) : "r" (value
));
/* NVIC registers (memory-mapped, ARMv6-M/ARMv7-M System Control Space). */
#define NVIC_ST_CTRL    (*((volatile U32 *)0xE000E010))
#define NVIC_ST_RELOAD  (*((volatile U32 *)0xE000E014))
#define NVIC_ST_CURRENT (*((volatile U32 *)0xE000E018))
#define NVIC_ISER         ((volatile U32 *)0xE000E100)
#define NVIC_ICER         ((volatile U32 *)0xE000E180)
#if (__TARGET_ARCH_6S_M)
/* v6-M only supports word access to the priority registers. */
#define NVIC_IP           ((volatile U32 *)0xE000E400)
#else
/* v7-M allows byte access: one U8 per interrupt priority. */
#define NVIC_IP           ((volatile U8  *)0xE000E400)
#endif
#define NVIC_INT_CTRL   (*((volatile U32 *)0xE000ED04))
#define NVIC_AIR_CTRL   (*((volatile U32 *)0xE000ED0C))
#define NVIC_SYS_PRI2   (*((volatile U32 *)0xE000ED1C))
#define NVIC_SYS_PRI3   (*((volatile U32 *)0xE000ED20))

/* ICSR bit positions: 28=PENDSVSET, 27=PENDSVCLR, 26=PENDSTSET, 25=PENDSTCLR */
#define OS_PEND_IRQ()   NVIC_INT_CTRL  = (1<<28)
#define OS_PENDING      ((NVIC_INT_CTRL >> 26) & (1<<2 | 1))
#define OS_UNPEND(fl)   NVIC_INT_CTRL  = (*fl = OS_PENDING) << 25
#define OS_PEND(fl,p)   NVIC_INT_CTRL  = (fl | p<<2) << 26
#define OS_LOCK()       NVIC_ST_CTRL   = 0x0005
#define OS_UNLOCK()     NVIC_ST_CTRL   = 0x0007

#define OS_X_PENDING    ((NVIC_INT_CTRL >> 28) & 1)
#define OS_X_UNPEND(fl) NVIC_INT_CTRL  = (*fl = OS_X_PENDING) << 27
#define OS_X_PEND(fl,p) NVIC_INT_CTRL  = (fl | p) << 28
#if (__TARGET_ARCH_6S_M)
/* Set IRQ n to lowest priority and enable it (word access on v6-M). */
#define OS_X_INIT(n)    NVIC_IP[n>>2] |= 0xFF << (8*(n & 0x03)); \
                        NVIC_ISER[n>>5] = 1 << (n & 0x1F)
#else
#define OS_X_INIT(n)    NVIC_IP[n] = 0xFF; \
                        NVIC_ISER[n>>5] = 1 << (n & 0x1F)
#endif
#define OS_X_LOCK(n)    NVIC_ICER[n>>5] = 1 << (n & 0x1F)
#define OS_X_UNLOCK(n)  NVIC_ISER[n>>5] = 1 << (n & 0x1F)

/* Core Debug registers */
#define DEMCR           (*((volatile U32 *)0xE000EDFC))

/* ITM registers */
#define ITM_CONTROL     (*((volatile U32 *)0xE0000E80))
#define ITM_ENABLE      (*((volatile U32 *)0xE0000E00))
#define ITM_PORT30_U32  (*((volatile U32 *)0xE0000078))
#define ITM_PORT31_U32  (*((volatile U32 *)0xE000007C))
#define ITM_PORT31_U16  (*((volatile U16 *)0xE000007C))
#define ITM_PORT31_U8   (*((volatile U8  *)0xE000007C))
/* Atomic increment/decrement of *p: LDREX/STREX retry loop where exclusive
 * access is available, otherwise a short interrupt-disabled critical section. */
#ifdef __USE_EXCLUSIVE_ACCESS
#define rt_inc(p)     while(__strex((__ldrex(p)+1),p))
#define rt_dec(p)     while(__strex((__ldrex(p)-1),p))
#else
#define rt_inc(p)     __disable_irq();(*p)++;__enable_irq();
#define rt_dec(p)     __disable_irq();(*p)--;__enable_irq();
#endif
199 __inline
static U32
rt_inc_qi (U32 size
, U8
*count
, U8
*first
) {
201 #ifdef __USE_EXCLUSIVE_ACCESS
203 if ((cnt
= __ldrex(count
)) == size
) {
206 } while (__strex(cnt
+1, count
));
208 c2
= (cnt
= __ldrex(first
)) + 1;
209 if (c2
== size
) c2
= 0;
210 } while (__strex(c2
, first
));
213 if ((cnt
= *count
) < size
) {
215 c2
= (cnt
= *first
) + 1;
216 if (c2
== size
) c2
= 0;
224 __inline
static void rt_systick_init (void) {
225 NVIC_ST_RELOAD
= os_trv
;
227 NVIC_ST_CTRL
= 0x0007;
228 NVIC_SYS_PRI3
|= 0xFF000000;
231 __inline
static void rt_svc_init (void) {
232 #if !(__TARGET_ARCH_6S_M)
235 NVIC_SYS_PRI3
|= 0x00FF0000;
236 #if (__TARGET_ARCH_6S_M)
237 NVIC_SYS_PRI2
|= (NVIC_SYS_PRI3
<<(8+1)) & 0xFC000000;
239 sh
= 8 - __clz (~((NVIC_SYS_PRI3
<< 8) & 0xFF000000));
240 prigroup
= ((NVIC_AIR_CTRL
>> 8) & 0x07);
241 if (prigroup
>= sh
) {
244 NVIC_SYS_PRI2
= ((0xFEFFFFFF << sh
) & 0xFF000000) | (NVIC_SYS_PRI2
& 0x00FFFFFF);
248 extern void rt_set_PSP (U32 stack
);
249 extern U32
rt_get_PSP (void);
250 extern void os_set_env (void);
251 extern void *_alloc_box (void *box_mem
);
252 extern int _free_box (void *box_mem
, void *box
);
254 extern void rt_init_stack (P_TCB p_TCB
, FUNCP task_body
);
255 extern void rt_ret_val (P_TCB p_TCB
, U32 v0
);
256 extern void rt_ret_val2 (P_TCB p_TCB
, U32 v0
, U32 v1
);
258 extern void dbg_init (void);
259 extern void dbg_task_notify (P_TCB p_tcb
, BOOL create
);
260 extern void dbg_task_switch (U32 task_id
);
/* Debug hook macros: forward to the dbg_* functions when kernel-awareness
 * debugging (DBG_MSG) is compiled in, otherwise expand to nothing. */
#ifdef DBG_MSG
#define DBG_INIT() dbg_init()
#define DBG_TASK_NOTIFY(p_tcb,create) if (dbg_msg) dbg_task_notify(p_tcb,create)
#define DBG_TASK_SWITCH(task_id)      if (dbg_msg && (os_tsk.new_tsk != os_tsk.run)) \
                                      dbg_task_switch(task_id)
#else
#define DBG_INIT()
#define DBG_TASK_NOTIFY(p_tcb,create)
#define DBG_TASK_SWITCH(task_id)
#endif
/*----------------------------------------------------------------------------
 * end of file
 *---------------------------------------------------------------------------*/