Branch data Line data Source code
1 : : /*
2 : : * Copyright (c) 2014 Wind River Systems, Inc.
3 : : * Copyright (c) 2020 Nordic Semiconductor ASA.
4 : : *
5 : : * SPDX-License-Identifier: Apache-2.0
6 : : */
7 : :
8 : : /**
9 : : * @file
10 : : * @brief Common fault handler for ARM Cortex-M
11 : : *
12 : : * Common fault handler for ARM Cortex-M processors.
13 : : */
14 : :
15 : : #include <kernel.h>
16 : : #include <kernel_internal.h>
17 : : #include <inttypes.h>
18 : : #include <exc_handle.h>
19 : : #include <logging/log.h>
20 : : LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
21 : :
22 : : #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
23 : : #define PR_EXC(...) LOG_ERR(__VA_ARGS__)
24 : : #define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
25 : : #else
26 : : #define PR_EXC(...)
27 : : #define STORE_xFAR(reg_var, reg)
28 : : #endif /* CONFIG_PRINTK || CONFIG_LOG */
29 : :
30 : : #if (CONFIG_FAULT_DUMP == 2)
31 : : #define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
32 : : #else
33 : : #define PR_FAULT_INFO(...)
34 : : #endif
35 : :
36 : : #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
37 : : #define EMN(edr) (((edr) & SYSMPU_EDR_EMN_MASK) >> SYSMPU_EDR_EMN_SHIFT)
38 : : #define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
39 : : #endif
40 : :
41 : : /* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
42 : : * It is used to perform an exception return and to detect possible state
43 : : * transition upon exception.
44 : : */
45 : :
46 : : /* Prefix. Indicates that this is an EXC_RETURN value.
47 : : * This field reads as 0b11111111.
48 : : */
49 : : #define EXC_RETURN_INDICATOR_PREFIX (0xFF << 24)
50 : : /* bit[0]: Exception Secure. The security domain the exception was taken to. */
51 : : #define EXC_RETURN_EXCEPTION_SECURE_Pos 0
52 : : #define EXC_RETURN_EXCEPTION_SECURE_Msk \
53 : : BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
54 : : #define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
55 : : #define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
56 : : /* bit[2]: Stack Pointer selection. */
57 : : #define EXC_RETURN_SPSEL_Pos 2
58 : : #define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
59 : : #define EXC_RETURN_SPSEL_MAIN 0
60 : : #define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
61 : : /* bit[3]: Mode. Indicates the Mode that was stacked from. */
62 : : #define EXC_RETURN_MODE_Pos 3
63 : : #define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
64 : : #define EXC_RETURN_MODE_HANDLER 0
65 : : #define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
66 : : /* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
67 : : * integer only stack frame or an extended floating-point stack frame.
68 : : */
69 : : #define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
70 : : #define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
71 : : #define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
72 : : #define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
73 : : /* bit[5]: Default callee register stacking. Indicates whether the default
74 : : * stacking rules apply, or whether the callee registers are already on the
75 : : * stack.
76 : : */
77 : : #define EXC_RETURN_CALLEE_STACK_Pos 5
78 : : #define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
79 : : #define EXC_RETURN_CALLEE_STACK_SKIPPED 0
80 : : #define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
81 : : /* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
82 : : * Non-secure stack is used to restore stack frame on exception return.
83 : : */
84 : : #define EXC_RETURN_RETURN_STACK_Pos 6
85 : : #define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
86 : : #define EXC_RETURN_RETURN_STACK_Non_Secure 0
87 : : #define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
88 : :
89 : : /* Integrity signature for an ARMv8-M implementation */
90 : : #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
91 : : #define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
92 : : #define INTEGRITY_SIGNATURE_EXT 0xFEFA125AUL
93 : : #else
94 : : #define INTEGRITY_SIGNATURE 0xFEFA125BUL
95 : : #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
96 : : /* Size (in words) of the additional state context that is pushed
97 : : * to the Secure stack during a Non-Secure exception entry.
98 : : */
99 : : #define ADDITIONAL_STATE_CONTEXT_WORDS 10
100 : :
101 : : /**
102 : : *
103 : : * Dump information regarding fault (FAULT_DUMP == 1)
104 : : *
105 : : * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
106 : : * (short form).
107 : : *
108 : : * eg. (precise bus error escalated to hard fault):
109 : : *
110 : : * Fault! EXC #3
111 : : * HARD FAULT: Escalation (see below)!
112 : : * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
113 : : * BFAR: 0xff001234
114 : : *
115 : : *
116 : : *
117 : : * Dump information regarding fault (FAULT_DUMP == 2)
118 : : *
119 : : * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
120 : : * (long form), and return the error code for the kernel to identify the fatal
121 : : * error reason.
122 : : *
123 : : * eg. (precise bus error escalated to hard fault):
124 : : *
125 : : * ***** HARD FAULT *****
126 : : * Fault escalation (see below)
127 : : * ***** BUS FAULT *****
128 : : * Precise data bus error
129 : : * Address: 0xff001234
130 : : *
131 : : */
132 : :
#if (CONFIG_FAULT_DUMP == 1)
/* Short-form fault dump (CONFIG_FAULT_DUMP == 1): print the exception
 * number and, on ARMv7-M/ARMv8-M Mainline, the raw MemManage/Bus/Usage
 * fault status registers (plus the SecureFault status register when
 * building Secure firmware).
 */
static void fault_show(const z_arch_esf_t *esf, int fault)
{
	PR_EXC("Fault! EXC #%d", fault);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x",
	       SCB_MMFSR, SCB_BFSR, SCB_UFSR);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	PR_EXC("SFSR: 0x%x", SAU->SFSR);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
#else
/* For Dump level 2, detailed information is generated by the
 * fault handling functions for individual fault conditions, so this
 * function is left empty.
 *
 * For Dump level 0, no information needs to be generated.
 */
static void fault_show(const z_arch_esf_t *esf, int fault)
{
	(void)esf;
	(void)fault;
}
#endif /* FAULT_DUMP == 1 */
159 : :
160 : : #ifdef CONFIG_USERSPACE
161 : : Z_EXC_DECLARE(z_arm_user_string_nlen);
162 : :
163 : : static const struct z_exc_handle exceptions[] = {
164 : : Z_EXC_HANDLE(z_arm_user_string_nlen)
165 : : };
166 : : #endif
167 : :
/* Perform an assessment whether an MPU fault shall be
 * treated as recoverable.
 *
 * The fault is recoverable only when the faulting PC falls inside one
 * of the registered exception windows (currently only
 * z_arm_user_string_nlen); in that case the stacked PC is redirected
 * to the window's fixup handler and execution resumes there.
 *
 * @param esf exception stack frame; esf->basic.pc may be rewritten
 * @param synchronous true if the fault PC still points at the faulting
 *        instruction (false for e.g. DebugMonitor, where PC may have
 *        advanced past it)
 *
 * @return true if error is recoverable, otherwise return false.
 */
static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
		/* Non-synchronous exceptions (e.g. DebugMonitor) may have
		 * allowed PC to continue to the next instruction.
		 */
		end += (synchronous) ? 0x0 : 0x4;
#else
		ARG_UNUSED(synchronous);
#endif
		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}
#endif

	return false;
}
198 : :
199 : : #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
200 : : /* HardFault is used for all fault conditions on ARMv6-M. */
201 : : #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
202 : :
203 : : #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
204 : : uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
205 : : const uint32_t psp);
206 : : #endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */
207 : :
/**
 *
 * @brief Dump MemManage fault information
 *
 * Decodes the MemManage Fault Status Register (MMFSR) bits, reports the
 * faulting address when MMFAR is valid, and — with MPU stack guards or
 * user mode enabled — determines whether the violation is a thread
 * stack overflow.
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame
 * @param from_hard_fault non-zero when invoked from the escalated
 *        HardFault path (the sticky MMARVALID bit is then cleared)
 * @param recoverable set to true if the fault may be ignored/recovered
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
			bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
	uint32_t mmfar = -EINVAL;

	PR_FAULT_INFO("***** MPU FAULT *****");

	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) != 0) {
		PR_FAULT_INFO(" Stacking error (context area might be"
			" not valid)");
	}
	if ((SCB->CFSR & SCB_CFSR_MUNSTKERR_Msk) != 0) {
		PR_FAULT_INFO(" Unstacking error");
	}
	if ((SCB->CFSR & SCB_CFSR_DACCVIOL_Msk) != 0) {
		PR_FAULT_INFO(" Data Access Violation");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the MMFAR value.
		 * 2. Read the MMARVALID bit in the MMFSR.
		 * The MMFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another higher
		 * priority exception might change the MMFAR value.
		 */
		uint32_t temp = SCB->MMFAR;

		if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
			mmfar = temp;
			PR_EXC(" MMFAR Address: 0x%x", mmfar);
			if (from_hard_fault != 0) {
				/* clear SCB_MMAR[VALID] to reset */
				SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
			}
		}
	}
	if ((SCB->CFSR & SCB_CFSR_IACCVIOL_Msk) != 0) {
		PR_FAULT_INFO(" Instruction Access Violation");
	}
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		PR_FAULT_INFO(
			" Floating-point lazy state preservation error");
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* When stack protection is enabled, we need to assess
	 * if the memory violation error is a stack corruption.
	 *
	 * By design, being a Stacking MemManage fault is a necessary
	 * and sufficient condition for a thread stack corruption.
	 * [Cortex-M process stack pointer is always descending and
	 * is never modified by code (except for the context-switch
	 * routine), therefore, a stacking error implies the PSP has
	 * crossed into an area beyond the thread stack.]
	 *
	 * Data Access Violation errors may or may not be caused by
	 * thread stack overflows.
	 */
	if ((SCB->CFSR & SCB_CFSR_MSTKERR_Msk) ||
		(SCB->CFSR & SCB_CFSR_DACCVIOL_Msk)) {
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
		/* MemManage Faults are always banked between security
		 * states. Therefore, we can safely assume the fault
		 * originated from the same security state.
		 *
		 * As we only assess thread stack corruption, we only
		 * process the error further if the stack frame is on
		 * PSP. For always-banked MemManage Fault, this is
		 * equivalent to inspecting the RETTOBASE flag.
		 *
		 * Note:
		 * It is possible that MMFAR address is not written by the
		 * Cortex-M core; this occurs when the stacking error is
		 * not accompanied by a data access violation error (i.e.
		 * when stack overflows due to the exception entry frame
		 * stacking): z_check_thread_stack_fail() shall be able to
		 * handle the case of 'mmfar' holding the -EINVAL value.
		 */
		if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
			uint32_t min_stack_ptr = z_check_thread_stack_fail(mmfar,
				((uint32_t) &esf[0]));

			if (min_stack_ptr) {
				/* When MemManage Stacking Error has occurred,
				 * the stack context frame might be corrupted
				 * but the stack pointer may have actually
				 * descended below the allowed (thread) stack
				 * area. We may face a problem with un-stacking
				 * the frame, upon the exception return, if we
				 * do not have sufficient access permissions to
				 * read the corrupted stack frame. Therefore,
				 * we manually force the stack pointer to the
				 * lowest allowed position, inside the thread's
				 * stack.
				 *
				 * Note:
				 * The PSP will normally be adjusted in a tail-
				 * chained exception performing context switch,
				 * after aborting the corrupted thread. The
				 * adjustment, here, is required as tail-chain
				 * cannot always be guaranteed.
				 *
				 * The manual adjustment of PSP is safe, as we
				 * will not be re-scheduling this thread again
				 * for execution; thread stack corruption is a
				 * fatal error and a thread that corrupted its
				 * stack needs to be aborted.
				 */
				__set_PSP(min_stack_ptr);

				reason = K_ERR_STACK_CHK_FAIL;
			} else {
				__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
					"Stacking error not a stack fail\n");
			}
		}
#else
	(void)mmfar;
	__ASSERT(!(SCB->CFSR & SCB_CFSR_MSTKERR_Msk),
		"Stacking or Data Access Violation error "
		"without stack guard, user-mode or null-pointer detection\n");
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
	}

	/* When we were handling this fault, we may have triggered a fp
	 * lazy stacking Memory Manage fault. At the time of writing, this
	 * can happen when printing. If that's true, we should clear the
	 * pending flag in addition to the clearing the reason for the fault
	 */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	if ((SCB->CFSR & SCB_CFSR_MLSPERR_Msk) != 0) {
		SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTPENDED_Msk;
	}
#endif /* CONFIG_ARMV7_M_ARMV8_M_FP */

	/* clear MMFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;

	/* Assess whether system shall ignore/recover from this MPU fault. */
	*recoverable = memory_fault_recoverable(esf, true);

	return reason;
}
360 : :
361 : : /**
362 : : *
363 : : * @brief Dump BusFault information
364 : : *
365 : : * See z_arm_fault_dump() for example.
366 : : *
367 : : * @return error code to identify the fatal error reason.
368 : : *
369 : : */
370 : 0 : static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
371 : : {
372 : 0 : uint32_t reason = K_ERR_CPU_EXCEPTION;
373 : :
374 [ # # ]: 0 : PR_FAULT_INFO("***** BUS FAULT *****");
375 : :
376 [ # # ]: 0 : if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
377 [ # # ]: 0 : PR_FAULT_INFO(" Stacking error");
378 : : }
379 [ # # ]: 0 : if (SCB->CFSR & SCB_CFSR_UNSTKERR_Msk) {
380 [ # # ]: 0 : PR_FAULT_INFO(" Unstacking error");
381 : : }
382 [ # # ]: 0 : if (SCB->CFSR & SCB_CFSR_PRECISERR_Msk) {
383 [ # # ]: 0 : PR_FAULT_INFO(" Precise data bus error");
384 : : /* In a fault handler, to determine the true faulting address:
385 : : * 1. Read and save the BFAR value.
386 : : * 2. Read the BFARVALID bit in the BFSR.
387 : : * The BFAR address is valid only if this bit is 1.
388 : : *
389 : : * Software must follow this sequence because another
390 : : * higher priority exception might change the BFAR value.
391 : : */
392 : 0 : STORE_xFAR(bfar, SCB->BFAR);
393 : :
394 [ # # ]: 0 : if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
395 [ # # ]: 0 : PR_EXC(" BFAR Address: 0x%x", bfar);
396 [ # # ]: 0 : if (from_hard_fault != 0) {
397 : : /* clear SCB_CFSR_BFAR[VALID] to reset */
398 : 0 : SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
399 : : }
400 : : }
401 : : }
402 [ # # ]: 0 : if (SCB->CFSR & SCB_CFSR_IMPRECISERR_Msk) {
403 [ # # ]: 0 : PR_FAULT_INFO(" Imprecise data bus error");
404 : : }
405 [ # # ]: 0 : if ((SCB->CFSR & SCB_CFSR_IBUSERR_Msk) != 0) {
406 [ # # ]: 0 : PR_FAULT_INFO(" Instruction bus error");
407 : : #if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
408 : : }
409 : : #else
410 [ # # ]: 0 : } else if (SCB->CFSR & SCB_CFSR_LSPERR_Msk) {
411 [ # # ]: 0 : PR_FAULT_INFO(" Floating-point lazy state preservation error");
412 : : } else {
413 : : ;
414 : : }
415 : : #endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */
416 : :
417 : : #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
418 : : uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
419 : : uint32_t mask = BIT(31);
420 : : int i;
421 : : uint32_t ear = -EINVAL;
422 : :
423 : : if (sperr) {
424 : : for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
425 : : if ((sperr & mask) == 0U) {
426 : : continue;
427 : : }
428 : : STORE_xFAR(edr, SYSMPU->SP[i].EDR);
429 : : ear = SYSMPU->SP[i].EAR;
430 : :
431 : : PR_FAULT_INFO(" NXP MPU error, port %d", i);
432 : : PR_FAULT_INFO(" Mode: %s, %s Address: 0x%x",
433 : : edr & BIT(2) ? "Supervisor" : "User",
434 : : edr & BIT(1) ? "Data" : "Instruction",
435 : : ear);
436 : : PR_FAULT_INFO(
437 : : " Type: %s, Master: %d, Regions: 0x%x",
438 : : edr & BIT(0) ? "Write" : "Read",
439 : : EMN(edr), EACD(edr));
440 : :
441 : : /* When stack protection is enabled, we need to assess
442 : : * if the memory violation error is a stack corruption.
443 : : *
444 : : * By design, being a Stacking Bus fault is a necessary
445 : : * and sufficient condition for a stack corruption.
446 : : */
447 : : if (SCB->CFSR & SCB_CFSR_STKERR_Msk) {
448 : : #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
449 : : /* Note: we can assume the fault originated
450 : : * from the same security state for ARM
451 : : * platforms implementing the NXP MPU
452 : : * (CONFIG_CPU_HAS_NXP_MPU=y).
453 : : *
454 : : * As we only assess thread stack corruption,
455 : : * we only process the error further, if the
456 : : * stack frame is on PSP. For NXP MPU-related
457 : : * Bus Faults (banked), this is equivalent to
458 : : * inspecting the RETTOBASE flag.
459 : : */
460 : : if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
461 : : uint32_t min_stack_ptr =
462 : : z_check_thread_stack_fail(ear,
463 : : ((uint32_t) &esf[0]));
464 : :
465 : : if (min_stack_ptr) {
466 : : /* When BusFault Stacking Error
467 : : * has occurred, the stack
468 : : * context frame might be
469 : : * corrupted but the stack
470 : : * pointer may have actually
471 : : * moved. We may face problems
472 : : * with un-stacking the frame,
473 : : * upon exception return, if we
474 : : * do not have sufficient
475 : : * permissions to read the
476 : : * corrupted stack frame.
477 : : * Therefore, we manually force
478 : : * the stack pointer to the
479 : : * lowest allowed position.
480 : : *
481 : : * Note:
482 : : * The PSP will normally be
483 : : * adjusted in a tail-chained
484 : : * exception performing context
485 : : * switch, after aborting the
486 : : * corrupted thread. Here, the
487 : : * adjustment is required as
488 : : * tail-chain cannot always be
489 : : * guaranteed.
490 : : */
491 : : __set_PSP(min_stack_ptr);
492 : :
493 : : reason =
494 : : K_ERR_STACK_CHK_FAIL;
495 : : break;
496 : : }
497 : : }
498 : : #else
499 : : (void)ear;
500 : : __ASSERT(0,
501 : : "Stacking error without stack guard"
502 : : "or User-mode support");
503 : : #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
504 : : }
505 : : }
506 : : SYSMPU->CESR &= ~sperr;
507 : : }
508 : : #endif /* defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU) */
509 : :
510 : : /* clear BFSR sticky bits */
511 : 0 : SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;
512 : :
513 : 0 : *recoverable = memory_fault_recoverable(esf, true);
514 : :
515 : 0 : return reason;
516 : : }
517 : :
/**
 *
 * @brief Dump UsageFault information
 *
 * Decodes the UsageFault Status Register (UFSR) bits and reports every
 * active fault condition. On ARMv8-M Mainline with the built-in stack
 * guard, a stack-overflow UsageFault is reported as a stack check
 * failure.
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame (not consulted by the decoding)
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t usage_fault(const z_arch_esf_t *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	PR_FAULT_INFO("***** USAGE FAULT *****");

	/* bits are sticky: they stack and must be reset */
	if ((SCB->CFSR & SCB_CFSR_DIVBYZERO_Msk) != 0) {
		PR_FAULT_INFO(" Division by zero");
	}
	if ((SCB->CFSR & SCB_CFSR_UNALIGNED_Msk) != 0) {
		PR_FAULT_INFO(" Unaligned memory access");
	}
#if defined(CONFIG_ARMV8_M_MAINLINE)
	if ((SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0) {
		PR_FAULT_INFO(" Stack overflow (context area not valid)");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
		/* Stack Overflows are always reported as stack corruption
		 * errors. Note that the built-in stack overflow mechanism
		 * prevents the context area to be loaded on the stack upon
		 * UsageFault exception entry. As a result, we cannot rely
		 * on the reported faulty instruction address, to determine
		 * the instruction that triggered the stack overflow.
		 */
		reason = K_ERR_STACK_CHK_FAIL;
#endif /* CONFIG_BUILTIN_STACK_GUARD */
	}
#endif /* CONFIG_ARMV8_M_MAINLINE */
	if ((SCB->CFSR & SCB_CFSR_NOCP_Msk) != 0) {
		PR_FAULT_INFO(" No coprocessor instructions");
	}
	if ((SCB->CFSR & SCB_CFSR_INVPC_Msk) != 0) {
		PR_FAULT_INFO(" Illegal load of EXC_RETURN into PC");
	}
	if ((SCB->CFSR & SCB_CFSR_INVSTATE_Msk) != 0) {
		PR_FAULT_INFO(" Illegal use of the EPSR");
	}
	if ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0) {
		PR_FAULT_INFO(" Attempt to execute undefined instruction");
	}

	/* clear UFSR sticky bits */
	SCB->CFSR |= SCB_CFSR_USGFAULTSR_Msk;

	return reason;
}
572 : :
573 : : #if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
 *
 * @brief Dump SecureFault information
 *
 * Decodes the SecureFault Status Register (SFSR): reports the faulting
 * address when SFAR is valid, then the first matching fault cause.
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame (not consulted by the decoding)
 */
static void secure_fault(const z_arch_esf_t *esf)
{
	PR_FAULT_INFO("***** SECURE FAULT *****");

	/* SFAR is read before checking SFARVALID, since a higher-priority
	 * exception could update the register in between.
	 */
	STORE_xFAR(sfar, SAU->SFAR);
	if ((SAU->SFSR & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC(" Address: 0x%x", sfar);
	}

	/* bits are sticky: they stack and must be reset */
	if ((SAU->SFSR & SAU_SFSR_INVEP_Msk) != 0) {
		PR_FAULT_INFO(" Invalid entry point");
	} else if ((SAU->SFSR & SAU_SFSR_INVIS_Msk) != 0) {
		PR_FAULT_INFO(" Invalid integrity signature");
	} else if ((SAU->SFSR & SAU_SFSR_INVER_Msk) != 0) {
		PR_FAULT_INFO(" Invalid exception return");
	} else if ((SAU->SFSR & SAU_SFSR_AUVIOL_Msk) != 0) {
		PR_FAULT_INFO(" Attribution unit violation");
	} else if ((SAU->SFSR & SAU_SFSR_INVTRAN_Msk) != 0) {
		PR_FAULT_INFO(" Invalid transition");
	} else if ((SAU->SFSR & SAU_SFSR_LSPERR_Msk) != 0) {
		PR_FAULT_INFO(" Lazy state preservation");
	} else if ((SAU->SFSR & SAU_SFSR_LSERR_Msk) != 0) {
		PR_FAULT_INFO(" Lazy state error");
	}

	/* clear SFSR sticky bits */
	SAU->SFSR |= 0xFF;
}
610 : : #endif /* defined(CONFIG_ARM_SECURE_FIRMWARE) */
611 : :
/**
 *
 * @brief Dump debug monitor exception information
 *
 * See z_arch_fault_dump() for example — NOTE(review): the original
 * cross-reference says z_arm_fault_dump(); kept as in the file.
 *
 * @param esf exception stack frame
 * @param recoverable set to true if the event may be ignored/recovered
 */
static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
{
	*recoverable = false;

	PR_FAULT_INFO(
		"***** Debug monitor exception *****");

#if defined(CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT)
	if (!z_arm_debug_monitor_event_error_check()) {
		/* By default, all debug monitor exceptions that are not
		 * treated as errors by z_arm_debug_monitor_event_error_check()
		 * are considered recoverable errors.
		 */
		*recoverable = true;
	} else {
		/* DebugMonitor is not synchronous with the faulting
		 * instruction (PC may have advanced), hence 'false'.
		 */
		*recoverable = memory_fault_recoverable(esf, false);
	}

#endif
}
640 : :
641 : : #else
642 : : #error Unknown ARM architecture
643 : : #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
644 : :
645 : 0 : static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
646 : : {
647 : 0 : uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
648 : : /* SVC is a 16-bit instruction. On a synchronous SVC
649 : : * escalated to Hard Fault, the return address is the
650 : : * next instruction, i.e. after the SVC.
651 : : */
652 : : #define _SVC_OPCODE 0xDF00
653 : :
654 : 0 : uint16_t fault_insn = *(ret_addr - 1);
655 : :
656 [ # # ]: 0 : if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
657 [ # # ]: 0 : ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
658 : 0 : return true;
659 : : }
660 : : #undef _SVC_OPCODE
661 : 0 : return false;
662 : : }
663 : :
/**
 *
 * @brief Dump hard fault information
 *
 * Decodes the HardFault Status Register (HFSR). An escalated fault
 * (FORCED) is handed to the matching MemManage/Bus/Usage/Secure fault
 * decoder; a HardFault raised by the kernel's own synchronous SVC
 * (ARCH_EXCEPT) takes its reason code from the stacked r0.
 *
 * See z_arm_fault_dump() for example.
 *
 * @param esf exception stack frame
 * @param recoverable set to true if the fault may be ignored/recovered
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	PR_FAULT_INFO("***** HARD FAULT *****");

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Workaround for #18712:
	 * HardFault may be due to escalation, as a result of
	 * an SVC instruction that could not be executed; this
	 * can occur if ARCH_EXCEPT() is called by an ISR,
	 * which executes at priority equal to the SVC handler
	 * priority. We handle the case of Kernel OOPS and Stack
	 * Fail here.
	 */
	if (z_arm_is_synchronous_svc(esf)) {

		PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
		reason = esf->basic.r0;
	}

	*recoverable = memory_fault_recoverable(esf, true);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	*recoverable = false;

	if ((SCB->HFSR & SCB_HFSR_VECTTBL_Msk) != 0) {
		PR_EXC(" Bus fault on vector table read");
	} else if ((SCB->HFSR & SCB_HFSR_DEBUGEVT_Msk) != 0) {
		PR_EXC(" Debug event");
	} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
		PR_EXC(" Fault escalation (see below)");
		/* Identify the escalated fault from the configurable
		 * fault status registers; the matching decoder may set
		 * *recoverable and refine the reason code.
		 */
		if (z_arm_is_synchronous_svc(esf)) {
			PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
			reason = esf->basic.r0;
		} else if (SCB_MMFSR != 0) {
			reason = mem_manage_fault(esf, 1, recoverable);
		} else if (SCB_BFSR != 0) {
			reason = bus_fault(esf, 1, recoverable);
		} else if (SCB_UFSR != 0) {
			reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
		} else if (SAU->SFSR != 0) {
			secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
		} else {
			__ASSERT(0,
				"Fault escalation without FSR info");
		}
	} else {
		__ASSERT(0,
			"HardFault without HFSR info"
			" Shall never occur");
	}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

	return reason;
}
731 : :
732 : : /**
733 : : *
734 : : * @brief Dump reserved exception information
735 : : *
736 : : * See z_arm_fault_dump() for example.
737 : : *
738 : : */
739 : 0 : static void reserved_exception(const z_arch_esf_t *esf, int fault)
740 : : {
741 : : ARG_UNUSED(esf);
742 : :
743 [ # # # # ]: 0 : PR_FAULT_INFO("***** %s %d) *****",
744 : : fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
745 : : fault - 16);
746 : 0 : }
747 : :
/* Handler function for ARM fault conditions.
 *
 * Dispatches on the active exception number to the dedicated fault
 * decoder, then dumps the generic fault summary when the fault is not
 * recoverable.
 *
 * @param esf exception stack frame
 * @param fault active exception number (IPSR value)
 * @param recoverable set to true if the fault may be ignored/recovered
 *
 * @return error code to identify the fatal error reason
 */
static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	*recoverable = false;

	switch (fault) {
	case 3: /* HardFault */
		reason = hard_fault(esf, recoverable);
		break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	case 4: /* MemManage fault */
		reason = mem_manage_fault(esf, 0, recoverable);
		break;
	case 5: /* BusFault */
		reason = bus_fault(esf, 0, recoverable);
		break;
	case 6: /* UsageFault */
		reason = usage_fault(esf);
		break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	case 7: /* SecureFault */
		secure_fault(esf);
		break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
	case 12: /* Debug monitor exception */
		debug_monitor(esf, recoverable);
		break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	default:
		reserved_exception(esf, fault);
		break;
	}

	if ((*recoverable) == false) {
		/* Dump generic information about the fault. */
		fault_show(esf, fault);
	}

	return reason;
}
794 : :
795 : : #if defined(CONFIG_ARM_SECURE_FIRMWARE)
796 : : #if (CONFIG_FAULT_DUMP == 2)
797 : : /**
798 : : * @brief Dump the Secure Stack information for an exception that
799 : : * has occurred in Non-Secure state.
800 : : *
801 : : * @param secure_esf Pointer to the secure stack frame.
802 : : */
803 : : static void secure_stack_dump(const z_arch_esf_t *secure_esf)
804 : : {
805 : : /*
806 : : * In case a Non-Secure exception interrupted the Secure
807 : : * execution, the Secure state has stacked the additional
808 : : * state context and the top of the stack contains the
809 : : * integrity signature.
810 : : *
811 : : * In case of a Non-Secure function call the top of the
812 : : * stack contains the return address to Secure state.
813 : : */
814 : : uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
815 : : uint32_t sec_ret_addr;
816 : : #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
817 : : if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
818 : : (*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
819 : : #else
820 : : if (*top_of_sec_stack == INTEGRITY_SIGNATURE) {
821 : : #endif /* CONFIG_ARMV7_M_ARMV8_M_FP */
822 : : /* Secure state interrupted by a Non-Secure exception.
823 : : * The return address after the additional state
824 : : * context, stacked by the Secure code upon
825 : : * Non-Secure exception entry.
826 : : */
827 : : top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
828 : : secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
829 : : sec_ret_addr = secure_esf->basic.pc;
830 : : } else {
831 : : /* Exception during Non-Secure function call.
832 : : * The return address is located on top of stack.
833 : : */
834 : : sec_ret_addr = *top_of_sec_stack;
835 : : }
836 : : PR_FAULT_INFO(" S instruction address: 0x%x", sec_ret_addr);
837 : :
838 : : }
839 : : #define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
840 : : #else
841 : : /* We do not dump the Secure stack information for lower dump levels. */
842 : : #define SECURE_STACK_DUMP(esf)
843 : : #endif /* CONFIG_FAULT_DUMP== 2 */
844 : : #endif /* CONFIG_ARM_SECURE_FIRMWARE */
845 : :
846 : : /*
847 : : * This internal function does the following:
848 : : *
849 : : * - Retrieves the exception stack frame
850 : : * - Evaluates whether to report being in a nested exception
851 : : *
852 : : * If the ESF is not successfully retrieved, the function signals
853 : : * an error by returning NULL.
854 : : *
855 : : * @return ESF pointer on success, otherwise return NULL
856 : : */
857 : 0 : static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
858 : : bool *nested_exc)
859 : : {
860 : 0 : bool alternative_state_exc = false;
861 : 0 : z_arch_esf_t *ptr_esf = NULL;
862 : :
863 : 0 : *nested_exc = false;
864 : :
865 [ # # ]: 0 : if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
866 : : EXC_RETURN_INDICATOR_PREFIX) {
867 : : /* Invalid EXC_RETURN value. This is a fatal error. */
868 : 0 : return NULL;
869 : : }
870 : :
871 : : #if defined(CONFIG_ARM_SECURE_FIRMWARE)
872 : : if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
873 : : /* Secure Firmware shall only handle Secure Exceptions.
874 : : * This is a fatal error.
875 : : */
876 : : return NULL;
877 : : }
878 : :
879 : : if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
880 : : /* Exception entry occurred in Secure stack. */
881 : : } else {
882 : : /* Exception entry occurred in Non-Secure stack. Therefore,
883 : : * msp/psp point to the Secure stack, however, the actual
884 : : * exception stack frame is located in the Non-Secure stack.
885 : : */
886 : : alternative_state_exc = true;
887 : :
888 : : /* Dump the Secure stack before handling the actual fault. */
889 : : z_arch_esf_t *secure_esf;
890 : :
891 : : if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
892 : : /* Secure stack pointed by PSP */
893 : : secure_esf = (z_arch_esf_t *)psp;
894 : : } else {
895 : : /* Secure stack pointed by MSP */
896 : : secure_esf = (z_arch_esf_t *)msp;
897 : : *nested_exc = true;
898 : : }
899 : :
900 : : SECURE_STACK_DUMP(secure_esf);
901 : :
902 : : /* Handle the actual fault.
903 : : * Extract the correct stack frame from the Non-Secure state
904 : : * and supply it to the fault handing function.
905 : : */
906 : : if (exc_return & EXC_RETURN_MODE_THREAD) {
907 : : ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
908 : : } else {
909 : : ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
910 : : }
911 : : }
912 : : #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
913 : : if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
914 : : /* Non-Secure Firmware shall only handle Non-Secure Exceptions.
915 : : * This is a fatal error.
916 : : */
917 : : return NULL;
918 : : }
919 : :
920 : : if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
921 : : /* Exception entry occurred in Secure stack.
922 : : *
923 : : * Note that Non-Secure firmware cannot inspect the Secure
924 : : * stack to determine the root cause of the fault. Fault
925 : : * inspection will indicate the Non-Secure instruction
926 : : * that performed the branch to the Secure domain.
927 : : */
928 : : alternative_state_exc = true;
929 : :
930 : : PR_FAULT_INFO("Exception occurred in Secure State");
931 : :
932 : : if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
933 : : /* Non-Secure stack frame on PSP */
934 : : ptr_esf = (z_arch_esf_t *)psp;
935 : : } else {
936 : : /* Non-Secure stack frame on MSP */
937 : : ptr_esf = (z_arch_esf_t *)msp;
938 : : }
939 : : } else {
940 : : /* Exception entry occurred in Non-Secure stack. */
941 : : }
942 : : #else
943 : : /* The processor has a single execution state.
944 : : * We verify that the Thread mode is using PSP.
945 : : */
946 [ # # ]: 0 : if ((exc_return & EXC_RETURN_MODE_THREAD) &&
947 [ # # ]: 0 : (!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
948 [ # # ]: 0 : PR_EXC("SPSEL in thread mode does not indicate PSP");
949 : 0 : return NULL;
950 : : }
951 : : #endif /* CONFIG_ARM_SECURE_FIRMWARE */
952 : :
953 [ # # ]: 0 : if (!alternative_state_exc) {
954 [ # # ]: 0 : if (exc_return & EXC_RETURN_MODE_THREAD) {
955 : : /* Returning to thread mode */
956 : 0 : ptr_esf = (z_arch_esf_t *)psp;
957 : :
958 : : } else {
959 : : /* Returning to handler mode */
960 : 0 : ptr_esf = (z_arch_esf_t *)msp;
961 : 0 : *nested_exc = true;
962 : : }
963 : : }
964 : :
965 : 0 : return ptr_esf;
966 : : }
967 : :
968 : : /**
969 : : *
970 : : * @brief ARM Fault handler
971 : : *
972 : : * This routine is called when fatal error conditions are detected by hardware
973 : : * and is responsible for:
974 : : * - resetting the processor fault status registers (for the case when the
975 : : * error handling policy allows the system to recover from the error),
976 : : * - reporting the error information,
977 : : * - determining the error reason to be provided as input to the user-
978 : : * provided routine, k_sys_fatal_error_handler().
979 : : * The k_sys_fatal_error_handler() is invoked once the above operations are
980 : : * completed, and is responsible for implementing the error handling policy.
981 : : *
982 : : * The function needs, first, to determine the exception stack frame.
983 : : * Note that the current security state might not be the actual
984 : : * state in which the processor was executing, when the exception occurred.
985 : : * The actual state may need to be determined by inspecting the EXC_RETURN
986 : : * value, which is provided as argument to the Fault handler.
987 : : *
988 : : * If the exception occurred in the same security state, the stack frame
989 : : * will be pointed to by either MSP or PSP depending on the processor
990 : : * execution state when the exception occurred. MSP and PSP values are
991 : : * provided as arguments to the Fault handler.
992 : : *
993 : : * @param msp MSP value immediately after the exception occurred
994 : : * @param psp PSP value immediately after the exception occurred
995 : : * @param exc_return EXC_RETURN value present in LR after exception entry.
996 : : * @param callee_regs Callee-saved registers (R4-R11, PSP)
997 : : *
998 : : */
999 : 0 : void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
1000 : : _callee_saved_t *callee_regs)
1001 : : {
1002 : 0 : uint32_t reason = K_ERR_CPU_EXCEPTION;
1003 : 0 : int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
1004 : : bool recoverable, nested_exc;
1005 : : z_arch_esf_t *esf;
1006 : :
1007 : : /* Create a stack-ed copy of the ESF to be used during
1008 : : * the fault handling process.
1009 : : */
1010 : : z_arch_esf_t esf_copy;
1011 : :
1012 : : /* Force unlock interrupts */
1013 : 0 : arch_irq_unlock(0);
1014 : :
1015 : : /* Retrieve the Exception Stack Frame (ESF) to be supplied
1016 : : * as argument to the remainder of the fault handling process.
1017 : : */
1018 : 0 : esf = get_esf(msp, psp, exc_return, &nested_exc);
1019 [ # # ]: 0 : __ASSERT(esf != NULL,
1020 : : "ESF could not be retrieved successfully. Shall never occur.");
1021 : :
1022 : : #ifdef CONFIG_DEBUG_COREDUMP
1023 : : z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
1024 : : #endif
1025 : :
1026 : 0 : reason = fault_handle(esf, fault, &recoverable);
1027 [ # # ]: 0 : if (recoverable) {
1028 : 0 : return;
1029 : : }
1030 : :
1031 : : /* Copy ESF */
1032 : : #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
1033 : 0 : memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
1034 : : ARG_UNUSED(callee_regs);
1035 : : #else
1036 : : /* the extra exception info is not present in the original esf
1037 : : * so we only copy the fields before those.
1038 : : */
1039 : : memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
1040 : : esf_copy.extra_info = (struct __extra_esf_info) {
1041 : : .callee = callee_regs,
1042 : : .exc_return = exc_return,
1043 : : .msp = msp
1044 : : };
1045 : : #endif /* CONFIG_EXTRA_EXCEPTION_INFO */
1046 : :
1047 : : /* Overwrite stacked IPSR to mark a nested exception,
1048 : : * or a return to Thread mode. Note that this may be
1049 : : * required, if the retrieved ESF contents are invalid
1050 : : * due to, for instance, a stacking error.
1051 : : */
1052 [ # # ]: 0 : if (nested_exc) {
1053 [ # # ]: 0 : if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
1054 : 0 : esf_copy.basic.xpsr |= IPSR_ISR_Msk;
1055 : : }
1056 : : } else {
1057 : 0 : esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
1058 : : }
1059 : :
1060 : 0 : z_arm_fatal_error(reason, &esf_copy);
1061 : : }
1062 : :
1063 : : /**
1064 : : *
1065 : : * @brief Initialization of fault handling
1066 : : *
1067 : : * Turns on the desired hardware faults.
1068 : : *
1069 : : */
1070 : 1 : void z_arm_fault_init(void)
1071 : : {
1072 : : #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
1073 : : #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
1074 : 1 : SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
1075 : : #else
1076 : : #error Unknown ARM architecture
1077 : : #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
1078 : : #if defined(CONFIG_BUILTIN_STACK_GUARD)
1079 : : /* If Stack guarding via SP limit checking is enabled, disable
1080 : : * SP limit checking inside HardFault and NMI. This is done
1081 : : * in order to allow for the desired fault logging to execute
1082 : : * properly in all cases.
1083 : : *
1084 : : * Note that this could allow a Secure Firmware Main Stack
1085 : : * to descend into non-secure region during HardFault and
1086 : : * NMI exception entry. To prevent from this, non-secure
1087 : : * memory regions must be located higher than secure memory
1088 : : * regions.
1089 : : *
1090 : : * For Non-Secure Firmware this could allow the Non-Secure Main
1091 : : * Stack to attempt to descend into secure region, in which case a
1092 : : * Secure Hard Fault will occur and we can track the fault from there.
1093 : : */
1094 : 1 : SCB->CCR |= SCB_CCR_STKOFHFNMIGN_Msk;
1095 : : #endif /* CONFIG_BUILTIN_STACK_GUARD */
1096 : : #ifdef CONFIG_TRAP_UNALIGNED_ACCESS
1097 : : SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
1098 : : #endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
1099 : 1 : }
|