Branch data Line data Source code
1 : : /*
2 : : * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
3 : : *
4 : : * SPDX-License-Identifier: Apache-2.0
5 : : */
6 : :
7 : : /**
8 : : * @file
9 : : * @brief Architecture-independent private kernel APIs
10 : : *
11 : : * This file contains private kernel APIs that are not architecture-specific.
12 : : */
13 : :
14 : : #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
15 : : #define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
16 : :
17 : : #include <kernel.h>
18 : : #include <kernel_arch_interface.h>
19 : : #include <string.h>
20 : :
21 : : #ifndef _ASMLANGUAGE
22 : :
23 : : #ifdef __cplusplus
24 : : extern "C" {
25 : : #endif
26 : :
27 : : /* Early boot functions */
28 : :
29 : : void z_early_memset(void *dst, int c, size_t n);
30 : : void z_early_memcpy(void *dst, const void *src, size_t n);
31 : :
32 : : void z_bss_zero(void);
33 : : #ifdef CONFIG_XIP
34 : : void z_data_copy(void);
35 : : #else
static inline void z_data_copy(void)
{
	/* Not an XIP image (CONFIG_XIP=n): .data already resides in RAM,
	 * so there is nothing to copy at early boot.
	 */
}
40 : : #endif
41 : :
42 : : #ifdef CONFIG_LINKER_USE_BOOT_SECTION
43 : : void z_bss_zero_boot(void);
44 : : #else
static inline void z_bss_zero_boot(void)
{
	/* No dedicated boot linker section is configured
	 * (CONFIG_LINKER_USE_BOOT_SECTION=n), so there is no separate
	 * boot-section BSS to zero.
	 */
}
49 : : #endif
50 : :
51 : : #ifdef CONFIG_LINKER_USE_PINNED_SECTION
52 : : void z_bss_zero_pinned(void);
53 : : #else
static inline void z_bss_zero_pinned(void)
{
	/* No pinned linker section is configured
	 * (CONFIG_LINKER_USE_PINNED_SECTION=n), so there is no separate
	 * pinned-section BSS to zero.
	 */
}
58 : : #endif
59 : :
60 : : FUNC_NORETURN void z_cstart(void);
61 : :
62 : : void z_device_state_init(void);
63 : :
64 : : extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
65 : : void *p1, void *p2, void *p3);
66 : :
67 : : extern char *z_setup_new_thread(struct k_thread *new_thread,
68 : : k_thread_stack_t *stack, size_t stack_size,
69 : : k_thread_entry_t entry,
70 : : void *p1, void *p2, void *p3,
71 : : int prio, uint32_t options, const char *name);
72 : :
73 : : /**
74 : : * @brief Allocate aligned memory from the current thread's resource pool
75 : : *
76 : : * Threads may be assigned a resource pool, which will be used to allocate
77 : : * memory on behalf of certain kernel and driver APIs. Memory reserved
78 : : * in this way should be freed with k_free().
79 : : *
80 : : * If called from an ISR, the k_malloc() system heap will be used if it exists.
81 : : *
82 : : * @param align Required memory alignment
83 : : * @param size Memory allocation size
84 : : * @return A pointer to the allocated memory, or NULL if there is insufficient
85 : : * RAM in the pool or there is no pool to draw memory from
86 : : */
87 : : void *z_thread_aligned_alloc(size_t align, size_t size);
88 : :
89 : : /**
90 : : * @brief Allocate some memory from the current thread's resource pool
91 : : *
92 : : * Threads may be assigned a resource pool, which will be used to allocate
93 : : * memory on behalf of certain kernel and driver APIs. Memory reserved
94 : : * in this way should be freed with k_free().
95 : : *
96 : : * If called from an ISR, the k_malloc() system heap will be used if it exists.
97 : : *
98 : : * @param size Memory allocation size
99 : : * @return A pointer to the allocated memory, or NULL if there is insufficient
100 : : * RAM in the pool or there is no pool to draw memory from
101 : : */
static inline void *z_thread_malloc(size_t size)
{
	/* Delegate to the aligned allocator with an alignment of 0,
	 * i.e. no particular alignment constraint (presumably the
	 * allocator's natural alignment -- see z_thread_aligned_alloc).
	 */
	void *mem = z_thread_aligned_alloc(0, size);

	return mem;
}
106 : :
107 : : /* set and clear essential thread flag */
108 : :
109 : : extern void z_thread_essential_set(void);
110 : : extern void z_thread_essential_clear(void);
111 : :
112 : : /* clean up when a thread is aborted */
113 : :
114 : : #if defined(CONFIG_THREAD_MONITOR)
115 : : extern void z_thread_monitor_exit(struct k_thread *thread);
116 : : #else
117 : : #define z_thread_monitor_exit(thread) \
118 : : do {/* nothing */ \
119 : : } while (false)
120 : : #endif /* CONFIG_THREAD_MONITOR */
121 : :
122 : : #ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it is a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	/* Record the value that the switch-based z_swap() will return to
	 * @p thread once it is scheduled back in.
	 */
	thread->swap_retval = value;
}
131 : : #endif
132 : :
133 : : static ALWAYS_INLINE void
134 : 0 : z_thread_return_value_set_with_data(struct k_thread *thread,
135 : : unsigned int value,
136 : : void *data)
137 : : {
138 : 0 : arch_thread_return_value_set(thread, value);
139 : 0 : thread->base.swap_data = data;
140 : 0 : }
141 : :
142 : : #ifdef CONFIG_SMP
143 : : extern void z_smp_init(void);
144 : : extern void smp_timer_init(void);
145 : : #endif
146 : :
147 : : extern void z_early_boot_rand_get(uint8_t *buf, size_t length);
148 : :
149 : : #if CONFIG_STACK_POINTER_RANDOM
150 : : extern int z_stack_adjust_initialized;
151 : : #endif
152 : :
153 : : extern struct k_thread z_main_thread;
154 : :
155 : :
156 : : #ifdef CONFIG_MULTITHREADING
157 : : extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
158 : : #endif
159 : : K_KERNEL_PINNED_STACK_ARRAY_EXTERN(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
160 : : CONFIG_ISR_STACK_SIZE);
161 : :
162 : : #ifdef CONFIG_GEN_PRIV_STACKS
163 : : extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
164 : : #endif
165 : :
166 : : /* Calculate stack usage. */
167 : : int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
168 : :
169 : : #ifdef CONFIG_USERSPACE
170 : : bool z_stack_is_user_capable(k_thread_stack_t *stack);
171 : :
172 : : /* Memory domain setup hook, called from z_setup_new_thread() */
173 : : void z_mem_domain_init_thread(struct k_thread *thread);
174 : :
175 : : /* Memory domain teardown hook, called from z_thread_abort() */
176 : : void z_mem_domain_exit_thread(struct k_thread *thread);
177 : :
178 : : /* This spinlock:
179 : : *
180 : : * - Protects the full set of active k_mem_domain objects and their contents
181 : : * - Serializes calls to arch_mem_domain_* APIs
182 : : *
183 : : * If architecture code needs to access k_mem_domain structures or the
184 : : * partitions they contain at any other point, this spinlock should be held.
185 : : * Uniprocessor systems can get away with just locking interrupts but this is
186 : : * not recommended.
187 : : */
188 : : extern struct k_spinlock z_mem_domain_lock;
189 : : #endif /* CONFIG_USERSPACE */
190 : :
191 : : #ifdef CONFIG_GDBSTUB
192 : : struct gdb_ctx;
193 : :
/* Should be called by the arch layer. This is the gdbstub main loop;
 * it communicates synchronously with gdb on the host.
 */
197 : : extern int z_gdb_main_loop(struct gdb_ctx *ctx);
198 : : #endif
199 : :
200 : : #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
201 : : void z_thread_mark_switched_in(void);
202 : : void z_thread_mark_switched_out(void);
203 : : #else
204 : :
205 : : /**
206 : : * @brief Called after a thread has been selected to run
207 : : */
208 : : #define z_thread_mark_switched_in()
209 : :
210 : : /**
211 : : * @brief Called before a thread has been selected to run
212 : : */
213 : :
214 : : #define z_thread_mark_switched_out()
215 : :
216 : : #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
217 : :
218 : : /* Init hook for page frame management, invoked immediately upon entry of
219 : : * main thread, before POST_KERNEL tasks
220 : : */
221 : : void z_mem_manage_init(void);
222 : :
223 : : /**
224 : : * @brief Finalize page frame management at the end of boot process.
225 : : */
226 : : void z_mem_manage_boot_finish(void);
227 : :
/* Run the attached statement (or compound statement) exactly once while
 * holding spinlock @p lck: the lock is acquired before the single loop
 * iteration and released in the loop's increment expression afterwards.
 *
 * NOTE: do not 'break', 'goto', or 'return' out of the body -- that would
 * skip the increment expression and leave the spinlock held.
 */
#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
					   __key = k_spin_lock(lck); \
			!__i.key; \
			k_spin_unlock(lck, __key), __i.key = 1)
232 : :
233 : : #ifdef CONFIG_PM
234 : :
235 : : /* When the kernel is about to go idle, it calls this function to notify the
236 : : * power management subsystem, that the kernel is ready to enter the idle state.
237 : : *
238 : : * At this point, the kernel has disabled interrupts and computed the maximum
239 : : * time the system can remain idle. The function passes the time that the system
240 : : * can remain idle. The SOC interface performs power operations that can be done
241 : : * in the available time. The power management operations must halt execution of
242 : : * the CPU.
243 : : *
244 : : * This function assumes that a wake up event has already been set up by the
245 : : * application.
246 : : *
247 : : * This function is entered with interrupts disabled. It should re-enable
248 : : * interrupts if it had entered a power state.
249 : : *
250 : : * @return True if the system suspended, otherwise return false
251 : : */
252 : : bool pm_system_suspend(int32_t ticks);
253 : :
254 : : /**
255 : : * Notify exit from kernel idling after PM operations
256 : : *
 * This function notifies exit from kernel idling if a corresponding
 * pm_system_suspend() notification was handled and did not return
 * PM_STATE_ACTIVE.
260 : : *
261 : : * This function would be called from the ISR context of the event
262 : : * that caused the exit from kernel idling. This will be called immediately
263 : : * after interrupts are enabled. This is called to give a chance to do
264 : : * any operations before the kernel would switch tasks or processes nested
265 : : * interrupts. This is required for cpu low power states that would require
266 : : * interrupts to be enabled while entering low power states. e.g. C1 in x86. In
267 : : * those cases, the ISR would be invoked immediately after the event wakes up
268 : : * the CPU, before code following the CPU wait, gets a chance to execute. This
269 : : * can be ignored if no operation needs to be done at the wake event
270 : : * notification.
271 : : */
272 : : void pm_system_resume(void);
273 : :
274 : : #endif
275 : :
276 : : #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
277 : : /**
278 : : * Initialize the timing histograms for demand paging.
279 : : */
280 : : void z_paging_histogram_init(void);
281 : :
282 : : /**
283 : : * Increment the counter in the timing histogram.
284 : : *
285 : : * @param hist The timing histogram to be updated.
286 : : * @param cycles Time spent in measured operation.
287 : : */
288 : : void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
289 : : uint32_t cycles);
290 : : #endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
291 : :
292 : : #ifdef __cplusplus
293 : : }
294 : : #endif
295 : :
296 : : #endif /* _ASMLANGUAGE */
297 : :
298 : : #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */
|