Branch data Line data Source code
1 : : /*
2 : : * Copyright (c) 2016, Wind River Systems, Inc.
3 : : *
4 : : * SPDX-License-Identifier: Apache-2.0
5 : : */
6 : :
7 : : /**
8 : : * @file
9 : : *
10 : : * @brief Public kernel APIs.
11 : : */
12 : :
13 : : #ifndef ZEPHYR_INCLUDE_KERNEL_H_
14 : : #define ZEPHYR_INCLUDE_KERNEL_H_
15 : :
16 : : #if !defined(_ASMLANGUAGE)
17 : : #include <kernel_includes.h>
18 : : #include <errno.h>
19 : : #include <limits.h>
20 : : #include <stdbool.h>
21 : : #include <toolchain.h>
22 : : #include <tracing/tracing_macros.h>
23 : :
24 : : #ifdef __cplusplus
25 : : extern "C" {
26 : : #endif
27 : :
28 : : /**
29 : : * @brief Kernel APIs
30 : : * @defgroup kernel_apis Kernel APIs
31 : : * @{
32 : : * @}
33 : : */
34 : :
35 : : #define K_ANY NULL
36 : : #define K_END NULL
37 : :
38 : : #if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
39 : : #error Zero available thread priorities defined!
40 : : #endif
41 : :
42 : : #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
43 : : #define K_PRIO_PREEMPT(x) (x)
44 : :
45 : : #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
46 : : #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
47 : : #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
48 : : #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
49 : : #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
50 : :
51 : : #ifdef CONFIG_POLL
52 : : #define _POLL_EVENT_OBJ_INIT(obj) \
53 : : .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
54 : : #define _POLL_EVENT sys_dlist_t poll_events
55 : : #else
56 : : #define _POLL_EVENT_OBJ_INIT(obj)
57 : : #define _POLL_EVENT
58 : : #endif
59 : :
60 : : struct k_thread;
61 : : struct k_mutex;
62 : : struct k_sem;
63 : : struct k_msgq;
64 : : struct k_mbox;
65 : : struct k_pipe;
66 : : struct k_queue;
67 : : struct k_fifo;
68 : : struct k_lifo;
69 : : struct k_stack;
70 : : struct k_mem_slab;
71 : : struct k_mem_pool;
72 : : struct k_timer;
73 : : struct k_poll_event;
74 : : struct k_poll_signal;
75 : : struct k_mem_domain;
76 : : struct k_mem_partition;
77 : : struct k_futex;
78 : : struct k_event;
79 : :
80 : : enum execution_context_types {
81 : : K_ISR = 0,
82 : : K_COOP_THREAD,
83 : : K_PREEMPT_THREAD,
84 : : };
85 : :
86 : : /* private, used by k_poll and k_work_poll */
87 : : struct k_work_poll;
88 : : typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
89 : :
90 : : /**
91 : : * @addtogroup thread_apis
92 : : * @{
93 : : */
94 : :
95 : : typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
96 : : void *user_data);
97 : :
98 : : /**
99 : : * @brief Iterate over all the threads in the system.
100 : : *
101 : : * This routine iterates over all the threads in the system and
102 : : * calls the user_cb function for each thread.
103 : : *
104 : : * @param user_cb Pointer to the user callback function.
105 : : * @param user_data Pointer to user data.
106 : : *
107 : : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
108 : : * to be effective.
109 : : * @note This API uses @ref k_spin_lock to protect the _kernel.threads
110 : : * list which means creation of new threads and terminations of existing
111 : : * threads are blocked until this API returns.
112 : : */
113 : : extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
114 : :
115 : : /**
116 : : * @brief Iterate over all the threads in the system without locking.
117 : : *
118 : : * This routine works exactly the same like @ref k_thread_foreach
119 : : * but unlocks interrupts when user_cb is executed.
120 : : *
121 : : * @param user_cb Pointer to the user callback function.
122 : : * @param user_data Pointer to user data.
123 : : *
124 : : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
125 : : * to be effective.
126 : : * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
127 : : * queue elements. It unlocks it during user callback function processing.
128 : : * If a new task is created when this @c foreach function is in progress,
129 : : * the added new task would not be included in the enumeration.
130 : : * If a task is aborted during this enumeration, there would be a race here
131 : : * and there is a possibility that this aborted task would be included in the
132 : : * enumeration.
133 : : * @note If the task is aborted and the memory occupied by its @c k_thread
134 : : * structure is reused when this @c k_thread_foreach_unlocked is in progress
135 : : * it might even lead to the system behave unstable.
136 : : * This function may never return, as it would follow some @c next task
137 : : * pointers treating given pointer as a pointer to the k_thread structure
138 : : * while it is something different right now.
139 : : * Do not reuse the memory that was occupied by k_thread structure of aborted
140 : : * task if it was aborted after this function was called in any context.
141 : : */
142 : : extern void k_thread_foreach_unlocked(
143 : : k_thread_user_cb_t user_cb, void *user_data);
144 : :
145 : : /** @} */
146 : :
147 : : /**
148 : : * @defgroup thread_apis Thread APIs
149 : : * @ingroup kernel_apis
150 : : * @{
151 : : */
152 : :
153 : : #endif /* !_ASMLANGUAGE */
154 : :
155 : :
156 : : /*
157 : : * Thread user options. May be needed by assembly code. Common part uses low
158 : : * bits, arch-specific use high bits.
159 : : */
160 : :
161 : : /**
162 : : * @brief system thread that must not abort
163 : : * */
164 : : #define K_ESSENTIAL (BIT(0))
165 : :
166 : : #if defined(CONFIG_FPU_SHARING)
167 : : /**
168 : : * @brief FPU registers are managed by context switch
169 : : *
170 : : * @details
171 : : * This option indicates that the thread uses the CPU's floating point
172 : : * registers. This instructs the kernel to take additional steps to save
173 : : * and restore the contents of these registers when scheduling the thread.
174 : : * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
175 : : */
176 : : #define K_FP_REGS (BIT(1))
177 : : #endif
178 : :
179 : : /**
180 : : * @brief user mode thread
181 : : *
182 : : * This thread has dropped from supervisor mode to user mode and consequently
183 : : * has additional restrictions
184 : : */
185 : : #define K_USER (BIT(2))
186 : :
187 : : /**
188 : : * @brief Inherit Permissions
189 : : *
190 : : * @details
191 : : * Indicates that the thread being created should inherit all kernel object
192 : : * permissions from the thread that created it. No effect if
193 : : * @kconfig{CONFIG_USERSPACE} is not enabled.
194 : : */
195 : : #define K_INHERIT_PERMS (BIT(3))
196 : :
197 : : /**
198 : : * @brief Callback item state
199 : : *
200 : : * @details
201 : : * This is a single bit of state reserved for "callback manager"
202 : : * utilities (p4wq initially) who need to track operations invoked
203 : : * from within a user-provided callback they have been invoked.
204 : : * Effectively it serves as a tiny bit of zero-overhead TLS data.
205 : : */
206 : : #define K_CALLBACK_STATE (BIT(4))
207 : :
208 : : #ifdef CONFIG_X86
209 : : /* x86 Bitmask definitions for threads user options */
210 : :
211 : : #if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
212 : : /**
213 : : * @brief FP and SSE registers are managed by context switch on x86
214 : : *
215 : : * @details
216 : : * This option indicates that the thread uses the x86 CPU's floating point
217 : : * and SSE registers. This instructs the kernel to take additional steps to
218 : : * save and restore the contents of these registers when scheduling
219 : : * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
220 : : */
221 : : #define K_SSE_REGS (BIT(7))
222 : : #endif
223 : : #endif
224 : :
225 : : /* end - thread options */
226 : :
227 : : #if !defined(_ASMLANGUAGE)
228 : : /**
229 : : * @brief Create a thread.
230 : : *
231 : : * This routine initializes a thread, then schedules it for execution.
232 : : *
233 : : * The new thread may be scheduled for immediate execution or a delayed start.
234 : : * If the newly spawned thread does not have a delayed start the kernel
235 : : * scheduler may preempt the current thread to allow the new thread to
236 : : * execute.
237 : : *
238 : : * Thread options are architecture-specific, and can include K_ESSENTIAL,
239 : : * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
240 : : * them using "|" (the logical OR operator).
241 : : *
242 : : * Stack objects passed to this function must be originally defined with
243 : : * either of these macros in order to be portable:
244 : : *
245 : : * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
246 : : * supervisor threads.
247 : : * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
248 : : * threads only. These stacks use less memory if CONFIG_USERSPACE is
249 : : * enabled.
250 : : *
251 : : * The stack_size parameter has constraints. It must either be:
252 : : *
253 : : * - The original size value passed to K_THREAD_STACK_DEFINE() or
254 : : * K_KERNEL_STACK_DEFINE()
255 : : * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
256 : : * defined with K_THREAD_STACK_DEFINE()
257 : : * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
258 : : * defined with K_KERNEL_STACK_DEFINE().
259 : : *
260 : : * Using other values, or sizeof(stack) may produce undefined behavior.
261 : : *
262 : : * @param new_thread Pointer to uninitialized struct k_thread
263 : : * @param stack Pointer to the stack space.
264 : : * @param stack_size Stack size in bytes.
265 : : * @param entry Thread entry function.
266 : : * @param p1 1st entry point parameter.
267 : : * @param p2 2nd entry point parameter.
268 : : * @param p3 3rd entry point parameter.
269 : : * @param prio Thread priority.
270 : : * @param options Thread options.
271 : : * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
272 : : *
273 : : * @return ID of new thread.
274 : : *
275 : : */
276 : : __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
277 : : k_thread_stack_t *stack,
278 : : size_t stack_size,
279 : : k_thread_entry_t entry,
280 : : void *p1, void *p2, void *p3,
281 : : int prio, uint32_t options, k_timeout_t delay);
282 : :
283 : : /**
284 : : * @brief Drop a thread's privileges permanently to user mode
285 : : *
286 : : * This allows a supervisor thread to be re-used as a user thread.
287 : : * This function does not return, but control will transfer to the provided
288 : : * entry point as if this was a new user thread.
289 : : *
290 : : * The implementation ensures that the stack buffer contents are erased.
291 : : * Any thread-local storage will be reverted to a pristine state.
292 : : *
293 : : * Memory domain membership, resource pool assignment, kernel object
294 : : * permissions, priority, and thread options are preserved.
295 : : *
296 : : * A common use of this function is to re-use the main thread as a user thread
297 : : * once all supervisor mode-only tasks have been completed.
298 : : *
299 : : * @param entry Function to start executing from
300 : : * @param p1 1st entry point parameter
301 : : * @param p2 2nd entry point parameter
302 : : * @param p3 3rd entry point parameter
303 : : */
304 : : extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
305 : : void *p1, void *p2,
306 : : void *p3);
307 : :
308 : : /**
309 : : * @brief Grant a thread access to a set of kernel objects
310 : : *
311 : : * This is a convenience function. For the provided thread, grant access to
312 : : * the remaining arguments, which must be pointers to kernel objects.
313 : : *
314 : : * The thread object must be initialized (i.e. running). The objects don't
315 : : * need to be.
316 : : * Note that NULL shouldn't be passed as an argument.
317 : : *
318 : : * @param thread Thread to grant access to objects
319 : : * @param ... list of kernel object pointers
320 : : */
321 : : #define k_thread_access_grant(thread, ...) \
322 : : FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
323 : :
324 : : /**
325 : : * @brief Assign a resource memory pool to a thread
326 : : *
327 : : * By default, threads have no resource pool assigned unless their parent
328 : : * thread has a resource pool, in which case it is inherited. Multiple
329 : : * threads may be assigned to the same memory pool.
330 : : *
331 : : * Changing a thread's resource pool will not migrate allocations from the
332 : : * previous pool.
333 : : *
334 : : * @param thread Target thread to assign a memory pool for resource requests.
335 : : * @param heap Heap object to use for resources,
336 : : * or NULL if the thread should no longer have a memory pool.
337 : : */
338 : : static inline void k_thread_heap_assign(struct k_thread *thread,
339 : : struct k_heap *heap)
340 : : {
341 : : thread->resource_pool = heap;
342 : : }
343 : :
344 : : #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
345 : : /**
346 : : * @brief Obtain stack usage information for the specified thread
347 : : *
348 : : * User threads will need to have permission on the target thread object.
349 : : *
350 : : * Some hardware may prevent inspection of a stack buffer currently in use.
351 : : * If this API is called from supervisor mode, on the currently running thread,
352 : : * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
353 : : * error will be generated.
354 : : *
355 : : * @param thread Thread to inspect stack information
356 : : * @param unused_ptr Output parameter, filled in with the unused stack space
357 : : * of the target thread in bytes.
358 : : * @return 0 on success
359 : : * @return -EBADF Bad thread object (user mode only)
360 : : * @return -EPERM No permissions on thread object (user mode only)
361 : : * #return -ENOTSUP Forbidden by hardware policy
362 : : * @return -EINVAL Thread is uninitialized or exited (user mode only)
363 : : * @return -EFAULT Bad memory address for unused_ptr (user mode only)
364 : : */
365 : : __syscall int k_thread_stack_space_get(const struct k_thread *thread,
366 : : size_t *unused_ptr);
367 : : #endif
368 : :
369 : : #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
370 : : /**
371 : : * @brief Assign the system heap as a thread's resource pool
372 : : *
373 : : * Similar to z_thread_heap_assign(), but the thread will use
374 : : * the kernel heap to draw memory.
375 : : *
376 : : * Use with caution, as a malicious thread could perform DoS attacks on the
377 : : * kernel heap.
378 : : *
379 : : * @param thread Target thread to assign the system heap for resource requests
380 : : *
381 : : */
382 : : void k_thread_system_pool_assign(struct k_thread *thread);
383 : : #endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */
384 : :
385 : : /**
386 : : * @brief Sleep until a thread exits
387 : : *
388 : : * The caller will be put to sleep until the target thread exits, either due
389 : : * to being aborted, self-exiting, or taking a fatal error. This API returns
390 : : * immediately if the thread isn't running.
391 : : *
392 : : * This API may only be called from ISRs with a K_NO_WAIT timeout,
393 : : * where it can be useful as a predicate to detect when a thread has
394 : : * aborted.
395 : : *
396 : : * @param thread Thread to wait to exit
397 : : * @param timeout upper bound time to wait for the thread to exit.
398 : : * @retval 0 success, target thread has exited or wasn't running
399 : : * @retval -EBUSY returned without waiting
400 : : * @retval -EAGAIN waiting period timed out
401 : : * @retval -EDEADLK target thread is joining on the caller, or target thread
402 : : * is the caller
403 : : */
404 : : __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
405 : :
406 : : /**
407 : : * @brief Put the current thread to sleep.
408 : : *
409 : : * This routine puts the current thread to sleep for @a duration,
410 : : * specified as a k_timeout_t object.
411 : : *
412 : : * @note if @a timeout is set to K_FOREVER then the thread is suspended.
413 : : *
414 : : * @param timeout Desired duration of sleep.
415 : : *
416 : : * @return Zero if the requested time has elapsed or the number of milliseconds
417 : : * left to sleep, if thread was woken up by \ref k_wakeup call.
418 : : */
419 : : __syscall int32_t k_sleep(k_timeout_t timeout);
420 : :
421 : : /**
422 : : * @brief Put the current thread to sleep.
423 : : *
424 : : * This routine puts the current thread to sleep for @a duration milliseconds.
425 : : *
426 : : * @param ms Number of milliseconds to sleep.
427 : : *
428 : : * @return Zero if the requested time has elapsed or the number of milliseconds
429 : : * left to sleep, if thread was woken up by \ref k_wakeup call.
430 : : */
431 : 0 : static inline int32_t k_msleep(int32_t ms)
432 : : {
433 : 0 : return k_sleep(Z_TIMEOUT_MS(ms));
434 : : }
435 : :
436 : : /**
437 : : * @brief Put the current thread to sleep with microsecond resolution.
438 : : *
439 : : * This function is unlikely to work as expected without kernel tuning.
440 : : * In particular, because the lower bound on the duration of a sleep is
441 : : * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
442 : : * adjusted to achieve the resolution desired. The implications of doing
443 : : * this must be understood before attempting to use k_usleep(). Use with
444 : : * caution.
445 : : *
446 : : * @param us Number of microseconds to sleep.
447 : : *
448 : : * @return Zero if the requested time has elapsed or the number of microseconds
449 : : * left to sleep, if thread was woken up by \ref k_wakeup call.
450 : : */
451 : : __syscall int32_t k_usleep(int32_t us);
452 : :
453 : : /**
454 : : * @brief Cause the current thread to busy wait.
455 : : *
456 : : * This routine causes the current thread to execute a "do nothing" loop for
457 : : * @a usec_to_wait microseconds.
458 : : *
459 : : * @note The clock used for the microsecond-resolution delay here may
460 : : * be skewed relative to the clock used for system timeouts like
461 : : * k_sleep(). For example k_busy_wait(1000) may take slightly more or
462 : : * less time than k_sleep(K_MSEC(1)), with the offset dependent on
463 : : * clock tolerances.
464 : : */
465 : : __syscall void k_busy_wait(uint32_t usec_to_wait);
466 : :
467 : : /**
468 : : * @brief Yield the current thread.
469 : : *
470 : : * This routine causes the current thread to yield execution to another
471 : : * thread of the same or higher priority. If there are no other ready threads
472 : : * of the same or higher priority, the routine returns immediately.
473 : : */
474 : : __syscall void k_yield(void);
475 : :
476 : : /**
477 : : * @brief Wake up a sleeping thread.
478 : : *
479 : : * This routine prematurely wakes up @a thread from sleeping.
480 : : *
481 : : * If @a thread is not currently sleeping, the routine has no effect.
482 : : *
483 : : * @param thread ID of thread to wake.
484 : : */
485 : : __syscall void k_wakeup(k_tid_t thread);
486 : :
487 : : /**
488 : : * @brief Get thread ID of the current thread.
489 : : *
490 : : * This unconditionally queries the kernel via a system call.
491 : : *
492 : : * @return ID of current thread.
493 : : */
494 : : __attribute_const__
495 : : __syscall k_tid_t z_current_get(void);
496 : :
497 : : #ifdef CONFIG_THREAD_LOCAL_STORAGE
498 : : /* Thread-local cache of current thread ID, set in z_thread_entry() */
499 : : extern __thread k_tid_t z_tls_current;
500 : : #endif
501 : :
502 : : /**
503 : : * @brief Get thread ID of the current thread.
504 : : *
505 : : * @return ID of current thread.
506 : : *
507 : : */
508 : : __attribute_const__
509 : 1 : static inline k_tid_t k_current_get(void)
510 : : {
511 : : #ifdef CONFIG_THREAD_LOCAL_STORAGE
512 : : return z_tls_current;
513 : : #else
514 : 1 : return z_current_get();
515 : : #endif
516 : : }
517 : :
518 : : /**
519 : : * @brief Abort a thread.
520 : : *
521 : : * This routine permanently stops execution of @a thread. The thread is taken
522 : : * off all kernel queues it is part of (i.e. the ready queue, the timeout
523 : : * queue, or a kernel object wait queue). However, any kernel resources the
524 : : * thread might currently own (such as mutexes or memory blocks) are not
525 : : * released. It is the responsibility of the caller of this routine to ensure
526 : : * all necessary cleanup is performed.
527 : : *
528 : : * After k_thread_abort() returns, the thread is guaranteed not to be
529 : : * running or to become runnable anywhere on the system. Normally
530 : : * this is done via blocking the caller (in the same manner as
531 : : * k_thread_join()), but in interrupt context on SMP systems the
532 : : * implementation is required to spin for threads that are running on
533 : : * other CPUs. Note that as specified, this means that on SMP
534 : : * platforms it is possible for application code to create a deadlock
535 : : * condition by simultaneously aborting a cycle of threads using at
536 : : * least one termination from interrupt context. Zephyr cannot detect
537 : : * all such conditions.
538 : : *
539 : : * @param thread ID of thread to abort.
540 : : */
541 : : __syscall void k_thread_abort(k_tid_t thread);
542 : :
543 : :
544 : : /**
545 : : * @brief Start an inactive thread
546 : : *
547 : : * If a thread was created with K_FOREVER in the delay parameter, it will
548 : : * not be added to the scheduling queue until this function is called
549 : : * on it.
550 : : *
551 : : * @param thread thread to start
552 : : */
553 : : __syscall void k_thread_start(k_tid_t thread);
554 : :
555 : : extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
556 : : extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
557 : :
558 : : #ifdef CONFIG_SYS_CLOCK_EXISTS
559 : :
560 : : /**
561 : : * @brief Get time when a thread wakes up, in system ticks
562 : : *
563 : : * This routine computes the system uptime when a waiting thread next
564 : : * executes, in units of system ticks. If the thread is not waiting,
565 : : * it returns current system time.
566 : : */
567 : : __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
568 : :
569 : : static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
570 : : const struct k_thread *t)
571 : : {
572 : : return z_timeout_expires(&t->base.timeout);
573 : : }
574 : :
575 : : /**
576 : : * @brief Get time remaining before a thread wakes up, in system ticks
577 : : *
578 : : * This routine computes the time remaining before a waiting thread
579 : : * next executes, in units of system ticks. If the thread is not
580 : : * waiting, it returns zero.
581 : : */
582 : : __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
583 : :
584 : : static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
585 : : const struct k_thread *t)
586 : : {
587 : : return z_timeout_remaining(&t->base.timeout);
588 : : }
589 : :
590 : : #endif /* CONFIG_SYS_CLOCK_EXISTS */
591 : :
592 : : /**
593 : : * @cond INTERNAL_HIDDEN
594 : : */
595 : :
596 : : /* timeout has timed out and is not on _timeout_q anymore */
597 : : #define _EXPIRED (-2)
598 : :
599 : : struct _static_thread_data {
600 : : struct k_thread *init_thread;
601 : : k_thread_stack_t *init_stack;
602 : : unsigned int init_stack_size;
603 : : k_thread_entry_t init_entry;
604 : : void *init_p1;
605 : : void *init_p2;
606 : : void *init_p3;
607 : : int init_prio;
608 : : uint32_t init_options;
609 : : int32_t init_delay;
610 : : void (*init_abort)(void);
611 : : const char *init_name;
612 : : };
613 : :
614 : : #define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
615 : : entry, p1, p2, p3, \
616 : : prio, options, delay, abort, tname) \
617 : : { \
618 : : .init_thread = (thread), \
619 : : .init_stack = (stack), \
620 : : .init_stack_size = (stack_size), \
621 : : .init_entry = (k_thread_entry_t)entry, \
622 : : .init_p1 = (void *)p1, \
623 : : .init_p2 = (void *)p2, \
624 : : .init_p3 = (void *)p3, \
625 : : .init_prio = (prio), \
626 : : .init_options = (options), \
627 : : .init_delay = (delay), \
628 : : .init_abort = (abort), \
629 : : .init_name = STRINGIFY(tname), \
630 : : }
631 : :
632 : : /**
633 : : * INTERNAL_HIDDEN @endcond
634 : : */
635 : :
636 : : /**
637 : : * @brief Statically define and initialize a thread.
638 : : *
639 : : * The thread may be scheduled for immediate execution or a delayed start.
640 : : *
641 : : * Thread options are architecture-specific, and can include K_ESSENTIAL,
642 : : * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
643 : : * them using "|" (the logical OR operator).
644 : : *
645 : : * The ID of the thread can be accessed using:
646 : : *
647 : : * @code extern const k_tid_t <name>; @endcode
648 : : *
649 : : * @param name Name of the thread.
650 : : * @param stack_size Stack size in bytes.
651 : : * @param entry Thread entry function.
652 : : * @param p1 1st entry point parameter.
653 : : * @param p2 2nd entry point parameter.
654 : : * @param p3 3rd entry point parameter.
655 : : * @param prio Thread priority.
656 : : * @param options Thread options.
657 : : * @param delay Scheduling delay (in milliseconds), zero for no delay.
658 : : *
659 : : *
660 : : * @internal It has been observed that the x86 compiler by default aligns
661 : : * these _static_thread_data structures to 32-byte boundaries, thereby
662 : : * wasting space. To work around this, force a 4-byte alignment.
663 : : *
664 : : */
665 : : #define K_THREAD_DEFINE(name, stack_size, \
666 : : entry, p1, p2, p3, \
667 : : prio, options, delay) \
668 : : K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
669 : : struct k_thread _k_thread_obj_##name; \
670 : : STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
671 : : Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
672 : : _k_thread_stack_##name, stack_size, \
673 : : entry, p1, p2, p3, prio, options, delay, \
674 : : NULL, name); \
675 : : const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
676 : :
677 : : /**
678 : : * @brief Get a thread's priority.
679 : : *
680 : : * This routine gets the priority of @a thread.
681 : : *
682 : : * @param thread ID of thread whose priority is needed.
683 : : *
684 : : * @return Priority of @a thread.
685 : : */
686 : : __syscall int k_thread_priority_get(k_tid_t thread);
687 : :
688 : : /**
689 : : * @brief Set a thread's priority.
690 : : *
691 : : * This routine immediately changes the priority of @a thread.
692 : : *
693 : : * Rescheduling can occur immediately depending on the priority @a thread is
694 : : * set to:
695 : : *
696 : : * - If its priority is raised above the priority of the caller of this
697 : : * function, and the caller is preemptible, @a thread will be scheduled in.
698 : : *
699 : : * - If the caller operates on itself, it lowers its priority below that of
700 : : * other threads in the system, and the caller is preemptible, the thread of
701 : : * highest priority will be scheduled in.
702 : : *
703 : : * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
704 : : * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
705 : : * highest priority.
706 : : *
707 : : * @param thread ID of thread whose priority is to be set.
708 : : * @param prio New priority.
709 : : *
710 : : * @warning Changing the priority of a thread currently involved in mutex
711 : : * priority inheritance may result in undefined behavior.
712 : : */
713 : : __syscall void k_thread_priority_set(k_tid_t thread, int prio);
714 : :
715 : :
716 : : #ifdef CONFIG_SCHED_DEADLINE
717 : : /**
718 : : * @brief Set deadline expiration time for scheduler
719 : : *
720 : : * This sets the "deadline" expiration as a time delta from the
721 : : * current time, in the same units used by k_cycle_get_32(). The
722 : : * scheduler (when deadline scheduling is enabled) will choose the
723 : : * next expiring thread when selecting between threads at the same
724 : : * static priority. Threads at different priorities will be scheduled
725 : : * according to their static priority.
726 : : *
727 : : * @note Deadlines are stored internally using 32 bit unsigned
728 : : * integers. The number of cycles between the "first" deadline in the
729 : : * scheduler queue and the "last" deadline must be less than 2^31 (i.e
730 : : * a signed non-negative quantity). Failure to adhere to this rule
731 : : * may result in scheduled threads running in an incorrect deadline
732 : : * order.
733 : : *
734 : : * @note Despite the API naming, the scheduler makes no guarantees the
735 : : * the thread WILL be scheduled within that deadline, nor does it take
736 : : * extra metadata (like e.g. the "runtime" and "period" parameters in
737 : : * Linux sched_setattr()) that allows the kernel to validate the
738 : : * scheduling for achievability. Such features could be implemented
739 : : * above this call, which is simply input to the priority selection
740 : : * logic.
741 : : *
742 : : * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
743 : : * configuration.
744 : : *
745 : : * @param thread A thread on which to set the deadline
746 : : * @param deadline A time delta, in cycle units
747 : : *
748 : : */
749 : : __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
750 : : #endif
751 : :
752 : : #ifdef CONFIG_SCHED_CPU_MASK
753 : : /**
754 : : * @brief Sets all CPU enable masks to zero
755 : : *
756 : : * After this returns, the thread will no longer be schedulable on any
757 : : * CPUs. The thread must not be currently runnable.
758 : : *
759 : : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
760 : : * configuration.
761 : : *
762 : : * @param thread Thread to operate upon
763 : : * @return Zero on success, otherwise error code
764 : : */
765 : : int k_thread_cpu_mask_clear(k_tid_t thread);
766 : :
767 : : /**
768 : : * @brief Sets all CPU enable masks to one
769 : : *
770 : : * After this returns, the thread will be schedulable on any CPU. The
771 : : * thread must not be currently runnable.
772 : : *
773 : : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
774 : : * configuration.
775 : : *
776 : : * @param thread Thread to operate upon
777 : : * @return Zero on success, otherwise error code
778 : : */
779 : : int k_thread_cpu_mask_enable_all(k_tid_t thread);
780 : :
781 : : /**
782 : : * @brief Enable thread to run on specified CPU
783 : : *
784 : : * The thread must not be currently runnable.
785 : : *
786 : : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
787 : : * configuration.
788 : : *
789 : : * @param thread Thread to operate upon
790 : : * @param cpu CPU index
791 : : * @return Zero on success, otherwise error code
792 : : */
793 : : int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
794 : :
795 : : /**
796 : : * @brief Prevent thread to run on specified CPU
797 : : *
798 : : * The thread must not be currently runnable.
799 : : *
800 : : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
801 : : * configuration.
802 : : *
803 : : * @param thread Thread to operate upon
804 : : * @param cpu CPU index
805 : : * @return Zero on success, otherwise error code
806 : : */
807 : : int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
808 : :
809 : : /**
810 : : * @brief Pin a thread to a CPU
811 : : *
812 : : * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
813 : : * thread on the selected CPU.
814 : : *
815 : : * @param thread Thread to operate upon
816 : : * @param cpu CPU index
817 : : * @return Zero on success, otherwise error code
818 : : */
819 : : int k_thread_cpu_pin(k_tid_t thread, int cpu);
820 : : #endif
821 : :
822 : : /**
823 : : * @brief Suspend a thread.
824 : : *
825 : : * This routine prevents the kernel scheduler from making @a thread
826 : : * the current thread. All other internal operations on @a thread are
827 : : * still performed; for example, kernel objects it is waiting on are
828 : : * still handed to it. Note that any existing timeouts
829 : : * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
830 : : * will be canceled. On resume, the thread will begin running
831 : : * immediately and return from the blocked call.
832 : : *
833 : : * If @a thread is already suspended, the routine has no effect.
834 : : *
835 : : * @param thread ID of thread to suspend.
836 : : */
837 : : __syscall void k_thread_suspend(k_tid_t thread);
838 : :
839 : : /**
840 : : * @brief Resume a suspended thread.
841 : : *
842 : : * This routine allows the kernel scheduler to make @a thread the current
843 : : * thread, when it is next eligible for that role.
844 : : *
845 : : * If @a thread is not currently suspended, the routine has no effect.
846 : : *
847 : : * @param thread ID of thread to resume.
848 : : */
849 : : __syscall void k_thread_resume(k_tid_t thread);
850 : :
851 : : /**
852 : : * @brief Set time-slicing period and scope.
853 : : *
854 : : * This routine specifies how the scheduler will perform time slicing of
855 : : * preemptible threads.
856 : : *
857 : : * To enable time slicing, @a slice must be non-zero. The scheduler
858 : : * ensures that no thread runs for more than the specified time limit
859 : : * before other threads of that priority are given a chance to execute.
860 : : * Any thread whose priority is higher than @a prio is exempted, and may
861 : : * execute as long as desired without being preempted due to time slicing.
862 : : *
863 : : * Time slicing only limits the maximum amount of time a thread may continuously
864 : : * execute. Once the scheduler selects a thread for execution, there is no
865 : : * minimum guaranteed time the thread will execute before threads of greater or
866 : : * equal priority are scheduled.
867 : : *
868 : : * When the current thread is the only one of that priority eligible
869 : : * for execution, this routine has no effect; the thread is immediately
870 : : * rescheduled after the slice period expires.
871 : : *
872 : : * To disable timeslicing, set both @a slice and @a prio to zero.
873 : : *
874 : : * @param slice Maximum time slice length (in milliseconds).
875 : : * @param prio Highest thread priority level eligible for time slicing.
876 : : */
877 : : extern void k_sched_time_slice_set(int32_t slice, int prio);
878 : :
879 : : /**
880 : : * @brief Set thread time slice
881 : : *
882 : : * As for k_sched_time_slice_set, but (when
883 : : * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
884 : : * thread. When non-zero, this timeslice will take precedence over
885 : : * the global value.
886 : : *
887 : : * When such a thread's timeslice expires, the configured callback
888 : : * will be called before the thread is removed/re-added to the run
889 : : * queue. This callback will occur in interrupt context, and the
890 : : * specified thread is guaranteed to have been preempted by the
891 : : * currently-executing ISR. Such a callback is free to, for example,
892 : : * modify the thread priority or slice time for future execution,
893 : : * suspend the thread, etc...
894 : : *
895 : : * @note Unlike the older API, the time slice parameter here is
896 : : * specified in ticks, not milliseconds. Ticks have always been the
897 : : * internal unit, and not all platforms have integer conversions
898 : : * between the two.
899 : : *
900 : : * @note Threads with a non-zero slice time set will be timesliced
901 : : * always, even if they are higher priority than the maximum timeslice
902 : : * priority set via k_sched_time_slice_set().
903 : : *
904 : : * @note The callback notification for slice expiration happens, as it
905 : : * must, while the thread is still "current", and thus it happens
906 : : * before any registered timeouts at this tick. This has the somewhat
907 : : * confusing side effect that the tick time (c.f. k_uptime_get()) does
908 : : * not yet reflect the expired ticks. Applications wishing to make
909 : : * fine-grained timing decisions within this callback should use the
910 : : * cycle API, or derived facilities like k_thread_runtime_stats_get().
911 : : *
912 : : * @param th A valid, initialized thread
913 : : * @param slice_ticks Maximum timeslice, in ticks
914 : : * @param expired Callback function called on slice expiration
915 : : * @param data Parameter for the expiration handler
916 : : */
917 : : void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
918 : : k_thread_timeslice_fn_t expired, void *data);
919 : :
920 : : /** @} */
921 : :
922 : : /**
923 : : * @addtogroup isr_apis
924 : : * @{
925 : : */
926 : :
927 : : /**
928 : : * @brief Determine if code is running at interrupt level.
929 : : *
930 : : * This routine allows the caller to customize its actions, depending on
931 : : * whether it is a thread or an ISR.
932 : : *
933 : : * @funcprops \isr_ok
934 : : *
935 : : * @return false if invoked by a thread.
936 : : * @return true if invoked by an ISR.
937 : : */
938 : : extern bool k_is_in_isr(void);
939 : :
940 : : /**
941 : : * @brief Determine if code is running in a preemptible thread.
942 : : *
943 : : * This routine allows the caller to customize its actions, depending on
944 : : * whether it can be preempted by another thread. The routine returns a 'true'
945 : : * value if all of the following conditions are met:
946 : : *
947 : : * - The code is running in a thread, not at ISR.
948 : : * - The thread's priority is in the preemptible range.
949 : : * - The thread has not locked the scheduler.
950 : : *
951 : : * @funcprops \isr_ok
952 : : *
953 : : * @return 0 if invoked by an ISR or by a cooperative thread.
954 : : * @return Non-zero if invoked by a preemptible thread.
955 : : */
956 : : __syscall int k_is_preempt_thread(void);
957 : :
958 : : /**
959 : : * @brief Test whether startup is in the before-main-task phase.
960 : : *
961 : : * This routine allows the caller to customize its actions, depending on
962 : : * whether it being invoked before the kernel is fully active.
963 : : *
964 : : * @funcprops \isr_ok
965 : : *
966 : : * @return true if invoked before post-kernel initialization
967 : : * @return false if invoked during/after post-kernel initialization
968 : : */
969 : 191157 : static inline bool k_is_pre_kernel(void)
970 : : {
971 : : extern bool z_sys_post_kernel; /* in init.c */
972 : :
973 : 191157 : return !z_sys_post_kernel;
974 : : }
975 : :
976 : : /**
977 : : * @}
978 : : */
979 : :
980 : : /**
981 : : * @addtogroup thread_apis
982 : : * @{
983 : : */
984 : :
985 : : /**
986 : : * @brief Lock the scheduler.
987 : : *
988 : : * This routine prevents the current thread from being preempted by another
989 : : * thread by instructing the scheduler to treat it as a cooperative thread.
990 : : * If the thread subsequently performs an operation that makes it unready,
991 : : * it will be context switched out in the normal manner. When the thread
992 : : * again becomes the current thread, its non-preemptible status is maintained.
993 : : *
994 : : * This routine can be called recursively.
995 : : *
996 : : * @note k_sched_lock() and k_sched_unlock() should normally be used
997 : : * when the operation being performed can be safely interrupted by ISRs.
998 : : * However, if the amount of processing involved is very small, better
999 : : * performance may be obtained by using irq_lock() and irq_unlock().
1000 : : */
1001 : : extern void k_sched_lock(void);
1002 : :
1003 : : /**
1004 : : * @brief Unlock the scheduler.
1005 : : *
1006 : : * This routine reverses the effect of a previous call to k_sched_lock().
1007 : : * A thread must call the routine once for each time it called k_sched_lock()
1008 : : * before the thread becomes preemptible.
1009 : : */
1010 : : extern void k_sched_unlock(void);
1011 : :
1012 : : /**
1013 : : * @brief Set current thread's custom data.
1014 : : *
1015 : : * This routine sets the custom data for the current thread to @ value.
1016 : : *
1017 : : * Custom data is not used by the kernel itself, and is freely available
1018 : : * for a thread to use as it sees fit. It can be used as a framework
1019 : : * upon which to build thread-local storage.
1020 : : *
1021 : : * @param value New custom data value.
1022 : : *
1023 : : */
1024 : : __syscall void k_thread_custom_data_set(void *value);
1025 : :
1026 : : /**
1027 : : * @brief Get current thread's custom data.
1028 : : *
1029 : : * This routine returns the custom data for the current thread.
1030 : : *
1031 : : * @return Current custom data value.
1032 : : */
1033 : : __syscall void *k_thread_custom_data_get(void);
1034 : :
1035 : : /**
1036 : : * @brief Set current thread name
1037 : : *
1038 : : * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1039 : : * is enabled for tracing and debugging.
1040 : : *
1041 : : * @param thread Thread to set name, or NULL to set the current thread
1042 : : * @param str Name string
1043 : : * @retval 0 on success
1044 : : * @retval -EFAULT Memory access error with supplied string
1045 : : * @retval -ENOSYS Thread name configuration option not enabled
1046 : : * @retval -EINVAL Thread name too long
1047 : : */
1048 : : __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1049 : :
1050 : : /**
1051 : : * @brief Get thread name
1052 : : *
1053 : : * Get the name of a thread
1054 : : *
1055 : : * @param thread Thread ID
1056 : : * @retval Thread name, or NULL if configuration not enabled
1057 : : */
1058 : : const char *k_thread_name_get(k_tid_t thread);
1059 : :
1060 : : /**
1061 : : * @brief Copy the thread name into a supplied buffer
1062 : : *
1063 : : * @param thread Thread to obtain name information
1064 : : * @param buf Destination buffer
1065 : : * @param size Destination buffer size
1066 : : * @retval -ENOSPC Destination buffer too small
1067 : : * @retval -EFAULT Memory access error
1068 : : * @retval -ENOSYS Thread name feature not enabled
1069 : : * @retval 0 Success
1070 : : */
1071 : : __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1072 : : size_t size);
1073 : :
1074 : : /**
1075 : : * @brief Get thread state string
1076 : : *
1077 : : * Get the human friendly thread state string
1078 : : *
1079 : : * @param thread_id Thread ID
1080 : : * @retval Thread state string, empty if no state flag is set
1081 : : */
1082 : : const char *k_thread_state_str(k_tid_t thread_id);
1083 : :
1084 : : /**
1085 : : * @}
1086 : : */
1087 : :
1088 : : /**
1089 : : * @addtogroup clock_apis
1090 : : * @{
1091 : : */
1092 : :
1093 : : /**
1094 : : * @brief Generate null timeout delay.
1095 : : *
1096 : : * This macro generates a timeout delay that instructs a kernel API
1097 : : * not to wait if the requested operation cannot be performed immediately.
1098 : : *
1099 : : * @return Timeout delay value.
1100 : : */
1101 : : #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1102 : :
1103 : : /**
1104 : : * @brief Generate timeout delay from nanoseconds.
1105 : : *
1106 : : * This macro generates a timeout delay that instructs a kernel API to
1107 : : * wait up to @a t nanoseconds to perform the requested operation.
1108 : : * Note that timer precision is limited to the tick rate, not the
1109 : : * requested value.
1110 : : *
1111 : : * @param t Duration in nanoseconds.
1112 : : *
1113 : : * @return Timeout delay value.
1114 : : */
1115 : : #define K_NSEC(t) Z_TIMEOUT_NS(t)
1116 : :
1117 : : /**
1118 : : * @brief Generate timeout delay from microseconds.
1119 : : *
1120 : : * This macro generates a timeout delay that instructs a kernel API
1121 : : * to wait up to @a t microseconds to perform the requested operation.
1122 : : * Note that timer precision is limited to the tick rate, not the
1123 : : * requested value.
1124 : : *
1125 : : * @param t Duration in microseconds.
1126 : : *
1127 : : * @return Timeout delay value.
1128 : : */
1129 : : #define K_USEC(t) Z_TIMEOUT_US(t)
1130 : :
1131 : : /**
1132 : : * @brief Generate timeout delay from cycles.
1133 : : *
1134 : : * This macro generates a timeout delay that instructs a kernel API
1135 : : * to wait up to @a t cycles to perform the requested operation.
1136 : : *
1137 : : * @param t Duration in cycles.
1138 : : *
1139 : : * @return Timeout delay value.
1140 : : */
1141 : : #define K_CYC(t) Z_TIMEOUT_CYC(t)
1142 : :
1143 : : /**
1144 : : * @brief Generate timeout delay from system ticks.
1145 : : *
1146 : : * This macro generates a timeout delay that instructs a kernel API
1147 : : * to wait up to @a t ticks to perform the requested operation.
1148 : : *
1149 : : * @param t Duration in system ticks.
1150 : : *
1151 : : * @return Timeout delay value.
1152 : : */
1153 : : #define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1154 : :
1155 : : /**
1156 : : * @brief Generate timeout delay from milliseconds.
1157 : : *
1158 : : * This macro generates a timeout delay that instructs a kernel API
1159 : : * to wait up to @a ms milliseconds to perform the requested operation.
1160 : : *
1161 : : * @param ms Duration in milliseconds.
1162 : : *
1163 : : * @return Timeout delay value.
1164 : : */
1165 : : #define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1166 : :
1167 : : /**
1168 : : * @brief Generate timeout delay from seconds.
1169 : : *
1170 : : * This macro generates a timeout delay that instructs a kernel API
1171 : : * to wait up to @a s seconds to perform the requested operation.
1172 : : *
1173 : : * @param s Duration in seconds.
1174 : : *
1175 : : * @return Timeout delay value.
1176 : : */
1177 : : #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1178 : :
1179 : : /**
1180 : : * @brief Generate timeout delay from minutes.
1181 : :
1182 : : * This macro generates a timeout delay that instructs a kernel API
1183 : : * to wait up to @a m minutes to perform the requested operation.
1184 : : *
1185 : : * @param m Duration in minutes.
1186 : : *
1187 : : * @return Timeout delay value.
1188 : : */
1189 : : #define K_MINUTES(m) K_SECONDS((m) * 60)
1190 : :
1191 : : /**
1192 : : * @brief Generate timeout delay from hours.
1193 : : *
1194 : : * This macro generates a timeout delay that instructs a kernel API
1195 : : * to wait up to @a h hours to perform the requested operation.
1196 : : *
1197 : : * @param h Duration in hours.
1198 : : *
1199 : : * @return Timeout delay value.
1200 : : */
1201 : : #define K_HOURS(h) K_MINUTES((h) * 60)
1202 : :
1203 : : /**
1204 : : * @brief Generate infinite timeout delay.
1205 : : *
1206 : : * This macro generates a timeout delay that instructs a kernel API
1207 : : * to wait as long as necessary to perform the requested operation.
1208 : : *
1209 : : * @return Timeout delay value.
1210 : : */
1211 : : #define K_FOREVER Z_FOREVER
1212 : :
1213 : : #ifdef CONFIG_TIMEOUT_64BIT
1214 : :
1215 : : /**
1216 : : * @brief Generates an absolute/uptime timeout value from system ticks
1217 : : *
1218 : : * This macro generates a timeout delay that represents an expiration
1219 : : * at the absolute uptime value specified, in system ticks. That is, the
1220 : : * timeout will expire immediately after the system uptime reaches the
1221 : : * specified tick count.
1222 : : *
1223 : : * @param t Tick uptime value
1224 : : * @return Timeout delay value
1225 : : */
1226 : : #define K_TIMEOUT_ABS_TICKS(t) \
1227 : : Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
1228 : :
1229 : : /**
1230 : : * @brief Generates an absolute/uptime timeout value from milliseconds
1231 : : *
1232 : : * This macro generates a timeout delay that represents an expiration
1233 : : * at the absolute uptime value specified, in milliseconds. That is,
1234 : : * the timeout will expire immediately after the system uptime reaches
1235 : : * the specified tick count.
1236 : : *
1237 : : * @param t Millisecond uptime value
1238 : : * @return Timeout delay value
1239 : : */
1240 : : #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1241 : :
1242 : : /**
1243 : : * @brief Generates an absolute/uptime timeout value from microseconds
1244 : : *
1245 : : * This macro generates a timeout delay that represents an expiration
1246 : : * at the absolute uptime value specified, in microseconds. That is,
1247 : : * the timeout will expire immediately after the system uptime reaches
1248 : : * the specified time. Note that timer precision is limited by the
1249 : : * system tick rate and not the requested timeout value.
1250 : : *
1251 : : * @param t Microsecond uptime value
1252 : : * @return Timeout delay value
1253 : : */
1254 : : #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1255 : :
1256 : : /**
1257 : : * @brief Generates an absolute/uptime timeout value from nanoseconds
1258 : : *
1259 : : * This macro generates a timeout delay that represents an expiration
1260 : : * at the absolute uptime value specified, in nanoseconds. That is,
1261 : : * the timeout will expire immediately after the system uptime reaches
1262 : : * the specified time. Note that timer precision is limited by the
1263 : : * system tick rate and not the requested timeout value.
1264 : : *
1265 : : * @param t Nanosecond uptime value
1266 : : * @return Timeout delay value
1267 : : */
1268 : : #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1269 : :
1270 : : /**
1271 : : * @brief Generates an absolute/uptime timeout value from system cycles
1272 : : *
1273 : : * This macro generates a timeout delay that represents an expiration
1274 : : * at the absolute uptime value specified, in cycles. That is, the
1275 : : * timeout will expire immediately after the system uptime reaches the
1276 : : * specified time. Note that timer precision is limited by the system
1277 : : * tick rate and not the requested timeout value.
1278 : : *
1279 : : * @param t Cycle uptime value
1280 : : * @return Timeout delay value
1281 : : */
1282 : : #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1283 : :
1284 : : #endif
1285 : :
1286 : : /**
1287 : : * @}
1288 : : */
1289 : :
1290 : : /**
1291 : : * @cond INTERNAL_HIDDEN
1292 : : */
1293 : :
1294 : : struct k_timer {
1295 : : /*
1296 : : * _timeout structure must be first here if we want to use
1297 : : * dynamic timer allocation. timeout.node is used in the double-linked
1298 : : * list of free timers
1299 : : */
1300 : : struct _timeout timeout;
1301 : :
1302 : : /* wait queue for the (single) thread waiting on this timer */
1303 : : _wait_q_t wait_q;
1304 : :
1305 : : /* runs in ISR context */
1306 : : void (*expiry_fn)(struct k_timer *timer);
1307 : :
1308 : : /* runs in the context of the thread that calls k_timer_stop() */
1309 : : void (*stop_fn)(struct k_timer *timer);
1310 : :
1311 : : /* timer period */
1312 : : k_timeout_t period;
1313 : :
1314 : : /* timer status */
1315 : : uint32_t status;
1316 : :
1317 : : /* user-specific data, also used to support legacy features */
1318 : : void *user_data;
1319 : :
1320 : : SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1321 : : };
1322 : :
1323 : : #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1324 : : { \
1325 : : .timeout = { \
1326 : : .node = {},\
1327 : : .fn = z_timer_expiration_handler, \
1328 : : .dticks = 0, \
1329 : : }, \
1330 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1331 : : .expiry_fn = expiry, \
1332 : : .stop_fn = stop, \
1333 : : .status = 0, \
1334 : : .user_data = 0, \
1335 : : }
1336 : :
1337 : : /**
1338 : : * INTERNAL_HIDDEN @endcond
1339 : : */
1340 : :
1341 : : /**
1342 : : * @defgroup timer_apis Timer APIs
1343 : : * @ingroup kernel_apis
1344 : : * @{
1345 : : */
1346 : :
1347 : : /**
1348 : : * @typedef k_timer_expiry_t
1349 : : * @brief Timer expiry function type.
1350 : : *
1351 : : * A timer's expiry function is executed by the system clock interrupt handler
1352 : : * each time the timer expires. The expiry function is optional, and is only
1353 : : * invoked if the timer has been initialized with one.
1354 : : *
1355 : : * @param timer Address of timer.
1356 : : */
1357 : : typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1358 : :
1359 : : /**
1360 : : * @typedef k_timer_stop_t
1361 : : * @brief Timer stop function type.
1362 : : *
1363 : : * A timer's stop function is executed if the timer is stopped prematurely.
1364 : : * The function runs in the context of call that stops the timer. As
1365 : : * k_timer_stop() can be invoked from an ISR, the stop function must be
1366 : : * callable from interrupt context (isr-ok).
1367 : : *
1368 : : * The stop function is optional, and is only invoked if the timer has been
1369 : : * initialized with one.
1370 : : *
1371 : : * @param timer Address of timer.
1372 : : */
1373 : : typedef void (*k_timer_stop_t)(struct k_timer *timer);
1374 : :
1375 : : /**
1376 : : * @brief Statically define and initialize a timer.
1377 : : *
1378 : : * The timer can be accessed outside the module where it is defined using:
1379 : : *
1380 : : * @code extern struct k_timer <name>; @endcode
1381 : : *
1382 : : * @param name Name of the timer variable.
1383 : : * @param expiry_fn Function to invoke each time the timer expires.
1384 : : * @param stop_fn Function to invoke if the timer is stopped while running.
1385 : : */
1386 : : #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1387 : : STRUCT_SECTION_ITERABLE(k_timer, name) = \
1388 : : Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1389 : :
1390 : : /**
1391 : : * @brief Initialize a timer.
1392 : : *
1393 : : * This routine initializes a timer, prior to its first use.
1394 : : *
1395 : : * @param timer Address of timer.
1396 : : * @param expiry_fn Function to invoke each time the timer expires.
1397 : : * @param stop_fn Function to invoke if the timer is stopped while running.
1398 : : */
1399 : : extern void k_timer_init(struct k_timer *timer,
1400 : : k_timer_expiry_t expiry_fn,
1401 : : k_timer_stop_t stop_fn);
1402 : :
1403 : : /**
1404 : : * @brief Start a timer.
1405 : : *
1406 : : * This routine starts a timer, and resets its status to zero. The timer
1407 : : * begins counting down using the specified duration and period values.
1408 : : *
1409 : : * Attempting to start a timer that is already running is permitted.
1410 : : * The timer's status is reset to zero and the timer begins counting down
1411 : : * using the new duration and period values.
1412 : : *
1413 : : * @param timer Address of timer.
1414 : : * @param duration Initial timer duration.
1415 : : * @param period Timer period.
1416 : : */
1417 : : __syscall void k_timer_start(struct k_timer *timer,
1418 : : k_timeout_t duration, k_timeout_t period);
1419 : :
1420 : : /**
1421 : : * @brief Stop a timer.
1422 : : *
1423 : : * This routine stops a running timer prematurely. The timer's stop function,
1424 : : * if one exists, is invoked by the caller.
1425 : : *
1426 : : * Attempting to stop a timer that is not running is permitted, but has no
1427 : : * effect on the timer.
1428 : : *
1429 : : * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1430 : : * be called from ISRs.
1431 : : *
1432 : : * @funcprops \isr_ok
1433 : : *
1434 : : * @param timer Address of timer.
1435 : : */
1436 : : __syscall void k_timer_stop(struct k_timer *timer);
1437 : :
1438 : : /**
1439 : : * @brief Read timer status.
1440 : : *
1441 : : * This routine reads the timer's status, which indicates the number of times
1442 : : * it has expired since its status was last read.
1443 : : *
1444 : : * Calling this routine resets the timer's status to zero.
1445 : : *
1446 : : * @param timer Address of timer.
1447 : : *
1448 : : * @return Timer status.
1449 : : */
1450 : : __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1451 : :
1452 : : /**
1453 : : * @brief Synchronize thread to timer expiration.
1454 : : *
1455 : : * This routine blocks the calling thread until the timer's status is non-zero
1456 : : * (indicating that it has expired at least once since it was last examined)
1457 : : * or the timer is stopped. If the timer status is already non-zero,
1458 : : * or the timer is already stopped, the caller continues without waiting.
1459 : : *
1460 : : * Calling this routine resets the timer's status to zero.
1461 : : *
1462 : : * This routine must not be used by interrupt handlers, since they are not
1463 : : * allowed to block.
1464 : : *
1465 : : * @param timer Address of timer.
1466 : : *
1467 : : * @return Timer status.
1468 : : */
1469 : : __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
1470 : :
1471 : : #ifdef CONFIG_SYS_CLOCK_EXISTS
1472 : :
1473 : : /**
1474 : : * @brief Get next expiration time of a timer, in system ticks
1475 : : *
1476 : : * This routine returns the future system uptime reached at the next
1477 : : * time of expiration of the timer, in units of system ticks. If the
1478 : : * timer is not running, current system time is returned.
1479 : : *
1480 : : * @param timer The timer object
1481 : : * @return Uptime of expiration, in ticks
1482 : : */
1483 : : __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1484 : :
1485 : : static inline k_ticks_t z_impl_k_timer_expires_ticks(
1486 : : const struct k_timer *timer)
1487 : : {
1488 : : return z_timeout_expires(&timer->timeout);
1489 : : }
1490 : :
1491 : : /**
1492 : : * @brief Get time remaining before a timer next expires, in system ticks
1493 : : *
1494 : : * This routine computes the time remaining before a running timer
1495 : : * next expires, in units of system ticks. If the timer is not
1496 : : * running, it returns zero.
1497 : : */
1498 : : __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1499 : :
1500 : : static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1501 : : const struct k_timer *timer)
1502 : : {
1503 : : return z_timeout_remaining(&timer->timeout);
1504 : : }
1505 : :
1506 : : /**
1507 : : * @brief Get time remaining before a timer next expires.
1508 : : *
1509 : : * This routine computes the (approximate) time remaining before a running
1510 : : * timer next expires. If the timer is not running, it returns zero.
1511 : : *
1512 : : * @param timer Address of timer.
1513 : : *
1514 : : * @return Remaining time (in milliseconds).
1515 : : */
1516 : : static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1517 : : {
1518 : : return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1519 : : }
1520 : :
1521 : : #endif /* CONFIG_SYS_CLOCK_EXISTS */
1522 : :
1523 : : /**
1524 : : * @brief Associate user-specific data with a timer.
1525 : : *
1526 : : * This routine records the @a user_data with the @a timer, to be retrieved
1527 : : * later.
1528 : : *
1529 : : * It can be used e.g. in a timer handler shared across multiple subsystems to
1530 : : * retrieve data specific to the subsystem this timer is associated with.
1531 : : *
1532 : : * @param timer Address of timer.
1533 : : * @param user_data User data to associate with the timer.
1534 : : */
1535 : : __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1536 : :
1537 : : /**
1538 : : * @internal
1539 : : */
1540 : : static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1541 : : void *user_data)
1542 : : {
1543 : : timer->user_data = user_data;
1544 : : }
1545 : :
1546 : : /**
1547 : : * @brief Retrieve the user-specific data from a timer.
1548 : : *
1549 : : * @param timer Address of timer.
1550 : : *
1551 : : * @return The user data.
1552 : : */
1553 : : __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1554 : :
1555 : : static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1556 : : {
1557 : : return timer->user_data;
1558 : : }
1559 : :
1560 : : /** @} */
1561 : :
1562 : : /**
1563 : : * @addtogroup clock_apis
1564 : : * @ingroup kernel_apis
1565 : : * @{
1566 : : */
1567 : :
1568 : : /**
1569 : : * @brief Get system uptime, in system ticks.
1570 : : *
1571 : : * This routine returns the elapsed time since the system booted, in
1572 : : * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1573 : : * fundamental unit of resolution of kernel timekeeping.
1574 : : *
1575 : : * @return Current uptime in ticks.
1576 : : */
1577 : : __syscall int64_t k_uptime_ticks(void);
1578 : :
1579 : : /**
1580 : : * @brief Get system uptime.
1581 : : *
1582 : : * This routine returns the elapsed time since the system booted,
1583 : : * in milliseconds.
1584 : : *
1585 : : * @note
1586 : : * While this function returns time in milliseconds, it does
1587 : : * not mean it has millisecond resolution. The actual resolution depends on
1588 : : * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1589 : : *
1590 : : * @return Current uptime in milliseconds.
1591 : : */
1592 : 0 : static inline int64_t k_uptime_get(void)
1593 : : {
1594 : 0 : return k_ticks_to_ms_floor64(k_uptime_ticks());
1595 : : }
1596 : :
1597 : : /**
1598 : : * @brief Get system uptime (32-bit version).
1599 : : *
1600 : : * This routine returns the lower 32 bits of the system uptime in
1601 : : * milliseconds.
1602 : : *
1603 : : * Because correct conversion requires full precision of the system
1604 : : * clock there is no benefit to using this over k_uptime_get() unless
1605 : : * you know the application will never run long enough for the system
1606 : : * clock to approach 2^32 ticks. Calls to this function may involve
1607 : : * interrupt blocking and 64-bit math.
1608 : : *
1609 : : * @note
1610 : : * While this function returns time in milliseconds, it does
1611 : : * not mean it has millisecond resolution. The actual resolution depends on
1612 : : * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
1613 : : *
1614 : : * @return The low 32 bits of the current uptime, in milliseconds.
1615 : : */
1616 : 0 : static inline uint32_t k_uptime_get_32(void)
1617 : : {
1618 : 0 : return (uint32_t)k_uptime_get();
1619 : : }
1620 : :
1621 : : /**
1622 : : * @brief Get elapsed time.
1623 : : *
1624 : : * This routine computes the elapsed time between the current system uptime
1625 : : * and an earlier reference time, in milliseconds.
1626 : : *
1627 : : * @param reftime Pointer to a reference time, which is updated to the current
1628 : : * uptime upon return.
1629 : : *
1630 : : * @return Elapsed time.
1631 : : */
1632 : : static inline int64_t k_uptime_delta(int64_t *reftime)
1633 : : {
1634 : : int64_t uptime, delta;
1635 : :
1636 : : uptime = k_uptime_get();
1637 : : delta = uptime - *reftime;
1638 : : *reftime = uptime;
1639 : :
1640 : : return delta;
1641 : : }
1642 : :
1643 : : /**
1644 : : * @brief Read the hardware clock.
1645 : : *
1646 : : * This routine returns the current time, as measured by the system's hardware
1647 : : * clock.
1648 : : *
1649 : : * @return Current hardware clock up-counter (in cycles).
1650 : : */
1651 : 2 : static inline uint32_t k_cycle_get_32(void)
1652 : : {
1653 : 2 : return arch_k_cycle_get_32();
1654 : : }
1655 : :
1656 : : /**
1657 : : * @brief Read the 64-bit hardware clock.
1658 : : *
1659 : : * This routine returns the current time in 64-bits, as measured by the
1660 : : * system's hardware clock, if available.
1661 : : *
1662 : : * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1663 : : *
1664 : : * @return Current hardware clock up-counter (in cycles).
1665 : : */
1666 : : static inline uint64_t k_cycle_get_64(void)
1667 : : {
1668 : : if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1669 : : __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1670 : : "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1671 : : return 0;
1672 : : }
1673 : :
1674 : : return arch_k_cycle_get_64();
1675 : : }
1676 : :
1677 : : /**
1678 : : * @}
1679 : : */
1680 : :
1681 : : /**
1682 : : * @cond INTERNAL_HIDDEN
1683 : : */
1684 : :
1685 : : struct k_queue {
1686 : : sys_sflist_t data_q;
1687 : : struct k_spinlock lock;
1688 : : _wait_q_t wait_q;
1689 : :
1690 : : _POLL_EVENT;
1691 : :
1692 : : SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1693 : : };
1694 : :
1695 : : #define Z_QUEUE_INITIALIZER(obj) \
1696 : : { \
1697 : : .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1698 : : .lock = { }, \
1699 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1700 : : _POLL_EVENT_OBJ_INIT(obj) \
1701 : : }
1702 : :
1703 : : extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1704 : :
1705 : : /**
1706 : : * INTERNAL_HIDDEN @endcond
1707 : : */
1708 : :
1709 : : /**
1710 : : * @defgroup queue_apis Queue APIs
1711 : : * @ingroup kernel_apis
1712 : : * @{
1713 : : */
1714 : :
1715 : : /**
1716 : : * @brief Initialize a queue.
1717 : : *
1718 : : * This routine initializes a queue object, prior to its first use.
1719 : : *
1720 : : * @param queue Address of the queue.
1721 : : */
1722 : : __syscall void k_queue_init(struct k_queue *queue);
1723 : :
1724 : : /**
1725 : : * @brief Cancel waiting on a queue.
1726 : : *
1727 : : * This routine causes first thread pending on @a queue, if any, to
1728 : : * return from k_queue_get() call with NULL value (as if timeout expired).
1729 : : * If the queue is being waited on by k_poll(), it will return with
1730 : : * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1731 : : * k_queue_get() will return NULL).
1732 : : *
1733 : : * @funcprops \isr_ok
1734 : : *
1735 : : * @param queue Address of the queue.
1736 : : */
1737 : : __syscall void k_queue_cancel_wait(struct k_queue *queue);
1738 : :
1739 : : /**
1740 : : * @brief Append an element to the end of a queue.
1741 : : *
1742 : : * This routine appends a data item to @a queue. A queue data item must be
1743 : : * aligned on a word boundary, and the first word of the item is reserved
1744 : : * for the kernel's use.
1745 : : *
1746 : : * @funcprops \isr_ok
1747 : : *
1748 : : * @param queue Address of the queue.
1749 : : * @param data Address of the data item.
1750 : : */
1751 : : extern void k_queue_append(struct k_queue *queue, void *data);
1752 : :
1753 : : /**
1754 : : * @brief Append an element to a queue.
1755 : : *
1756 : : * This routine appends a data item to @a queue. There is an implicit memory
1757 : : * allocation to create an additional temporary bookkeeping data structure from
1758 : : * the calling thread's resource pool, which is automatically freed when the
1759 : : * item is removed. The data itself is not copied.
1760 : : *
1761 : : * @funcprops \isr_ok
1762 : : *
1763 : : * @param queue Address of the queue.
1764 : : * @param data Address of the data item.
1765 : : *
1766 : : * @retval 0 on success
1767 : : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1768 : : */
1769 : : __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
1770 : :
1771 : : /**
1772 : : * @brief Prepend an element to a queue.
1773 : : *
1774 : : * This routine prepends a data item to @a queue. A queue data item must be
1775 : : * aligned on a word boundary, and the first word of the item is reserved
1776 : : * for the kernel's use.
1777 : : *
1778 : : * @funcprops \isr_ok
1779 : : *
1780 : : * @param queue Address of the queue.
1781 : : * @param data Address of the data item.
1782 : : */
1783 : : extern void k_queue_prepend(struct k_queue *queue, void *data);
1784 : :
1785 : : /**
1786 : : * @brief Prepend an element to a queue.
1787 : : *
1788 : : * This routine prepends a data item to @a queue. There is an implicit memory
1789 : : * allocation to create an additional temporary bookkeeping data structure from
1790 : : * the calling thread's resource pool, which is automatically freed when the
1791 : : * item is removed. The data itself is not copied.
1792 : : *
1793 : : * @funcprops \isr_ok
1794 : : *
1795 : : * @param queue Address of the queue.
1796 : : * @param data Address of the data item.
1797 : : *
1798 : : * @retval 0 on success
1799 : : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1800 : : */
1801 : : __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
1802 : :
1803 : : /**
1804 : : * @brief Inserts an element to a queue.
1805 : : *
1806 : : * This routine inserts a data item to @a queue after previous item. A queue
1807 : : * data item must be aligned on a word boundary, and the first word of
1808 : : * the item is reserved for the kernel's use.
1809 : : *
1810 : : * @funcprops \isr_ok
1811 : : *
1812 : : * @param queue Address of the queue.
1813 : : * @param prev Address of the previous data item.
1814 : : * @param data Address of the data item.
1815 : : */
1816 : : extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1817 : :
1818 : : /**
1819 : : * @brief Atomically append a list of elements to a queue.
1820 : : *
1821 : : * This routine adds a list of data items to @a queue in one operation.
1822 : : * The data items must be in a singly-linked list, with the first word
1823 : : * in each data item pointing to the next data item; the list must be
1824 : : * NULL-terminated.
1825 : : *
1826 : : * @funcprops \isr_ok
1827 : : *
1828 : : * @param queue Address of the queue.
1829 : : * @param head Pointer to first node in singly-linked list.
1830 : : * @param tail Pointer to last node in singly-linked list.
1831 : : *
1832 : : * @retval 0 on success
1833 : : * @retval -EINVAL on invalid supplied data
1834 : : *
1835 : : */
1836 : : extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
1837 : :
1838 : : /**
1839 : : * @brief Atomically add a list of elements to a queue.
1840 : : *
1841 : : * This routine adds a list of data items to @a queue in one operation.
1842 : : * The data items must be in a singly-linked list implemented using a
1843 : : * sys_slist_t object. Upon completion, the original list is empty.
1844 : : *
1845 : : * @funcprops \isr_ok
1846 : : *
1847 : : * @param queue Address of the queue.
1848 : : * @param list Pointer to sys_slist_t object.
1849 : : *
1850 : : * @retval 0 on success
1851 : : * @retval -EINVAL on invalid data
1852 : : */
1853 : : extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
1854 : :
1855 : : /**
1856 : : * @brief Get an element from a queue.
1857 : : *
1858 : : * This routine removes first data item from @a queue. The first word of the
1859 : : * data item is reserved for the kernel's use.
1860 : : *
1861 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1862 : : *
1863 : : * @funcprops \isr_ok
1864 : : *
1865 : : * @param queue Address of the queue.
1866 : : * @param timeout Non-negative waiting period to obtain a data item
1867 : : * or one of the special values K_NO_WAIT and
1868 : : * K_FOREVER.
1869 : : *
1870 : : * @return Address of the data item if successful; NULL if returned
1871 : : * without waiting, or waiting period timed out.
1872 : : */
1873 : : __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
1874 : :
1875 : : /**
1876 : : * @brief Remove an element from a queue.
1877 : : *
1878 : : * This routine removes data item from @a queue. The first word of the
1879 : : * data item is reserved for the kernel's use. Removing elements from k_queue
1880 : : * rely on sys_slist_find_and_remove which is not a constant time operation.
1881 : : *
1882 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1883 : : *
1884 : : * @funcprops \isr_ok
1885 : : *
1886 : : * @param queue Address of the queue.
1887 : : * @param data Address of the data item.
1888 : : *
1889 : : * @return true if data item was removed
1890 : : */
1891 : : bool k_queue_remove(struct k_queue *queue, void *data);
1892 : :
1893 : : /**
1894 : : * @brief Append an element to a queue only if it's not present already.
1895 : : *
1896 : : * This routine appends data item to @a queue. The first word of the data
1897 : : * item is reserved for the kernel's use. Appending elements to k_queue
1898 : : * relies on sys_slist_is_node_in_list which is not a constant time operation.
1899 : : *
1900 : : * @funcprops \isr_ok
1901 : : *
1902 : : * @param queue Address of the queue.
1903 : : * @param data Address of the data item.
1904 : : *
1905 : : * @return true if data item was added, false if not
1906 : : */
1907 : : bool k_queue_unique_append(struct k_queue *queue, void *data);
1908 : :
1909 : : /**
1910 : : * @brief Query a queue to see if it has data available.
1911 : : *
1912 : : * Note that the data might be already gone by the time this function returns
1913 : : * if other threads are also trying to read from the queue.
1914 : : *
1915 : : * @funcprops \isr_ok
1916 : : *
1917 : : * @param queue Address of the queue.
1918 : : *
1919 : : * @return Non-zero if the queue is empty.
1920 : : * @return 0 if data is available.
1921 : : */
1922 : : __syscall int k_queue_is_empty(struct k_queue *queue);
1923 : :
1924 : : static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
1925 : : {
1926 : : return (int)sys_sflist_is_empty(&queue->data_q);
1927 : : }
1928 : :
1929 : : /**
1930 : : * @brief Peek element at the head of queue.
1931 : : *
1932 : : * Return element from the head of queue without removing it.
1933 : : *
1934 : : * @param queue Address of the queue.
1935 : : *
1936 : : * @return Head element, or NULL if queue is empty.
1937 : : */
1938 : : __syscall void *k_queue_peek_head(struct k_queue *queue);
1939 : :
1940 : : /**
1941 : : * @brief Peek element at the tail of queue.
1942 : : *
1943 : : * Return element from the tail of queue without removing it.
1944 : : *
1945 : : * @param queue Address of the queue.
1946 : : *
1947 : : * @return Tail element, or NULL if queue is empty.
1948 : : */
1949 : : __syscall void *k_queue_peek_tail(struct k_queue *queue);
1950 : :
1951 : : /**
1952 : : * @brief Statically define and initialize a queue.
1953 : : *
1954 : : * The queue can be accessed outside the module where it is defined using:
1955 : : *
1956 : : * @code extern struct k_queue <name>; @endcode
1957 : : *
1958 : : * @param name Name of the queue.
1959 : : */
1960 : : #define K_QUEUE_DEFINE(name) \
1961 : : STRUCT_SECTION_ITERABLE(k_queue, name) = \
1962 : : Z_QUEUE_INITIALIZER(name)
1963 : :
1964 : : /** @} */
1965 : :
1966 : : #ifdef CONFIG_USERSPACE
1967 : : /**
1968 : : * @brief futex structure
1969 : : *
1970 : : * A k_futex is a lightweight mutual exclusion primitive designed
1971 : : * to minimize kernel involvement. Uncontended operation relies
1972 : : * only on atomic access to shared memory. k_futex are tracked as
1973 : : * kernel objects and can live in user memory so that any access
1974 : : * bypasses the kernel object permission management mechanism.
1975 : : */
1976 : : struct k_futex {
1977 : : atomic_t val;
1978 : : };
1979 : :
1980 : : /**
1981 : : * @brief futex kernel data structure
1982 : : *
1983 : : * z_futex_data are the helper data structure for k_futex to complete
1984 : : * futex contended operation on kernel side, structure z_futex_data
1985 : : * of every futex object is invisible in user mode.
1986 : : */
1987 : : struct z_futex_data {
1988 : : _wait_q_t wait_q;
1989 : : struct k_spinlock lock;
1990 : : };
1991 : :
1992 : : #define Z_FUTEX_DATA_INITIALIZER(obj) \
1993 : : { \
1994 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
1995 : : }
1996 : :
1997 : : /**
1998 : : * @defgroup futex_apis FUTEX APIs
1999 : : * @ingroup kernel_apis
2000 : : * @{
2001 : : */
2002 : :
2003 : : /**
2004 : : * @brief Pend the current thread on a futex
2005 : : *
2006 : : * Tests that the supplied futex contains the expected value, and if so,
2007 : : * goes to sleep until some other thread calls k_futex_wake() on it.
2008 : : *
2009 : : * @param futex Address of the futex.
2010 : : * @param expected Expected value of the futex, if it is different the caller
2011 : : * will not wait on it.
2012 : : * @param timeout Non-negative waiting period on the futex, or
2013 : : * one of the special values K_NO_WAIT or K_FOREVER.
2014 : : * @retval -EACCES Caller does not have read access to futex address.
2015 : : * @retval -EAGAIN If the futex value did not match the expected parameter.
2016 : : * @retval -EINVAL Futex parameter address not recognized by the kernel.
2017 : : * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2018 : : * @retval 0 if the caller went to sleep and was woken up. The caller
2019 : : * should check the futex's value on wakeup to determine if it needs
2020 : : * to block again.
2021 : : */
2022 : : __syscall int k_futex_wait(struct k_futex *futex, int expected,
2023 : : k_timeout_t timeout);
2024 : :
2025 : : /**
2026 : : * @brief Wake one/all threads pending on a futex
2027 : : *
2028 : : * Wake up the highest priority thread pending on the supplied futex, or
2029 : : * wakeup all the threads pending on the supplied futex, and the behavior
2030 : : * depends on wake_all.
2031 : : *
2032 : : * @param futex Futex to wake up pending threads.
2033 : : * @param wake_all If true, wake up all pending threads; If false,
2034 : : * wakeup the highest priority thread.
2035 : : * @retval -EACCES Caller does not have access to the futex address.
2036 : : * @retval -EINVAL Futex parameter address not recognized by the kernel.
2037 : : * @retval Number of threads that were woken up.
2038 : : */
2039 : : __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2040 : :
2041 : : /** @} */
2042 : : #endif
2043 : :
2044 : : /**
2045 : : * @defgroup event_apis Event APIs
2046 : : * @ingroup kernel_apis
2047 : : * @{
2048 : : */
2049 : :
2050 : : /**
2051 : : * Event Structure
2052 : : * @ingroup event_apis
2053 : : */
2054 : :
2055 : : struct k_event {
2056 : : _wait_q_t wait_q;
2057 : : uint32_t events;
2058 : : struct k_spinlock lock;
2059 : : };
2060 : :
2061 : : #define Z_EVENT_INITIALIZER(obj) \
2062 : : { \
2063 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2064 : : .events = 0 \
2065 : : }
2066 : :
2067 : : /**
2068 : : * @brief Initialize an event object
2069 : : *
2070 : : * This routine initializes an event object, prior to its first use.
2071 : : *
2072 : : * @param event Address of the event object.
2073 : : */
2074 : : __syscall void k_event_init(struct k_event *event);
2075 : :
2076 : : /**
2077 : : * @brief Post one or more events to an event object
2078 : : *
2079 : : * This routine posts one or more events to an event object. All tasks waiting
2080 : : * on the event object @a event whose waiting conditions become met by this
2081 : : * posting immediately unpend.
2082 : : *
2083 : : * Posting differs from setting in that posted events are merged together with
2084 : : * the current set of events tracked by the event object.
2085 : : *
2086 : : * @param event Address of the event object
2087 : : * @param events Set of events to post to @a event
2088 : : */
2089 : : __syscall void k_event_post(struct k_event *event, uint32_t events);
2090 : :
2091 : : /**
2092 : : * @brief Set the events in an event object
2093 : : *
2094 : : * This routine sets the events stored in event object to the specified value.
2095 : : * All tasks waiting on the event object @a event whose waiting conditions
2096 : : * become met by this immediately unpend.
2097 : : *
2098 : : * Setting differs from posting in that set events replace the current set of
2099 : : * events tracked by the event object.
2100 : : *
2101 : : * @param event Address of the event object
2102 : : * @param events Set of events to post to @a event
2103 : : */
2104 : : __syscall void k_event_set(struct k_event *event, uint32_t events);
2105 : :
2106 : : /**
2107 : : * @brief Wait for any of the specified events
2108 : : *
2109 : : * This routine waits on event object @a event until any of the specified
2110 : : * events have been delivered to the event object, or the maximum wait time
2111 : : * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2112 : : * events that are expressed as bits in a single 32-bit word.
2113 : : *
2114 : : * @note The caller must be careful when resetting if there are multiple threads
2115 : : * waiting for the event object @a event.
2116 : : *
2117 : : * @param event Address of the event object
2118 : : * @param events Set of desired events on which to wait
2119 : : * @param reset If true, clear the set of events tracked by the event object
2120 : : * before waiting. If false, do not clear the events.
2121 : : * @param timeout Waiting period for the desired set of events or one of the
2122 : : * special values K_NO_WAIT and K_FOREVER.
2123 : : *
2124 : : * @retval set of matching events upon success
2125 : : * @retval 0 if matching events were not received within the specified time
2126 : : */
2127 : : __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2128 : : bool reset, k_timeout_t timeout);
2129 : :
2130 : : /**
2131 : : * @brief Wait for any of the specified events
2132 : : *
2133 : : * This routine waits on event object @a event until all of the specified
2134 : : * events have been delivered to the event object, or the maximum wait time
2135 : : * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2136 : : * events that are expressed as bits in a single 32-bit word.
2137 : : *
2138 : : * @note The caller must be careful when resetting if there are multiple threads
2139 : : * waiting for the event object @a event.
2140 : : *
2141 : : * @param event Address of the event object
2142 : : * @param events Set of desired events on which to wait
2143 : : * @param reset If true, clear the set of events tracked by the event object
2144 : : * before waiting. If false, do not clear the events.
2145 : : * @param timeout Waiting period for the desired set of events or one of the
2146 : : * special values K_NO_WAIT and K_FOREVER.
2147 : : *
2148 : : * @retval set of matching events upon success
2149 : : * @retval 0 if matching events were not received within the specified time
2150 : : */
2151 : : __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2152 : : bool reset, k_timeout_t timeout);
2153 : :
2154 : : /**
2155 : : * @brief Statically define and initialize an event object
2156 : : *
2157 : : * The event can be accessed outside the module where it is defined using:
2158 : : *
2159 : : * @code extern struct k_event <name>; @endcode
2160 : : *
2161 : : * @param name Name of the event object.
2162 : : */
2163 : : #define K_EVENT_DEFINE(name) \
2164 : : STRUCT_SECTION_ITERABLE(k_event, name) = \
2165 : : Z_EVENT_INITIALIZER(name);
2166 : :
2167 : : /** @} */
2168 : :
2169 : : struct k_fifo {
2170 : : struct k_queue _queue;
2171 : : };
2172 : :
2173 : : /**
2174 : : * @cond INTERNAL_HIDDEN
2175 : : */
2176 : : #define Z_FIFO_INITIALIZER(obj) \
2177 : : { \
2178 : : ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2179 : : }
2180 : :
2181 : : /**
2182 : : * INTERNAL_HIDDEN @endcond
2183 : : */
2184 : :
2185 : : /**
2186 : : * @defgroup fifo_apis FIFO APIs
2187 : : * @ingroup kernel_apis
2188 : : * @{
2189 : : */
2190 : :
2191 : : /**
2192 : : * @brief Initialize a FIFO queue.
2193 : : *
2194 : : * This routine initializes a FIFO queue, prior to its first use.
2195 : : *
2196 : : * @param fifo Address of the FIFO queue.
2197 : : */
2198 : : #define k_fifo_init(fifo) \
2199 : : ({ \
2200 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2201 : : k_queue_init(&(fifo)->_queue); \
2202 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2203 : : })
2204 : :
2205 : : /**
2206 : : * @brief Cancel waiting on a FIFO queue.
2207 : : *
2208 : : * This routine causes first thread pending on @a fifo, if any, to
2209 : : * return from k_fifo_get() call with NULL value (as if timeout
2210 : : * expired).
2211 : : *
2212 : : * @funcprops \isr_ok
2213 : : *
2214 : : * @param fifo Address of the FIFO queue.
2215 : : */
2216 : : #define k_fifo_cancel_wait(fifo) \
2217 : : ({ \
2218 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2219 : : k_queue_cancel_wait(&(fifo)->_queue); \
2220 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2221 : : })
2222 : :
2223 : : /**
2224 : : * @brief Add an element to a FIFO queue.
2225 : : *
2226 : : * This routine adds a data item to @a fifo. A FIFO data item must be
2227 : : * aligned on a word boundary, and the first word of the item is reserved
2228 : : * for the kernel's use.
2229 : : *
2230 : : * @funcprops \isr_ok
2231 : : *
2232 : : * @param fifo Address of the FIFO.
2233 : : * @param data Address of the data item.
2234 : : */
2235 : : #define k_fifo_put(fifo, data) \
2236 : : ({ \
2237 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2238 : : k_queue_append(&(fifo)->_queue, data); \
2239 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2240 : : })
2241 : :
2242 : : /**
2243 : : * @brief Add an element to a FIFO queue.
2244 : : *
2245 : : * This routine adds a data item to @a fifo. There is an implicit memory
2246 : : * allocation to create an additional temporary bookkeeping data structure from
2247 : : * the calling thread's resource pool, which is automatically freed when the
2248 : : * item is removed. The data itself is not copied.
2249 : : *
2250 : : * @funcprops \isr_ok
2251 : : *
2252 : : * @param fifo Address of the FIFO.
2253 : : * @param data Address of the data item.
2254 : : *
2255 : : * @retval 0 on success
2256 : : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2257 : : */
2258 : : #define k_fifo_alloc_put(fifo, data) \
2259 : : ({ \
2260 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2261 : : int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2262 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2263 : : ret; \
2264 : : })
2265 : :
2266 : : /**
2267 : : * @brief Atomically add a list of elements to a FIFO.
2268 : : *
2269 : : * This routine adds a list of data items to @a fifo in one operation.
2270 : : * The data items must be in a singly-linked list, with the first word of
2271 : : * each data item pointing to the next data item; the list must be
2272 : : * NULL-terminated.
2273 : : *
2274 : : * @funcprops \isr_ok
2275 : : *
2276 : : * @param fifo Address of the FIFO queue.
2277 : : * @param head Pointer to first node in singly-linked list.
2278 : : * @param tail Pointer to last node in singly-linked list.
2279 : : */
2280 : : #define k_fifo_put_list(fifo, head, tail) \
2281 : : ({ \
2282 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2283 : : k_queue_append_list(&(fifo)->_queue, head, tail); \
2284 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2285 : : })
2286 : :
2287 : : /**
2288 : : * @brief Atomically add a list of elements to a FIFO queue.
2289 : : *
2290 : : * This routine adds a list of data items to @a fifo in one operation.
2291 : : * The data items must be in a singly-linked list implemented using a
2292 : : * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2293 : : * and must be re-initialized via sys_slist_init().
2294 : : *
2295 : : * @funcprops \isr_ok
2296 : : *
2297 : : * @param fifo Address of the FIFO queue.
2298 : : * @param list Pointer to sys_slist_t object.
2299 : : */
2300 : : #define k_fifo_put_slist(fifo, list) \
2301 : : ({ \
2302 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2303 : : k_queue_merge_slist(&(fifo)->_queue, list); \
2304 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2305 : : })
2306 : :
2307 : : /**
2308 : : * @brief Get an element from a FIFO queue.
2309 : : *
2310 : : * This routine removes a data item from @a fifo in a "first in, first out"
2311 : : * manner. The first word of the data item is reserved for the kernel's use.
2312 : : *
2313 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2314 : : *
2315 : : * @funcprops \isr_ok
2316 : : *
2317 : : * @param fifo Address of the FIFO queue.
2318 : : * @param timeout Waiting period to obtain a data item,
2319 : : * or one of the special values K_NO_WAIT and K_FOREVER.
2320 : : *
2321 : : * @return Address of the data item if successful; NULL if returned
2322 : : * without waiting, or waiting period timed out.
2323 : : */
2324 : : #define k_fifo_get(fifo, timeout) \
2325 : : ({ \
2326 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2327 : : void *ret = k_queue_get(&(fifo)->_queue, timeout); \
2328 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
2329 : : ret; \
2330 : : })
2331 : :
2332 : : /**
2333 : : * @brief Query a FIFO queue to see if it has data available.
2334 : : *
2335 : : * Note that the data might be already gone by the time this function returns
2336 : : * if other threads is also trying to read from the FIFO.
2337 : : *
2338 : : * @funcprops \isr_ok
2339 : : *
2340 : : * @param fifo Address of the FIFO queue.
2341 : : *
2342 : : * @return Non-zero if the FIFO queue is empty.
2343 : : * @return 0 if data is available.
2344 : : */
2345 : : #define k_fifo_is_empty(fifo) \
2346 : : k_queue_is_empty(&(fifo)->_queue)
2347 : :
2348 : : /**
2349 : : * @brief Peek element at the head of a FIFO queue.
2350 : : *
2351 : : * Return element from the head of FIFO queue without removing it. A usecase
2352 : : * for this is if elements of the FIFO object are themselves containers. Then
2353 : : * on each iteration of processing, a head container will be peeked,
2354 : : * and some data processed out of it, and only if the container is empty,
2355 : : * it will be completely remove from the FIFO queue.
2356 : : *
2357 : : * @param fifo Address of the FIFO queue.
2358 : : *
2359 : : * @return Head element, or NULL if the FIFO queue is empty.
2360 : : */
2361 : : #define k_fifo_peek_head(fifo) \
2362 : : ({ \
2363 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2364 : : void *ret = k_queue_peek_head(&(fifo)->_queue); \
2365 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
2366 : : ret; \
2367 : : })
2368 : :
2369 : : /**
2370 : : * @brief Peek element at the tail of FIFO queue.
2371 : : *
2372 : : * Return element from the tail of FIFO queue (without removing it). A usecase
2373 : : * for this is if elements of the FIFO queue are themselves containers. Then
2374 : : * it may be useful to add more data to the last container in a FIFO queue.
2375 : : *
2376 : : * @param fifo Address of the FIFO queue.
2377 : : *
2378 : : * @return Tail element, or NULL if a FIFO queue is empty.
2379 : : */
2380 : : #define k_fifo_peek_tail(fifo) \
2381 : : ({ \
2382 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2383 : : void *ret = k_queue_peek_tail(&(fifo)->_queue); \
2384 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
2385 : : ret; \
2386 : : })
2387 : :
2388 : : /**
2389 : : * @brief Statically define and initialize a FIFO queue.
2390 : : *
2391 : : * The FIFO queue can be accessed outside the module where it is defined using:
2392 : : *
2393 : : * @code extern struct k_fifo <name>; @endcode
2394 : : *
2395 : : * @param name Name of the FIFO queue.
2396 : : */
2397 : : #define K_FIFO_DEFINE(name) \
2398 : : STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
2399 : : Z_FIFO_INITIALIZER(name)
2400 : :
2401 : : /** @} */
2402 : :
2403 : : struct k_lifo {
2404 : : struct k_queue _queue;
2405 : : };
2406 : :
2407 : : /**
2408 : : * @cond INTERNAL_HIDDEN
2409 : : */
2410 : :
2411 : : #define Z_LIFO_INITIALIZER(obj) \
2412 : : { \
2413 : : ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2414 : : }
2415 : :
2416 : : /**
2417 : : * INTERNAL_HIDDEN @endcond
2418 : : */
2419 : :
2420 : : /**
2421 : : * @defgroup lifo_apis LIFO APIs
2422 : : * @ingroup kernel_apis
2423 : : * @{
2424 : : */
2425 : :
2426 : : /**
2427 : : * @brief Initialize a LIFO queue.
2428 : : *
2429 : : * This routine initializes a LIFO queue object, prior to its first use.
2430 : : *
2431 : : * @param lifo Address of the LIFO queue.
2432 : : */
2433 : : #define k_lifo_init(lifo) \
2434 : : ({ \
2435 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2436 : : k_queue_init(&(lifo)->_queue); \
2437 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2438 : : })
2439 : :
2440 : : /**
2441 : : * @brief Add an element to a LIFO queue.
2442 : : *
2443 : : * This routine adds a data item to @a lifo. A LIFO queue data item must be
2444 : : * aligned on a word boundary, and the first word of the item is
2445 : : * reserved for the kernel's use.
2446 : : *
2447 : : * @funcprops \isr_ok
2448 : : *
2449 : : * @param lifo Address of the LIFO queue.
2450 : : * @param data Address of the data item.
2451 : : */
2452 : : #define k_lifo_put(lifo, data) \
2453 : : ({ \
2454 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2455 : : k_queue_prepend(&(lifo)->_queue, data); \
2456 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2457 : : })
2458 : :
2459 : : /**
2460 : : * @brief Add an element to a LIFO queue.
2461 : : *
2462 : : * This routine adds a data item to @a lifo. There is an implicit memory
2463 : : * allocation to create an additional temporary bookkeeping data structure from
2464 : : * the calling thread's resource pool, which is automatically freed when the
2465 : : * item is removed. The data itself is not copied.
2466 : : *
2467 : : * @funcprops \isr_ok
2468 : : *
2469 : : * @param lifo Address of the LIFO.
2470 : : * @param data Address of the data item.
2471 : : *
2472 : : * @retval 0 on success
2473 : : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2474 : : */
2475 : : #define k_lifo_alloc_put(lifo, data) \
2476 : : ({ \
2477 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2478 : : int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2479 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
2480 : : ret; \
2481 : : })
2482 : :
2483 : : /**
2484 : : * @brief Get an element from a LIFO queue.
2485 : : *
2486 : : * This routine removes a data item from @a LIFO in a "last in, first out"
2487 : : * manner. The first word of the data item is reserved for the kernel's use.
2488 : : *
2489 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2490 : : *
2491 : : * @funcprops \isr_ok
2492 : : *
2493 : : * @param lifo Address of the LIFO queue.
2494 : : * @param timeout Waiting period to obtain a data item,
2495 : : * or one of the special values K_NO_WAIT and K_FOREVER.
2496 : : *
2497 : : * @return Address of the data item if successful; NULL if returned
2498 : : * without waiting, or waiting period timed out.
2499 : : */
2500 : : #define k_lifo_get(lifo, timeout) \
2501 : : ({ \
2502 : : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2503 : : void *ret = k_queue_get(&(lifo)->_queue, timeout); \
2504 : : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
2505 : : ret; \
2506 : : })
2507 : :
2508 : : /**
2509 : : * @brief Statically define and initialize a LIFO queue.
2510 : : *
2511 : : * The LIFO queue can be accessed outside the module where it is defined using:
2512 : : *
2513 : : * @code extern struct k_lifo <name>; @endcode
2514 : : *
2515 : : * @param name Name of the fifo.
2516 : : */
2517 : : #define K_LIFO_DEFINE(name) \
2518 : : STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
2519 : : Z_LIFO_INITIALIZER(name)
2520 : :
2521 : : /** @} */
2522 : :
2523 : : /**
2524 : : * @cond INTERNAL_HIDDEN
2525 : : */
2526 : : #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2527 : :
2528 : : typedef uintptr_t stack_data_t;
2529 : :
2530 : : struct k_stack {
2531 : : _wait_q_t wait_q;
2532 : : struct k_spinlock lock;
2533 : : stack_data_t *base, *next, *top;
2534 : :
2535 : : uint8_t flags;
2536 : :
2537 : : SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2538 : : };
2539 : :
2540 : : #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2541 : : { \
2542 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2543 : : .base = stack_buffer, \
2544 : : .next = stack_buffer, \
2545 : : .top = stack_buffer + stack_num_entries, \
2546 : : }
2547 : :
2548 : : /**
2549 : : * INTERNAL_HIDDEN @endcond
2550 : : */
2551 : :
2552 : : /**
2553 : : * @defgroup stack_apis Stack APIs
2554 : : * @ingroup kernel_apis
2555 : : * @{
2556 : : */
2557 : :
2558 : : /**
2559 : : * @brief Initialize a stack.
2560 : : *
2561 : : * This routine initializes a stack object, prior to its first use.
2562 : : *
2563 : : * @param stack Address of the stack.
2564 : : * @param buffer Address of array used to hold stacked values.
2565 : : * @param num_entries Maximum number of values that can be stacked.
2566 : : */
2567 : : void k_stack_init(struct k_stack *stack,
2568 : : stack_data_t *buffer, uint32_t num_entries);
2569 : :
2570 : :
2571 : : /**
2572 : : * @brief Initialize a stack.
2573 : : *
2574 : : * This routine initializes a stack object, prior to its first use. Internal
2575 : : * buffers will be allocated from the calling thread's resource pool.
2576 : : * This memory will be released if k_stack_cleanup() is called, or
2577 : : * userspace is enabled and the stack object loses all references to it.
2578 : : *
2579 : : * @param stack Address of the stack.
2580 : : * @param num_entries Maximum number of values that can be stacked.
2581 : : *
2582 : : * @return -ENOMEM if memory couldn't be allocated
2583 : : */
2584 : :
2585 : : __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2586 : : uint32_t num_entries);
2587 : :
2588 : : /**
2589 : : * @brief Release a stack's allocated buffer
2590 : : *
2591 : : * If a stack object was given a dynamically allocated buffer via
2592 : : * k_stack_alloc_init(), this will free it. This function does nothing
2593 : : * if the buffer wasn't dynamically allocated.
2594 : : *
2595 : : * @param stack Address of the stack.
2596 : : * @retval 0 on success
2597 : : * @retval -EAGAIN when object is still in use
2598 : : */
2599 : : int k_stack_cleanup(struct k_stack *stack);
2600 : :
2601 : : /**
2602 : : * @brief Push an element onto a stack.
2603 : : *
2604 : : * This routine adds a stack_data_t value @a data to @a stack.
2605 : : *
2606 : : * @funcprops \isr_ok
2607 : : *
2608 : : * @param stack Address of the stack.
2609 : : * @param data Value to push onto the stack.
2610 : : *
2611 : : * @retval 0 on success
2612 : : * @retval -ENOMEM if stack is full
2613 : : */
2614 : : __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2615 : :
2616 : : /**
2617 : : * @brief Pop an element from a stack.
2618 : : *
2619 : : * This routine removes a stack_data_t value from @a stack in a "last in,
2620 : : * first out" manner and stores the value in @a data.
2621 : : *
2622 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2623 : : *
2624 : : * @funcprops \isr_ok
2625 : : *
2626 : : * @param stack Address of the stack.
2627 : : * @param data Address of area to hold the value popped from the stack.
2628 : : * @param timeout Waiting period to obtain a value,
2629 : : * or one of the special values K_NO_WAIT and
2630 : : * K_FOREVER.
2631 : : *
2632 : : * @retval 0 Element popped from stack.
2633 : : * @retval -EBUSY Returned without waiting.
2634 : : * @retval -EAGAIN Waiting period timed out.
2635 : : */
2636 : : __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2637 : : k_timeout_t timeout);
2638 : :
2639 : : /**
2640 : : * @brief Statically define and initialize a stack
2641 : : *
2642 : : * The stack can be accessed outside the module where it is defined using:
2643 : : *
2644 : : * @code extern struct k_stack <name>; @endcode
2645 : : *
2646 : : * @param name Name of the stack.
2647 : : * @param stack_num_entries Maximum number of values that can be stacked.
2648 : : */
2649 : : #define K_STACK_DEFINE(name, stack_num_entries) \
2650 : : stack_data_t __noinit \
2651 : : _k_stack_buf_##name[stack_num_entries]; \
2652 : : STRUCT_SECTION_ITERABLE(k_stack, name) = \
2653 : : Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2654 : : stack_num_entries)
2655 : :
2656 : : /** @} */
2657 : :
2658 : : /**
2659 : : * @cond INTERNAL_HIDDEN
2660 : : */
2661 : :
2662 : : struct k_work;
2663 : : struct k_work_q;
2664 : : struct k_work_queue_config;
2665 : : struct k_delayed_work;
2666 : : extern struct k_work_q k_sys_work_q;
2667 : :
2668 : : /**
2669 : : * INTERNAL_HIDDEN @endcond
2670 : : */
2671 : :
2672 : : /**
2673 : : * @defgroup mutex_apis Mutex APIs
2674 : : * @ingroup kernel_apis
2675 : : * @{
2676 : : */
2677 : :
2678 : : /**
2679 : : * Mutex Structure
2680 : : * @ingroup mutex_apis
2681 : : */
2682 : : struct k_mutex {
2683 : : /** Mutex wait queue */
2684 : : _wait_q_t wait_q;
2685 : : /** Mutex owner */
2686 : : struct k_thread *owner;
2687 : :
2688 : : /** Current lock count */
2689 : : uint32_t lock_count;
2690 : :
2691 : : /** Original thread priority */
2692 : : int owner_orig_prio;
2693 : :
2694 : : SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
2695 : : };
2696 : :
2697 : : /**
2698 : : * @cond INTERNAL_HIDDEN
2699 : : */
2700 : : #define Z_MUTEX_INITIALIZER(obj) \
2701 : : { \
2702 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2703 : : .owner = NULL, \
2704 : : .lock_count = 0, \
2705 : : .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
2706 : : }
2707 : :
2708 : : /**
2709 : : * INTERNAL_HIDDEN @endcond
2710 : : */
2711 : :
2712 : : /**
2713 : : * @brief Statically define and initialize a mutex.
2714 : : *
2715 : : * The mutex can be accessed outside the module where it is defined using:
2716 : : *
2717 : : * @code extern struct k_mutex <name>; @endcode
2718 : : *
2719 : : * @param name Name of the mutex.
2720 : : */
2721 : : #define K_MUTEX_DEFINE(name) \
2722 : : STRUCT_SECTION_ITERABLE(k_mutex, name) = \
2723 : : Z_MUTEX_INITIALIZER(name)
2724 : :
2725 : : /**
2726 : : * @brief Initialize a mutex.
2727 : : *
2728 : : * This routine initializes a mutex object, prior to its first use.
2729 : : *
2730 : : * Upon completion, the mutex is available and does not have an owner.
2731 : : *
2732 : : * @param mutex Address of the mutex.
2733 : : *
2734 : : * @retval 0 Mutex object created
2735 : : *
2736 : : */
2737 : : __syscall int k_mutex_init(struct k_mutex *mutex);
2738 : :
2739 : :
2740 : : /**
2741 : : * @brief Lock a mutex.
2742 : : *
2743 : : * This routine locks @a mutex. If the mutex is locked by another thread,
2744 : : * the calling thread waits until the mutex becomes available or until
2745 : : * a timeout occurs.
2746 : : *
2747 : : * A thread is permitted to lock a mutex it has already locked. The operation
2748 : : * completes immediately and the lock count is increased by 1.
2749 : : *
2750 : : * Mutexes may not be locked in ISRs.
2751 : : *
2752 : : * @param mutex Address of the mutex.
2753 : : * @param timeout Waiting period to lock the mutex,
2754 : : * or one of the special values K_NO_WAIT and
2755 : : * K_FOREVER.
2756 : : *
2757 : : * @retval 0 Mutex locked.
2758 : : * @retval -EBUSY Returned without waiting.
2759 : : * @retval -EAGAIN Waiting period timed out.
2760 : : */
2761 : : __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
2762 : :
2763 : : /**
2764 : : * @brief Unlock a mutex.
2765 : : *
2766 : : * This routine unlocks @a mutex. The mutex must already be locked by the
2767 : : * calling thread.
2768 : : *
2769 : : * The mutex cannot be claimed by another thread until it has been unlocked by
2770 : : * the calling thread as many times as it was previously locked by that
2771 : : * thread.
2772 : : *
2773 : : * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2774 : : * in thread context due to ownership and priority inheritance semantics.
2775 : : *
2776 : : * @param mutex Address of the mutex.
2777 : : *
2778 : : * @retval 0 Mutex unlocked.
2779 : : * @retval -EPERM The current thread does not own the mutex
2780 : : * @retval -EINVAL The mutex is not locked
2781 : : *
2782 : : */
2783 : : __syscall int k_mutex_unlock(struct k_mutex *mutex);
2784 : :
2785 : : /**
2786 : : * @}
2787 : : */
2788 : :
2789 : :
2790 : : struct k_condvar {
2791 : : _wait_q_t wait_q;
2792 : : };
2793 : :
2794 : : #define Z_CONDVAR_INITIALIZER(obj) \
2795 : : { \
2796 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2797 : : }
2798 : :
2799 : : /**
2800 : : * @defgroup condvar_apis Condition Variables APIs
2801 : : * @ingroup kernel_apis
2802 : : * @{
2803 : : */
2804 : :
2805 : : /**
2806 : : * @brief Initialize a condition variable
2807 : : *
2808 : : * @param condvar pointer to a @p k_condvar structure
2809 : : * @retval 0 Condition variable created successfully
2810 : : */
2811 : : __syscall int k_condvar_init(struct k_condvar *condvar);
2812 : :
2813 : : /**
2814 : : * @brief Signals one thread that is pending on the condition variable
2815 : : *
2816 : : * @param condvar pointer to a @p k_condvar structure
2817 : : * @retval 0 On success
2818 : : */
2819 : : __syscall int k_condvar_signal(struct k_condvar *condvar);
2820 : :
2821 : : /**
2822 : : * @brief Unblock all threads that are pending on the condition
2823 : : * variable
2824 : : *
2825 : : * @param condvar pointer to a @p k_condvar structure
2826 : : * @return An integer with number of woken threads on success
2827 : : */
2828 : : __syscall int k_condvar_broadcast(struct k_condvar *condvar);
2829 : :
2830 : : /**
2831 : : * @brief Waits on the condition variable releasing the mutex lock
2832 : : *
2833 : : * Atomically releases the currently owned mutex, blocks the current thread
2834 : : * waiting on the condition variable specified by @a condvar,
2835 : : * and finally acquires the mutex again.
2836 : : *
2837 : : * The waiting thread unblocks only after another thread calls
2838 : : * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
2839 : : *
2840 : : * @param condvar pointer to a @p k_condvar structure
2841 : : * @param mutex Address of the mutex.
2842 : : * @param timeout Waiting period for the condition variable
2843 : : * or one of the special values K_NO_WAIT and K_FOREVER.
2844 : : * @retval 0 On success
2845 : : * @retval -EAGAIN Waiting period timed out.
2846 : : */
2847 : : __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2848 : : k_timeout_t timeout);
2849 : :
2850 : : /**
2851 : : * @brief Statically define and initialize a condition variable.
2852 : : *
2853 : : * The condition variable can be accessed outside the module where it is
2854 : : * defined using:
2855 : : *
2856 : : * @code extern struct k_condvar <name>; @endcode
2857 : : *
2858 : : * @param name Name of the condition variable.
2859 : : */
2860 : : #define K_CONDVAR_DEFINE(name) \
2861 : : STRUCT_SECTION_ITERABLE(k_condvar, name) = \
2862 : : Z_CONDVAR_INITIALIZER(name)
2863 : : /**
2864 : : * @}
2865 : : */
2866 : :
2867 : : /**
2868 : : * @cond INTERNAL_HIDDEN
2869 : : */
2870 : :
2871 : : struct k_sem {
2872 : : _wait_q_t wait_q;
2873 : : unsigned int count;
2874 : : unsigned int limit;
2875 : :
2876 : : _POLL_EVENT;
2877 : :
2878 : : SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
2879 : :
2880 : : };
2881 : :
2882 : : #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
2883 : : { \
2884 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2885 : : .count = initial_count, \
2886 : : .limit = count_limit, \
2887 : : _POLL_EVENT_OBJ_INIT(obj) \
2888 : : }
2889 : :
2890 : : /**
2891 : : * INTERNAL_HIDDEN @endcond
2892 : : */
2893 : :
2894 : : /**
2895 : : * @defgroup semaphore_apis Semaphore APIs
2896 : : * @ingroup kernel_apis
2897 : : * @{
2898 : : */
2899 : :
2900 : : /**
2901 : : * @brief Maximum limit value allowed for a semaphore.
2902 : : *
2903 : : * This is intended for use when a semaphore does not have
2904 : : * an explicit maximum limit, and instead is just used for
2905 : : * counting purposes.
2906 : : *
2907 : : */
2908 : : #define K_SEM_MAX_LIMIT UINT_MAX
2909 : :
2910 : : /**
2911 : : * @brief Initialize a semaphore.
2912 : : *
2913 : : * This routine initializes a semaphore object, prior to its first use.
2914 : : *
2915 : : * @param sem Address of the semaphore.
2916 : : * @param initial_count Initial semaphore count.
2917 : : * @param limit Maximum permitted semaphore count.
2918 : : *
2919 : : * @see K_SEM_MAX_LIMIT
2920 : : *
2921 : : * @retval 0 Semaphore created successfully
2922 : : * @retval -EINVAL Invalid values
2923 : : *
2924 : : */
2925 : : __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
2926 : : unsigned int limit);
2927 : :
2928 : : /**
2929 : : * @brief Take a semaphore.
2930 : : *
2931 : : * This routine takes @a sem.
2932 : : *
2933 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2934 : : *
2935 : : * @funcprops \isr_ok
2936 : : *
2937 : : * @param sem Address of the semaphore.
2938 : : * @param timeout Waiting period to take the semaphore,
2939 : : * or one of the special values K_NO_WAIT and K_FOREVER.
2940 : : *
2941 : : * @retval 0 Semaphore taken.
2942 : : * @retval -EBUSY Returned without waiting.
2943 : : * @retval -EAGAIN Waiting period timed out,
2944 : : * or the semaphore was reset during the waiting period.
2945 : : */
2946 : : __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
2947 : :
2948 : : /**
2949 : : * @brief Give a semaphore.
2950 : : *
2951 : : * This routine gives @a sem, unless the semaphore is already at its maximum
2952 : : * permitted count.
2953 : : *
2954 : : * @funcprops \isr_ok
2955 : : *
2956 : : * @param sem Address of the semaphore.
2957 : : */
2958 : : __syscall void k_sem_give(struct k_sem *sem);
2959 : :
2960 : : /**
2961 : : * @brief Resets a semaphore's count to zero.
2962 : : *
2963 : : * This routine sets the count of @a sem to zero.
2964 : : * Any outstanding semaphore takes will be aborted
2965 : : * with -EAGAIN.
2966 : : *
2967 : : * @param sem Address of the semaphore.
2968 : : */
2969 : : __syscall void k_sem_reset(struct k_sem *sem);
2970 : :
2971 : : /**
2972 : : * @brief Get a semaphore's count.
2973 : : *
2974 : : * This routine returns the current count of @a sem.
2975 : : *
2976 : : * @param sem Address of the semaphore.
2977 : : *
2978 : : * @return Current semaphore count.
2979 : : */
2980 : : __syscall unsigned int k_sem_count_get(struct k_sem *sem);
2981 : :
2982 : : /**
2983 : : * @internal
2984 : : */
2985 : 0 : static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
2986 : : {
2987 : 0 : return sem->count;
2988 : : }
2989 : :
2990 : : /**
2991 : : * @brief Statically define and initialize a semaphore.
2992 : : *
2993 : : * The semaphore can be accessed outside the module where it is defined using:
2994 : : *
2995 : : * @code extern struct k_sem <name>; @endcode
2996 : : *
2997 : : * @param name Name of the semaphore.
2998 : : * @param initial_count Initial semaphore count.
2999 : : * @param count_limit Maximum permitted semaphore count.
3000 : : */
3001 : : #define K_SEM_DEFINE(name, initial_count, count_limit) \
3002 : : STRUCT_SECTION_ITERABLE(k_sem, name) = \
3003 : : Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3004 : : BUILD_ASSERT(((count_limit) != 0) && \
3005 : : ((initial_count) <= (count_limit)) && \
3006 : : ((count_limit) <= K_SEM_MAX_LIMIT));
3007 : :
3008 : : /** @} */
3009 : :
3010 : : /**
3011 : : * @cond INTERNAL_HIDDEN
3012 : : */
3013 : :
3014 : : struct k_work_delayable;
3015 : : struct k_work_sync;
3016 : :
3017 : : /**
3018 : : * INTERNAL_HIDDEN @endcond
3019 : : */
3020 : :
3021 : : /**
3022 : : * @defgroup workqueue_apis Work Queue APIs
3023 : : * @ingroup kernel_apis
3024 : : * @{
3025 : : */
3026 : :
3027 : : /** @brief The signature for a work item handler function.
3028 : : *
3029 : : * The function will be invoked by the thread animating a work queue.
3030 : : *
3031 : : * @param work the work item that provided the handler.
3032 : : */
3033 : : typedef void (*k_work_handler_t)(struct k_work *work);
3034 : :
3035 : : /** @brief Initialize a (non-delayable) work structure.
3036 : : *
3037 : : * This must be invoked before submitting a work structure for the first time.
3038 : : * It need not be invoked again on the same work structure. It can be
3039 : : * re-invoked to change the associated handler, but this must be done when the
3040 : : * work item is idle.
3041 : : *
3042 : : * @funcprops \isr_ok
3043 : : *
3044 : : * @param work the work structure to be initialized.
3045 : : *
3046 : : * @param handler the handler to be invoked by the work item.
3047 : : */
3048 : : void k_work_init(struct k_work *work,
3049 : : k_work_handler_t handler);
3050 : :
3051 : : /** @brief Busy state flags from the work item.
3052 : : *
3053 : : * A zero return value indicates the work item appears to be idle.
3054 : : *
3055 : : * @note This is a live snapshot of state, which may change before the result
3056 : : * is checked. Use locks where appropriate.
3057 : : *
3058 : : * @funcprops \isr_ok
3059 : : *
3060 : : * @param work pointer to the work item.
3061 : : *
3062 : : * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3063 : : * K_WORK_RUNNING, and K_WORK_CANCELING.
3064 : : */
3065 : : int k_work_busy_get(const struct k_work *work);
3066 : :
3067 : : /** @brief Test whether a work item is currently pending.
3068 : : *
3069 : : * Wrapper to determine whether a work item is in a non-idle dstate.
3070 : : *
3071 : : * @note This is a live snapshot of state, which may change before the result
3072 : : * is checked. Use locks where appropriate.
3073 : : *
3074 : : * @funcprops \isr_ok
3075 : : *
3076 : : * @param work pointer to the work item.
3077 : : *
3078 : : * @return true if and only if k_work_busy_get() returns a non-zero value.
3079 : : */
3080 : : static inline bool k_work_is_pending(const struct k_work *work);
3081 : :
3082 : : /** @brief Submit a work item to a queue.
3083 : : *
3084 : : * @param queue pointer to the work queue on which the item should run. If
3085 : : * NULL the queue from the most recent submission will be used.
3086 : : *
3087 : : * @funcprops \isr_ok
3088 : : *
3089 : : * @param work pointer to the work item.
3090 : : *
3091 : : * @retval 0 if work was already submitted to a queue
3092 : : * @retval 1 if work was not submitted and has been queued to @p queue
3093 : : * @retval 2 if work was running and has been queued to the queue that was
3094 : : * running it
3095 : : * @retval -EBUSY
3096 : : * * if work submission was rejected because the work item is cancelling; or
3097 : : * * @p queue is draining; or
3098 : : * * @p queue is plugged.
3099 : : * @retval -EINVAL if @p queue is null and the work item has never been run.
3100 : : * @retval -ENODEV if @p queue has not been started.
3101 : : */
3102 : : int k_work_submit_to_queue(struct k_work_q *queue,
3103 : : struct k_work *work);
3104 : :
3105 : : /** @brief Submit a work item to the system queue.
3106 : : *
3107 : : * @funcprops \isr_ok
3108 : : *
3109 : : * @param work pointer to the work item.
3110 : : *
3111 : : * @return as with k_work_submit_to_queue().
3112 : : */
3113 : : extern int k_work_submit(struct k_work *work);
3114 : :
3115 : : /** @brief Wait for last-submitted instance to complete.
3116 : : *
3117 : : * Resubmissions may occur while waiting, including chained submissions (from
3118 : : * within the handler).
3119 : : *
3120 : : * @note Be careful of caller and work queue thread relative priority. If
3121 : : * this function sleeps it will not return until the work queue thread
3122 : : * completes the tasks that allow this thread to resume.
3123 : : *
3124 : : * @note Behavior is undefined if this function is invoked on @p work from a
3125 : : * work queue running @p work.
3126 : : *
3127 : : * @param work pointer to the work item.
3128 : : *
3129 : : * @param sync pointer to an opaque item containing state related to the
3130 : : * pending cancellation. The object must persist until the call returns, and
3131 : : * be accessible from both the caller thread and the work queue thread. The
3132 : : * object must not be used for any other flush or cancel operation until this
3133 : : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3134 : : * must be allocated in coherent memory.
3135 : : *
3136 : : * @retval true if call had to wait for completion
3137 : : * @retval false if work was already idle
3138 : : */
3139 : : bool k_work_flush(struct k_work *work,
3140 : : struct k_work_sync *sync);
3141 : :
3142 : : /** @brief Cancel a work item.
3143 : : *
3144 : : * This attempts to prevent a pending (non-delayable) work item from being
3145 : : * processed by removing it from the work queue. If the item is being
3146 : : * processed, the work item will continue to be processed, but resubmissions
3147 : : * are rejected until cancellation completes.
3148 : : *
3149 : : * If this returns zero cancellation is complete, otherwise something
3150 : : * (probably a work queue thread) is still referencing the item.
3151 : : *
3152 : : * See also k_work_cancel_sync().
3153 : : *
3154 : : * @funcprops \isr_ok
3155 : : *
3156 : : * @param work pointer to the work item.
3157 : : *
3158 : : * @return the k_work_busy_get() status indicating the state of the item after all
3159 : : * cancellation steps performed by this call are completed.
3160 : : */
3161 : : int k_work_cancel(struct k_work *work);
3162 : :
3163 : : /** @brief Cancel a work item and wait for it to complete.
3164 : : *
3165 : : * Same as k_work_cancel() but does not return until cancellation is complete.
3166 : : * This can be invoked by a thread after k_work_cancel() to synchronize with a
3167 : : * previous cancellation.
3168 : : *
3169 : : * On return the work structure will be idle unless something submits it after
3170 : : * the cancellation was complete.
3171 : : *
3172 : : * @note Be careful of caller and work queue thread relative priority. If
3173 : : * this function sleeps it will not return until the work queue thread
3174 : : * completes the tasks that allow this thread to resume.
3175 : : *
3176 : : * @note Behavior is undefined if this function is invoked on @p work from a
3177 : : * work queue running @p work.
3178 : : *
3179 : : * @param work pointer to the work item.
3180 : : *
3181 : : * @param sync pointer to an opaque item containing state related to the
3182 : : * pending cancellation. The object must persist until the call returns, and
3183 : : * be accessible from both the caller thread and the work queue thread. The
3184 : : * object must not be used for any other flush or cancel operation until this
3185 : : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3186 : : * must be allocated in coherent memory.
3187 : : *
3188 : : * @retval true if work was pending (call had to wait for cancellation of a
3189 : : * running handler to complete, or scheduled or submitted operations were
3190 : : * cancelled);
3191 : : * @retval false otherwise
3192 : : */
3193 : : bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3194 : :
3195 : : /** @brief Initialize a work queue structure.
3196 : : *
3197 : : * This must be invoked before starting a work queue structure for the first time.
3198 : : * It need not be invoked again on the same work queue structure.
3199 : : *
3200 : : * @funcprops \isr_ok
3201 : : *
3202 : : * @param queue the queue structure to be initialized.
3203 : : */
3204 : : void k_work_queue_init(struct k_work_q *queue);
3205 : :
3206 : : /** @brief Initialize a work queue.
3207 : : *
3208 : : * This configures the work queue thread and starts it running. The function
3209 : : * should not be re-invoked on a queue.
3210 : : *
3211 : : * @param queue pointer to the queue structure. It must be initialized
3212 : : * in zeroed/bss memory or with @ref k_work_queue_init before
3213 : : * use.
3214 : : *
3215 : : * @param stack pointer to the work thread stack area.
3216 : : *
3217 : : * @param stack_size size of the the work thread stack area, in bytes.
3218 : : *
3219 : : * @param prio initial thread priority
3220 : : *
3221 : : * @param cfg optional additional configuration parameters. Pass @c
3222 : : * NULL if not required, to use the defaults documented in
3223 : : * k_work_queue_config.
3224 : : */
3225 : : void k_work_queue_start(struct k_work_q *queue,
3226 : : k_thread_stack_t *stack, size_t stack_size,
3227 : : int prio, const struct k_work_queue_config *cfg);
3228 : :
3229 : : /** @brief Access the thread that animates a work queue.
3230 : : *
3231 : : * This is necessary to grant a work queue thread access to things the work
3232 : : * items it will process are expected to use.
3233 : : *
3234 : : * @param queue pointer to the queue structure.
3235 : : *
3236 : : * @return the thread associated with the work queue.
3237 : : */
3238 : : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3239 : :
3240 : : /** @brief Wait until the work queue has drained, optionally plugging it.
3241 : : *
3242 : : * This blocks submission to the work queue except when coming from queue
3243 : : * thread, and blocks the caller until no more work items are available in the
3244 : : * queue.
3245 : : *
3246 : : * If @p plug is true then submission will continue to be blocked after the
3247 : : * drain operation completes until k_work_queue_unplug() is invoked.
3248 : : *
3249 : : * Note that work items that are delayed are not yet associated with their
3250 : : * work queue. They must be cancelled externally if a goal is to ensure the
3251 : : * work queue remains empty. The @p plug feature can be used to prevent
3252 : : * delayed items from being submitted after the drain completes.
3253 : : *
3254 : : * @param queue pointer to the queue structure.
3255 : : *
3256 : : * @param plug if true the work queue will continue to block new submissions
3257 : : * after all items have drained.
3258 : : *
3259 : : * @retval 1 if call had to wait for the drain to complete
3260 : : * @retval 0 if call did not have to wait
3261 : : * @retval negative if wait was interrupted or failed
3262 : : */
3263 : : int k_work_queue_drain(struct k_work_q *queue, bool plug);
3264 : :
3265 : : /** @brief Release a work queue to accept new submissions.
3266 : : *
3267 : : * This releases the block on new submissions placed when k_work_queue_drain()
3268 : : * is invoked with the @p plug option enabled. If this is invoked before the
3269 : : * drain completes new items may be submitted as soon as the drain completes.
3270 : : *
3271 : : * @funcprops \isr_ok
3272 : : *
3273 : : * @param queue pointer to the queue structure.
3274 : : *
3275 : : * @retval 0 if successfully unplugged
3276 : : * @retval -EALREADY if the work queue was not plugged.
3277 : : */
3278 : : int k_work_queue_unplug(struct k_work_q *queue);
3279 : :
3280 : : /** @brief Initialize a delayable work structure.
3281 : : *
3282 : : * This must be invoked before scheduling a delayable work structure for the
3283 : : * first time. It need not be invoked again on the same work structure. It
3284 : : * can be re-invoked to change the associated handler, but this must be done
3285 : : * when the work item is idle.
3286 : : *
3287 : : * @funcprops \isr_ok
3288 : : *
3289 : : * @param dwork the delayable work structure to be initialized.
3290 : : *
3291 : : * @param handler the handler to be invoked by the work item.
3292 : : */
3293 : : void k_work_init_delayable(struct k_work_delayable *dwork,
3294 : : k_work_handler_t handler);
3295 : :
3296 : : /**
3297 : : * @brief Get the parent delayable work structure from a work pointer.
3298 : : *
3299 : : * This function is necessary when a @c k_work_handler_t function is passed to
3300 : : * k_work_schedule_for_queue() and the handler needs to access data from the
3301 : : * container of the containing `k_work_delayable`.
3302 : : *
3303 : : * @param work Address passed to the work handler
3304 : : *
3305 : : * @return Address of the containing @c k_work_delayable structure.
3306 : : */
3307 : : static inline struct k_work_delayable *
3308 : : k_work_delayable_from_work(struct k_work *work);
3309 : :
3310 : : /** @brief Busy state flags from the delayable work item.
3311 : : *
3312 : : * @funcprops \isr_ok
3313 : : *
3314 : : * @note This is a live snapshot of state, which may change before the result
3315 : : * can be inspected. Use locks where appropriate.
3316 : : *
3317 : : * @param dwork pointer to the delayable work item.
3318 : : *
3319 : : * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3320 : : * K_WORK_CANCELING. A zero return value indicates the work item appears to
3321 : : * be idle.
3322 : : */
3323 : : int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3324 : :
3325 : : /** @brief Test whether a delayed work item is currently pending.
3326 : : *
3327 : : * Wrapper to determine whether a delayed work item is in a non-idle state.
3328 : : *
3329 : : * @note This is a live snapshot of state, which may change before the result
3330 : : * can be inspected. Use locks where appropriate.
3331 : : *
3332 : : * @funcprops \isr_ok
3333 : : *
3334 : : * @param dwork pointer to the delayable work item.
3335 : : *
3336 : : * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3337 : : * value.
3338 : : */
3339 : : static inline bool k_work_delayable_is_pending(
3340 : : const struct k_work_delayable *dwork);
3341 : :
3342 : : /** @brief Get the absolute tick count at which a scheduled delayable work
3343 : : * will be submitted.
3344 : : *
3345 : : * @note This is a live snapshot of state, which may change before the result
3346 : : * can be inspected. Use locks where appropriate.
3347 : : *
3348 : : * @funcprops \isr_ok
3349 : : *
3350 : : * @param dwork pointer to the delayable work item.
3351 : : *
3352 : : * @return the tick count when the timer that will schedule the work item will
3353 : : * expire, or the current tick count if the work is not scheduled.
3354 : : */
3355 : : static inline k_ticks_t k_work_delayable_expires_get(
3356 : : const struct k_work_delayable *dwork);
3357 : :
3358 : : /** @brief Get the number of ticks until a scheduled delayable work will be
3359 : : * submitted.
3360 : : *
3361 : : * @note This is a live snapshot of state, which may change before the result
3362 : : * can be inspected. Use locks where appropriate.
3363 : : *
3364 : : * @funcprops \isr_ok
3365 : : *
3366 : : * @param dwork pointer to the delayable work item.
3367 : : *
3368 : : * @return the number of ticks until the timer that will schedule the work
3369 : : * item will expire, or zero if the item is not scheduled.
3370 : : */
3371 : : static inline k_ticks_t k_work_delayable_remaining_get(
3372 : : const struct k_work_delayable *dwork);
3373 : :
3374 : : /** @brief Submit an idle work item to a queue after a delay.
3375 : : *
3376 : : * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3377 : : * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3378 : : *
3379 : : * @funcprops \isr_ok
3380 : : *
3381 : : * @param queue the queue on which the work item should be submitted after the
3382 : : * delay.
3383 : : *
3384 : : * @param dwork pointer to the delayable work item.
3385 : : *
3386 : : * @param delay the time to wait before submitting the work item. If @c
3387 : : * K_NO_WAIT and the work is not pending this is equivalent to
3388 : : * k_work_submit_to_queue().
3389 : : *
3390 : : * @retval 0 if work was already scheduled or submitted.
3391 : : * @retval 1 if work has been scheduled.
3392 : : * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3393 : : * k_work_submit_to_queue() fails with this code.
3394 : : * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3395 : : * k_work_submit_to_queue() fails with this code.
3396 : : * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3397 : : * k_work_submit_to_queue() fails with this code.
3398 : : */
3399 : : int k_work_schedule_for_queue(struct k_work_q *queue,
3400 : : struct k_work_delayable *dwork,
3401 : : k_timeout_t delay);
3402 : :
3403 : : /** @brief Submit an idle work item to the system work queue after a
3404 : : * delay.
3405 : : *
3406 : : * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3407 : : * characteristics of that function.
3408 : : *
3409 : : * @param dwork pointer to the delayable work item.
3410 : : *
3411 : : * @param delay the time to wait before submitting the work item. If @c
3412 : : * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3413 : : *
3414 : : * @return as with k_work_schedule_for_queue().
3415 : : */
3416 : : extern int k_work_schedule(struct k_work_delayable *dwork,
3417 : : k_timeout_t delay);
3418 : :
3419 : : /** @brief Reschedule a work item to a queue after a delay.
3420 : : *
3421 : : * Unlike k_work_schedule_for_queue() this function can change the deadline of
3422 : : * a scheduled work item, and will schedule a work item that isn't idle
3423 : : * (e.g. is submitted or running). This function does not affect ("unsubmit")
3424 : : * a work item that has been submitted to a queue.
3425 : : *
3426 : : * @funcprops \isr_ok
3427 : : *
3428 : : * @param queue the queue on which the work item should be submitted after the
3429 : : * delay.
3430 : : *
3431 : : * @param dwork pointer to the delayable work item.
3432 : : *
3433 : : * @param delay the time to wait before submitting the work item. If @c
3434 : : * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3435 : : * any previous scheduled submission.
3436 : : *
3437 : : * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3438 : : * k_work_submit_to_queue().
3439 : : *
3440 : : * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3441 : : * @retval 1 if
3442 : : * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3443 : : * to @p queue; or
3444 : : * * delay not @c K_NO_WAIT and work has been scheduled
3445 : : * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3446 : : * to the queue that was running it
3447 : : * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3448 : : * k_work_submit_to_queue() fails with this code.
3449 : : * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3450 : : * k_work_submit_to_queue() fails with this code.
3451 : : * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3452 : : * k_work_submit_to_queue() fails with this code.
3453 : : */
3454 : : int k_work_reschedule_for_queue(struct k_work_q *queue,
3455 : : struct k_work_delayable *dwork,
3456 : : k_timeout_t delay);
3457 : :
3458 : : /** @brief Reschedule a work item to the system work queue after a
3459 : : * delay.
3460 : : *
3461 : : * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3462 : : * API characteristics of that function.
3463 : : *
3464 : : * @param dwork pointer to the delayable work item.
3465 : : *
3466 : : * @param delay the time to wait before submitting the work item.
3467 : : *
3468 : : * @return as with k_work_reschedule_for_queue().
3469 : : */
3470 : : extern int k_work_reschedule(struct k_work_delayable *dwork,
3471 : : k_timeout_t delay);
3472 : :
3473 : : /** @brief Flush delayable work.
3474 : : *
3475 : : * If the work is scheduled, it is immediately submitted. Then the caller
3476 : : * blocks until the work completes, as with k_work_flush().
3477 : : *
3478 : : * @note Be careful of caller and work queue thread relative priority. If
3479 : : * this function sleeps it will not return until the work queue thread
3480 : : * completes the tasks that allow this thread to resume.
3481 : : *
3482 : : * @note Behavior is undefined if this function is invoked on @p dwork from a
3483 : : * work queue running @p dwork.
3484 : : *
3485 : : * @param dwork pointer to the delayable work item.
3486 : : *
3487 : : * @param sync pointer to an opaque item containing state related to the
3488 : : * pending cancellation. The object must persist until the call returns, and
3489 : : * be accessible from both the caller thread and the work queue thread. The
3490 : : * object must not be used for any other flush or cancel operation until this
3491 : : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3492 : : * must be allocated in coherent memory.
3493 : : *
3494 : : * @retval true if call had to wait for completion
3495 : : * @retval false if work was already idle
3496 : : */
3497 : : bool k_work_flush_delayable(struct k_work_delayable *dwork,
3498 : : struct k_work_sync *sync);
3499 : :
3500 : : /** @brief Cancel delayable work.
3501 : : *
3502 : : * Similar to k_work_cancel() but for delayable work. If the work is
3503 : : * scheduled or submitted it is canceled. This function does not wait for the
3504 : : * cancellation to complete.
3505 : : *
3506 : : * @note The work may still be running when this returns. Use
3507 : : * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3508 : : * not running.
3509 : : *
3510 : : * @note Canceling delayable work does not prevent rescheduling it. It does
3511 : : * prevent submitting it until the cancellation completes.
3512 : : *
3513 : : * @funcprops \isr_ok
3514 : : *
3515 : : * @param dwork pointer to the delayable work item.
3516 : : *
3517 : : * @return the k_work_delayable_busy_get() status indicating the state of the
3518 : : * item after all cancellation steps performed by this call are completed.
3519 : : */
3520 : : int k_work_cancel_delayable(struct k_work_delayable *dwork);
3521 : :
3522 : : /** @brief Cancel delayable work and wait.
3523 : : *
3524 : : * Like k_work_cancel_delayable() but waits until the work becomes idle.
3525 : : *
3526 : : * @note Canceling delayable work does not prevent rescheduling it. It does
3527 : : * prevent submitting it until the cancellation completes.
3528 : : *
3529 : : * @note Be careful of caller and work queue thread relative priority. If
3530 : : * this function sleeps it will not return until the work queue thread
3531 : : * completes the tasks that allow this thread to resume.
3532 : : *
3533 : : * @note Behavior is undefined if this function is invoked on @p dwork from a
3534 : : * work queue running @p dwork.
3535 : : *
3536 : : * @param dwork pointer to the delayable work item.
3537 : : *
3538 : : * @param sync pointer to an opaque item containing state related to the
3539 : : * pending cancellation. The object must persist until the call returns, and
3540 : : * be accessible from both the caller thread and the work queue thread. The
3541 : : * object must not be used for any other flush or cancel operation until this
3542 : : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3543 : : * must be allocated in coherent memory.
3544 : : *
3545 : : * @retval true if work was not idle (call had to wait for cancellation of a
3546 : : * running handler to complete, or scheduled or submitted operations were
3547 : : * cancelled);
3548 : : * @retval false otherwise
3549 : : */
3550 : : bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3551 : : struct k_work_sync *sync);
3552 : :
3553 : : enum {
3554 : : /**
3555 : : * @cond INTERNAL_HIDDEN
3556 : : */
3557 : :
3558 : : /* The atomic API is used for all work and queue flags fields to
3559 : : * enforce sequential consistency in SMP environments.
3560 : : */
3561 : :
3562 : : /* Bits that represent the work item states. At least nine of the
3563 : : * combinations are distinct valid stable states.
3564 : : */
3565 : : K_WORK_RUNNING_BIT = 0,
3566 : : K_WORK_CANCELING_BIT = 1,
3567 : : K_WORK_QUEUED_BIT = 2,
3568 : : K_WORK_DELAYED_BIT = 3,
3569 : :
3570 : : K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3571 : : | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3572 : :
3573 : : /* Static work flags */
3574 : : K_WORK_DELAYABLE_BIT = 8,
3575 : : K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3576 : :
3577 : : /* Dynamic work queue flags */
3578 : : K_WORK_QUEUE_STARTED_BIT = 0,
3579 : : K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3580 : : K_WORK_QUEUE_BUSY_BIT = 1,
3581 : : K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3582 : : K_WORK_QUEUE_DRAIN_BIT = 2,
3583 : : K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3584 : : K_WORK_QUEUE_PLUGGED_BIT = 3,
3585 : : K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3586 : :
3587 : : /* Static work queue flags */
3588 : : K_WORK_QUEUE_NO_YIELD_BIT = 8,
3589 : : K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3590 : :
3591 : : /**
3592 : : * INTERNAL_HIDDEN @endcond
3593 : : */
3594 : : /* Transient work flags */
3595 : :
3596 : : /** @brief Flag indicating a work item that is running under a work
3597 : : * queue thread.
3598 : : *
3599 : : * Accessed via k_work_busy_get(). May co-occur with other flags.
3600 : : */
3601 : : K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3602 : :
3603 : : /** @brief Flag indicating a work item that is being canceled.
3604 : : *
3605 : : * Accessed via k_work_busy_get(). May co-occur with other flags.
3606 : : */
3607 : : K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3608 : :
3609 : : /** @brief Flag indicating a work item that has been submitted to a
3610 : : * queue but has not started running.
3611 : : *
3612 : : * Accessed via k_work_busy_get(). May co-occur with other flags.
3613 : : */
3614 : : K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3615 : :
3616 : : /** @brief Flag indicating a delayed work item that is scheduled for
3617 : : * submission to a queue.
3618 : : *
3619 : : * Accessed via k_work_busy_get(). May co-occur with other flags.
3620 : : */
3621 : : K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3622 : : };
3623 : :
3624 : : /** @brief A structure used to submit work. */
3625 : : struct k_work {
3626 : : /* All fields are protected by the work module spinlock. No fields
3627 : : * are to be accessed except through kernel API.
3628 : : */
3629 : :
3630 : : /* Node to link into k_work_q pending list. */
3631 : : sys_snode_t node;
3632 : :
3633 : : /* The function to be invoked by the work queue thread. */
3634 : : k_work_handler_t handler;
3635 : :
3636 : : /* The queue on which the work item was last submitted. */
3637 : : struct k_work_q *queue;
3638 : :
3639 : : /* State of the work item.
3640 : : *
3641 : : * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3642 : : *
3643 : : * It can be RUNNING and CANCELING simultaneously.
3644 : : */
3645 : : uint32_t flags;
3646 : : };
3647 : :
3648 : : #define Z_WORK_INITIALIZER(work_handler) { \
3649 : : .handler = work_handler, \
3650 : : }
3651 : :
3652 : : /** @brief A structure used to submit work after a delay. */
3653 : : struct k_work_delayable {
3654 : : /* The work item. */
3655 : : struct k_work work;
3656 : :
3657 : : /* Timeout used to submit work after a delay. */
3658 : : struct _timeout timeout;
3659 : :
3660 : : /* The queue to which the work should be submitted. */
3661 : : struct k_work_q *queue;
3662 : : };
3663 : :
3664 : : #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3665 : : .work = { \
3666 : : .handler = work_handler, \
3667 : : .flags = K_WORK_DELAYABLE, \
3668 : : }, \
3669 : : }
3670 : :
3671 : : /**
3672 : : * @brief Initialize a statically-defined delayable work item.
3673 : : *
3674 : : * This macro can be used to initialize a statically-defined delayable
3675 : : * work item, prior to its first use. For example,
3676 : : *
3677 : : * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3678 : : *
3679 : : * Note that if the runtime dependencies support initialization with
3680 : : * k_work_init_delayable() using that will eliminate the initialized
3681 : : * object in ROM that is produced by this macro and copied in at
3682 : : * system startup.
3683 : : *
3684 : : * @param work Symbol name for delayable work item object
3685 : : * @param work_handler Function to invoke each time work item is processed.
3686 : : */
3687 : : #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3688 : : struct k_work_delayable work \
3689 : : = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
3690 : :
3691 : : /**
3692 : : * @cond INTERNAL_HIDDEN
3693 : : */
3694 : :
3695 : : /* Record used to wait for work to flush.
3696 : : *
3697 : : * The work item is inserted into the queue that will process (or is
3698 : : * processing) the item, and will be processed as soon as the item
3699 : : * completes. When the flusher is processed the semaphore will be
3700 : : * signaled, releasing the thread waiting for the flush.
3701 : : */
3702 : : struct z_work_flusher {
3703 : : struct k_work work;
3704 : : struct k_sem sem;
3705 : : };
3706 : :
3707 : : /* Record used to wait for work to complete a cancellation.
3708 : : *
3709 : : * The work item is inserted into a global queue of pending cancels.
3710 : : * When a cancelling work item goes idle any matching waiters are
3711 : : * removed from pending_cancels and are woken.
3712 : : */
3713 : : struct z_work_canceller {
3714 : : sys_snode_t node;
3715 : : struct k_work *work;
3716 : : struct k_sem sem;
3717 : : };
3718 : :
3719 : : /**
3720 : : * INTERNAL_HIDDEN @endcond
3721 : : */
3722 : :
3723 : : /** @brief A structure holding internal state for a pending synchronous
3724 : : * operation on a work item or queue.
3725 : : *
3726 : : * Instances of this type are provided by the caller for invocation of
3727 : : * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3728 : : * referenced object must persist until the call returns, and be accessible
3729 : : * from both the caller thread and the work queue thread.
3730 : : *
3731 : : * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3732 : : * coherent memory; see arch_mem_coherent(). The stack on these architectures
3733 : : * is generally not coherent. be stack-allocated. Violations are detected by
3734 : : * runtime assertion.
3735 : : */
3736 : : struct k_work_sync {
3737 : : union {
3738 : : struct z_work_flusher flusher;
3739 : : struct z_work_canceller canceller;
3740 : : };
3741 : : };
3742 : :
3743 : : /** @brief A structure holding optional configuration items for a work
3744 : : * queue.
3745 : : *
3746 : : * This structure, and values it references, are not retained by
3747 : : * k_work_queue_start().
3748 : : */
3749 : : struct k_work_queue_config {
3750 : : /** The name to be given to the work queue thread.
3751 : : *
3752 : : * If left null the thread will not have a name.
3753 : : */
3754 : : const char *name;
3755 : :
3756 : : /** Control whether the work queue thread should yield between
3757 : : * items.
3758 : : *
3759 : : * Yielding between items helps guarantee the work queue
3760 : : * thread does not starve other threads, including cooperative
3761 : : * ones released by a work item. This is the default behavior.
3762 : : *
3763 : : * Set this to @c true to prevent the work queue thread from
3764 : : * yielding between items. This may be appropriate when a
3765 : : * sequence of items should complete without yielding
3766 : : * control.
3767 : : */
3768 : : bool no_yield;
3769 : : };
3770 : :
3771 : : /** @brief A structure used to hold work until it can be processed. */
3772 : : struct k_work_q {
3773 : : /* The thread that animates the work. */
3774 : : struct k_thread thread;
3775 : :
3776 : : /* All the following fields must be accessed only while the
3777 : : * work module spinlock is held.
3778 : : */
3779 : :
3780 : : /* List of k_work items to be worked. */
3781 : : sys_slist_t pending;
3782 : :
3783 : : /* Wait queue for idle work thread. */
3784 : : _wait_q_t notifyq;
3785 : :
3786 : : /* Wait queue for threads waiting for the queue to drain. */
3787 : : _wait_q_t drainq;
3788 : :
3789 : : /* Flags describing queue state. */
3790 : : uint32_t flags;
3791 : : };
3792 : :
3793 : : /* Provide the implementation for inline functions declared above */
3794 : :
3795 : : static inline bool k_work_is_pending(const struct k_work *work)
3796 : : {
3797 : : return k_work_busy_get(work) != 0;
3798 : : }
3799 : :
3800 : : static inline struct k_work_delayable *
3801 : : k_work_delayable_from_work(struct k_work *work)
3802 : : {
3803 : : return CONTAINER_OF(work, struct k_work_delayable, work);
3804 : : }
3805 : :
3806 : : static inline bool k_work_delayable_is_pending(
3807 : : const struct k_work_delayable *dwork)
3808 : : {
3809 : : return k_work_delayable_busy_get(dwork) != 0;
3810 : : }
3811 : :
3812 : : static inline k_ticks_t k_work_delayable_expires_get(
3813 : : const struct k_work_delayable *dwork)
3814 : : {
3815 : : return z_timeout_expires(&dwork->timeout);
3816 : : }
3817 : :
3818 : : static inline k_ticks_t k_work_delayable_remaining_get(
3819 : : const struct k_work_delayable *dwork)
3820 : : {
3821 : : return z_timeout_remaining(&dwork->timeout);
3822 : : }
3823 : :
3824 : : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3825 : : {
3826 : : return &queue->thread;
3827 : : }
3828 : :
3829 : : /* Legacy wrappers */
3830 : :
3831 : : __deprecated
3832 : : static inline bool k_work_pending(const struct k_work *work)
3833 : : {
3834 : : return k_work_is_pending(work);
3835 : : }
3836 : :
3837 : : __deprecated
3838 : : static inline void k_work_q_start(struct k_work_q *work_q,
3839 : : k_thread_stack_t *stack,
3840 : : size_t stack_size, int prio)
3841 : : {
3842 : : k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3843 : : }
3844 : :
3845 : : /* deprecated, remove when corresponding deprecated API is removed. */
3846 : : struct k_delayed_work {
3847 : : struct k_work_delayable work;
3848 : : };
3849 : :
3850 : : #define Z_DELAYED_WORK_INITIALIZER(work_handler) __DEPRECATED_MACRO { \
3851 : : .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3852 : : }
3853 : :
3854 : : __deprecated
3855 : : static inline void k_delayed_work_init(struct k_delayed_work *work,
3856 : : k_work_handler_t handler)
3857 : : {
3858 : : k_work_init_delayable(&work->work, handler);
3859 : : }
3860 : :
3861 : : __deprecated
3862 : : static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3863 : : struct k_delayed_work *work,
3864 : : k_timeout_t delay)
3865 : : {
3866 : : int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3867 : :
3868 : : /* Legacy API doesn't distinguish success cases. */
3869 : : return (rc >= 0) ? 0 : rc;
3870 : : }
3871 : :
3872 : : __deprecated
3873 : : static inline int k_delayed_work_submit(struct k_delayed_work *work,
3874 : : k_timeout_t delay)
3875 : : {
3876 : : int rc = k_work_reschedule(&work->work, delay);
3877 : :
3878 : : /* Legacy API doesn't distinguish success cases. */
3879 : : return (rc >= 0) ? 0 : rc;
3880 : : }
3881 : :
3882 : : __deprecated
3883 : : static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3884 : : {
3885 : : bool pending = k_work_delayable_is_pending(&work->work);
3886 : : int rc = k_work_cancel_delayable(&work->work);
3887 : :
3888 : : /* Old return value rules:
3889 : : *
3890 : : * 0 if:
3891 : : * * Work item countdown cancelled before the item was submitted to
3892 : : * its queue; or
3893 : : * * Work item was removed from its queue before it was processed.
3894 : : *
3895 : : * -EINVAL if:
3896 : : * * Work item has never been submitted; or
3897 : : * * Work item has been successfully cancelled; or
3898 : : * * Timeout handler is in the process of submitting the work item to
3899 : : * its queue; or
3900 : : * * Work queue thread has removed the work item from the queue but
3901 : : * has not called its handler.
3902 : : *
3903 : : * -EALREADY if:
3904 : : * * Work queue thread has removed the work item from the queue and
3905 : : * cleared its pending flag; or
3906 : : * * Work queue thread is invoking the item handler; or
3907 : : * * Work item handler has completed.
3908 : : *
3909 : :
3910 : : * We can't reconstruct those states, so call it successful only when
3911 : : * a pending item is no longer pending, -EINVAL if it was pending and
3912 : : * still is, and cancel, and -EALREADY if it wasn't pending (so
3913 : : * presumably cancellation should have had no effect, assuming we
3914 : : * didn't hit a race condition).
3915 : : */
3916 : : if (pending) {
3917 : : return (rc == 0) ? 0 : -EINVAL;
3918 : : }
3919 : :
3920 : : return -EALREADY;
3921 : : }
3922 : :
3923 : : __deprecated
3924 : : static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3925 : : {
3926 : : return k_work_delayable_is_pending(&work->work);
3927 : : }
3928 : :
3929 : : __deprecated
3930 : : static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3931 : : {
3932 : : k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3933 : :
3934 : : /* Probably should be ceil32, but was floor32 */
3935 : : return k_ticks_to_ms_floor32(rem);
3936 : : }
3937 : :
3938 : : __deprecated
3939 : : static inline k_ticks_t k_delayed_work_expires_ticks(
3940 : : struct k_delayed_work *work)
3941 : : {
3942 : : return k_work_delayable_expires_get(&work->work);
3943 : : }
3944 : :
3945 : : __deprecated
3946 : : static inline k_ticks_t k_delayed_work_remaining_ticks(
3947 : : struct k_delayed_work *work)
3948 : : {
3949 : : return k_work_delayable_remaining_get(&work->work);
3950 : : }
3951 : :
3952 : : /** @} */
3953 : :
3954 : : struct k_work_user;
3955 : :
3956 : : /**
3957 : : * @addtogroup workqueue_apis
3958 : : * @{
3959 : : */
3960 : :
3961 : : /**
3962 : : * @typedef k_work_user_handler_t
3963 : : * @brief Work item handler function type for user work queues.
3964 : : *
3965 : : * A work item's handler function is executed by a user workqueue's thread
3966 : : * when the work item is processed by the workqueue.
3967 : : *
3968 : : * @param work Address of the work item.
3969 : : */
3970 : : typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3971 : :
3972 : : /**
3973 : : * @cond INTERNAL_HIDDEN
3974 : : */
3975 : :
3976 : : struct k_work_user_q {
3977 : : struct k_queue queue;
3978 : : struct k_thread thread;
3979 : : };
3980 : :
3981 : : enum {
3982 : : K_WORK_USER_STATE_PENDING, /* Work item pending state */
3983 : : };
3984 : :
3985 : : struct k_work_user {
3986 : : void *_reserved; /* Used by k_queue implementation. */
3987 : : k_work_user_handler_t handler;
3988 : : atomic_t flags;
3989 : : };
3990 : :
3991 : : /**
3992 : : * INTERNAL_HIDDEN @endcond
3993 : : */
3994 : :
3995 : : #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
3996 : : #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
3997 : : #else
3998 : : #define Z_WORK_USER_INITIALIZER(work_handler) \
3999 : : { \
4000 : : ._reserved = NULL, \
4001 : : .handler = work_handler, \
4002 : : .flags = 0 \
4003 : : }
4004 : : #endif
4005 : :
4006 : : /**
4007 : : * @brief Initialize a statically-defined user work item.
4008 : : *
4009 : : * This macro can be used to initialize a statically-defined user work
4010 : : * item, prior to its first use. For example,
4011 : : *
4012 : : * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4013 : : *
4014 : : * @param work Symbol name for work item object
4015 : : * @param work_handler Function to invoke each time work item is processed.
4016 : : */
4017 : : #define K_WORK_USER_DEFINE(work, work_handler) \
4018 : : struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4019 : :
4020 : : /**
4021 : : * @brief Initialize a userspace work item.
4022 : : *
4023 : : * This routine initializes a user workqueue work item, prior to its
4024 : : * first use.
4025 : : *
4026 : : * @param work Address of work item.
4027 : : * @param handler Function to invoke each time work item is processed.
4028 : : */
4029 : : static inline void k_work_user_init(struct k_work_user *work,
4030 : : k_work_user_handler_t handler)
4031 : : {
4032 : : *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4033 : : }
4034 : :
4035 : : /**
4036 : : * @brief Check if a userspace work item is pending.
4037 : : *
4038 : : * This routine indicates if user work item @a work is pending in a workqueue's
4039 : : * queue.
4040 : : *
4041 : : * @note Checking if the work is pending gives no guarantee that the
4042 : : * work will still be pending when this information is used. It is up to
4043 : : * the caller to make sure that this information is used in a safe manner.
4044 : : *
4045 : : * @funcprops \isr_ok
4046 : : *
4047 : : * @param work Address of work item.
4048 : : *
4049 : : * @return true if work item is pending, or false if it is not pending.
4050 : : */
4051 : : static inline bool k_work_user_is_pending(struct k_work_user *work)
4052 : : {
4053 : : return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4054 : : }
4055 : :
4056 : : /**
4057 : : * @brief Submit a work item to a user mode workqueue
4058 : : *
4059 : : * Submits a work item to a workqueue that runs in user mode. A temporary
4060 : : * memory allocation is made from the caller's resource pool which is freed
4061 : : * once the worker thread consumes the k_work item. The workqueue
4062 : : * thread must have memory access to the k_work item being submitted. The caller
4063 : : * must have permission granted on the work_q parameter's queue object.
4064 : : *
4065 : : * @funcprops \isr_ok
4066 : : *
4067 : : * @param work_q Address of workqueue.
4068 : : * @param work Address of work item.
4069 : : *
4070 : : * @retval -EBUSY if the work item was already in some workqueue
4071 : : * @retval -ENOMEM if no memory for thread resource pool allocation
4072 : : * @retval 0 Success
4073 : : */
4074 : : static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4075 : : struct k_work_user *work)
4076 : : {
4077 : : int ret = -EBUSY;
4078 : :
4079 : : if (!atomic_test_and_set_bit(&work->flags,
4080 : : K_WORK_USER_STATE_PENDING)) {
4081 : : ret = k_queue_alloc_append(&work_q->queue, work);
4082 : :
4083 : : /* Couldn't insert into the queue. Clear the pending bit
4084 : : * so the work item can be submitted again
4085 : : */
4086 : : if (ret != 0) {
4087 : : atomic_clear_bit(&work->flags,
4088 : : K_WORK_USER_STATE_PENDING);
4089 : : }
4090 : : }
4091 : :
4092 : : return ret;
4093 : : }
4094 : :
4095 : : /**
4096 : : * @brief Start a workqueue in user mode
4097 : : *
4098 : : * This works identically to k_work_queue_start() except it is callable from
4099 : : * user mode, and the worker thread created will run in user mode. The caller
4100 : : * must have permissions granted on both the work_q parameter's thread and
4101 : : * queue objects, and the same restrictions on priority apply as
4102 : : * k_thread_create().
4103 : : *
4104 : : * @param work_q Address of workqueue.
4105 : : * @param stack Pointer to work queue thread's stack space, as defined by
4106 : : * K_THREAD_STACK_DEFINE()
4107 : : * @param stack_size Size of the work queue thread's stack (in bytes), which
4108 : : * should either be the same constant passed to
4109 : : * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4110 : : * @param prio Priority of the work queue's thread.
4111 : : * @param name optional thread name. If not null a copy is made into the
4112 : : * thread's name buffer.
4113 : : */
4114 : : extern void k_work_user_queue_start(struct k_work_user_q *work_q,
4115 : : k_thread_stack_t *stack,
4116 : : size_t stack_size, int prio,
4117 : : const char *name);
4118 : :
4119 : : /** @} */
4120 : :
4121 : : /**
4122 : : * @cond INTERNAL_HIDDEN
4123 : : */
4124 : :
4125 : : struct k_work_poll {
4126 : : struct k_work work;
4127 : : struct k_work_q *workq;
4128 : : struct z_poller poller;
4129 : : struct k_poll_event *events;
4130 : : int num_events;
4131 : : k_work_handler_t real_handler;
4132 : : struct _timeout timeout;
4133 : : int poll_result;
4134 : : };
4135 : :
4136 : : /**
4137 : : * INTERNAL_HIDDEN @endcond
4138 : : */
4139 : :
4140 : : /**
4141 : : * @addtogroup workqueue_apis
4142 : : * @{
4143 : : */
4144 : :
4145 : : /**
4146 : : * @brief Initialize a statically-defined work item.
4147 : : *
4148 : : * This macro can be used to initialize a statically-defined workqueue work
4149 : : * item, prior to its first use. For example,
4150 : : *
4151 : : * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4152 : : *
4153 : : * @param work Symbol name for work item object
4154 : : * @param work_handler Function to invoke each time work item is processed.
4155 : : */
4156 : : #define K_WORK_DEFINE(work, work_handler) \
4157 : : struct k_work work = Z_WORK_INITIALIZER(work_handler)
4158 : :
4159 : : /**
4160 : : * @brief Initialize a statically-defined delayed work item.
4161 : : *
4162 : : * This macro can be used to initialize a statically-defined workqueue
4163 : : * delayed work item, prior to its first use. For example,
4164 : : *
4165 : : * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
4166 : : *
4167 : : * @param work Symbol name for delayed work item object
4168 : : * @param work_handler Function to invoke each time work item is processed.
4169 : : */
4170 : : #define K_DELAYED_WORK_DEFINE(work, work_handler) __DEPRECATED_MACRO \
4171 : : struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4172 : :
4173 : : /**
4174 : : * @brief Initialize a triggered work item.
4175 : : *
4176 : : * This routine initializes a workqueue triggered work item, prior to
4177 : : * its first use.
4178 : : *
4179 : : * @param work Address of triggered work item.
4180 : : * @param handler Function to invoke each time work item is processed.
4181 : : */
4182 : : extern void k_work_poll_init(struct k_work_poll *work,
4183 : : k_work_handler_t handler);
4184 : :
4185 : : /**
4186 : : * @brief Submit a triggered work item.
4187 : : *
4188 : : * This routine schedules work item @a work to be processed by workqueue
4189 : : * @a work_q when one of the given @a events is signaled. The routine
4190 : : * initiates internal poller for the work item and then returns to the caller.
4191 : : * Only when one of the watched events happen the work item is actually
4192 : : * submitted to the workqueue and becomes pending.
4193 : : *
4194 : : * Submitting a previously submitted triggered work item that is still
4195 : : * waiting for the event cancels the existing submission and reschedules it
4196 : : * the using the new event list. Note that this behavior is inherently subject
4197 : : * to race conditions with the pre-existing triggered work item and work queue,
4198 : : * so care must be taken to synchronize such resubmissions externally.
4199 : : *
4200 : : * @funcprops \isr_ok
4201 : : *
4202 : : * @warning
4203 : : * Provided array of events as well as a triggered work item must be placed
4204 : : * in persistent memory (valid until work handler execution or work
4205 : : * cancellation) and cannot be modified after submission.
4206 : : *
4207 : : * @param work_q Address of workqueue.
4208 : : * @param work Address of delayed work item.
4209 : : * @param events An array of events which trigger the work.
4210 : : * @param num_events The number of events in the array.
4211 : : * @param timeout Timeout after which the work will be scheduled
4212 : : * for execution even if not triggered.
4213 : : *
4214 : : *
4215 : : * @retval 0 Work item started watching for events.
4216 : : * @retval -EINVAL Work item is being processed or has completed its work.
4217 : : * @retval -EADDRINUSE Work item is pending on a different workqueue.
4218 : : */
4219 : : extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4220 : : struct k_work_poll *work,
4221 : : struct k_poll_event *events,
4222 : : int num_events,
4223 : : k_timeout_t timeout);
4224 : :
4225 : : /**
4226 : : * @brief Submit a triggered work item to the system workqueue.
4227 : : *
4228 : : * This routine schedules work item @a work to be processed by system
4229 : : * workqueue when one of the given @a events is signaled. The routine
4230 : : * initiates internal poller for the work item and then returns to the caller.
4231 : : * Only when one of the watched events happen the work item is actually
4232 : : * submitted to the workqueue and becomes pending.
4233 : : *
4234 : : * Submitting a previously submitted triggered work item that is still
4235 : : * waiting for the event cancels the existing submission and reschedules it
4236 : : * the using the new event list. Note that this behavior is inherently subject
4237 : : * to race conditions with the pre-existing triggered work item and work queue,
4238 : : * so care must be taken to synchronize such resubmissions externally.
4239 : : *
4240 : : * @funcprops \isr_ok
4241 : : *
4242 : : * @warning
4243 : : * Provided array of events as well as a triggered work item must not be
4244 : : * modified until the item has been processed by the workqueue.
4245 : : *
4246 : : * @param work Address of delayed work item.
4247 : : * @param events An array of events which trigger the work.
4248 : : * @param num_events The number of events in the array.
4249 : : * @param timeout Timeout after which the work will be scheduled
4250 : : * for execution even if not triggered.
4251 : : *
4252 : : * @retval 0 Work item started watching for events.
4253 : : * @retval -EINVAL Work item is being processed or has completed its work.
4254 : : * @retval -EADDRINUSE Work item is pending on a different workqueue.
4255 : : */
4256 : : extern int k_work_poll_submit(struct k_work_poll *work,
4257 : : struct k_poll_event *events,
4258 : : int num_events,
4259 : : k_timeout_t timeout);
4260 : :
4261 : : /**
4262 : : * @brief Cancel a triggered work item.
4263 : : *
4264 : : * This routine cancels the submission of triggered work item @a work.
4265 : : * A triggered work item can only be canceled if no event triggered work
4266 : : * submission.
4267 : : *
4268 : : * @funcprops \isr_ok
4269 : : *
4270 : : * @param work Address of delayed work item.
4271 : : *
4272 : : * @retval 0 Work item canceled.
4273 : : * @retval -EINVAL Work item is being processed or has completed its work.
4274 : : */
4275 : : extern int k_work_poll_cancel(struct k_work_poll *work);
4276 : :
4277 : : /** @} */
4278 : :
4279 : : /**
4280 : : * @defgroup msgq_apis Message Queue APIs
4281 : : * @ingroup kernel_apis
4282 : : * @{
4283 : : */
4284 : :
4285 : : /**
4286 : : * @brief Message Queue Structure
4287 : : */
4288 : : struct k_msgq {
4289 : : /** Message queue wait queue */
4290 : : _wait_q_t wait_q;
4291 : : /** Lock */
4292 : : struct k_spinlock lock;
4293 : : /** Message size */
4294 : : size_t msg_size;
4295 : : /** Maximal number of messages */
4296 : : uint32_t max_msgs;
4297 : : /** Start of message buffer */
4298 : : char *buffer_start;
4299 : : /** End of message buffer */
4300 : : char *buffer_end;
4301 : : /** Read pointer */
4302 : : char *read_ptr;
4303 : : /** Write pointer */
4304 : : char *write_ptr;
4305 : : /** Number of used messages */
4306 : : uint32_t used_msgs;
4307 : :
4308 : : _POLL_EVENT;
4309 : :
4310 : : /** Message queue */
4311 : : uint8_t flags;
4312 : :
4313 : : SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4314 : : };
4315 : : /**
4316 : : * @cond INTERNAL_HIDDEN
4317 : : */
4318 : :
4319 : :
4320 : : #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4321 : : { \
4322 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4323 : : .msg_size = q_msg_size, \
4324 : : .max_msgs = q_max_msgs, \
4325 : : .buffer_start = q_buffer, \
4326 : : .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4327 : : .read_ptr = q_buffer, \
4328 : : .write_ptr = q_buffer, \
4329 : : .used_msgs = 0, \
4330 : : _POLL_EVENT_OBJ_INIT(obj) \
4331 : : }
4332 : :
4333 : : /**
4334 : : * INTERNAL_HIDDEN @endcond
4335 : : */
4336 : :
4337 : :
4338 : : #define K_MSGQ_FLAG_ALLOC BIT(0)
4339 : :
4340 : : /**
4341 : : * @brief Message Queue Attributes
4342 : : */
4343 : : struct k_msgq_attrs {
4344 : : /** Message Size */
4345 : : size_t msg_size;
4346 : : /** Maximal number of messages */
4347 : : uint32_t max_msgs;
4348 : : /** Used messages */
4349 : : uint32_t used_msgs;
4350 : : };
4351 : :
4352 : :
4353 : : /**
4354 : : * @brief Statically define and initialize a message queue.
4355 : : *
4356 : : * The message queue's ring buffer contains space for @a q_max_msgs messages,
4357 : : * each of which is @a q_msg_size bytes long. The buffer is aligned to a
4358 : : * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4359 : : * message is similarly aligned to this boundary, @a q_msg_size must also be
4360 : : * a multiple of @a q_align.
4361 : : *
4362 : : * The message queue can be accessed outside the module where it is defined
4363 : : * using:
4364 : : *
4365 : : * @code extern struct k_msgq <name>; @endcode
4366 : : *
4367 : : * @param q_name Name of the message queue.
4368 : : * @param q_msg_size Message size (in bytes).
4369 : : * @param q_max_msgs Maximum number of messages that can be queued.
4370 : : * @param q_align Alignment of the message queue's ring buffer.
4371 : : *
4372 : : */
4373 : : #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4374 : : static char __noinit __aligned(q_align) \
4375 : : _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4376 : : STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4377 : : Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4378 : : q_msg_size, q_max_msgs)
4379 : :
4380 : : /**
4381 : : * @brief Initialize a message queue.
4382 : : *
4383 : : * This routine initializes a message queue object, prior to its first use.
4384 : : *
4385 : : * The message queue's ring buffer must contain space for @a max_msgs messages,
4386 : : * each of which is @a msg_size bytes long. The buffer must be aligned to an
4387 : : * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
4388 : : * that each message is similarly aligned to this boundary, @a q_msg_size
4389 : : * must also be a multiple of N.
4390 : : *
4391 : : * @param msgq Address of the message queue.
4392 : : * @param buffer Pointer to ring buffer that holds queued messages.
4393 : : * @param msg_size Message size (in bytes).
4394 : : * @param max_msgs Maximum number of messages that can be queued.
4395 : : */
4396 : : void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4397 : : uint32_t max_msgs);
4398 : :
4399 : : /**
4400 : : * @brief Initialize a message queue.
4401 : : *
4402 : : * This routine initializes a message queue object, prior to its first use,
4403 : : * allocating its internal ring buffer from the calling thread's resource
4404 : : * pool.
4405 : : *
4406 : : * Memory allocated for the ring buffer can be released by calling
4407 : : * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4408 : : * all of its references.
4409 : : *
4410 : : * @param msgq Address of the message queue.
4411 : : * @param msg_size Message size (in bytes).
4412 : : * @param max_msgs Maximum number of messages that can be queued.
4413 : : *
4414 : : * @return 0 on success, -ENOMEM if there was insufficient memory in the
4415 : : * thread's resource pool, or -EINVAL if the size parameters cause
4416 : : * an integer overflow.
4417 : : */
4418 : : __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4419 : : uint32_t max_msgs);
4420 : :
4421 : : /**
4422 : : * @brief Release allocated buffer for a queue
4423 : : *
4424 : : * Releases memory allocated for the ring buffer.
4425 : : *
4426 : : * @param msgq message queue to cleanup
4427 : : *
4428 : : * @retval 0 on success
4429 : : * @retval -EBUSY Queue not empty
4430 : : */
4431 : : int k_msgq_cleanup(struct k_msgq *msgq);
4432 : :
4433 : : /**
4434 : : * @brief Send a message to a message queue.
4435 : : *
4436 : : * This routine sends a message to message queue @a q.
4437 : : *
4438 : : * @note The message content is copied from @a data into @a msgq and the @a data
4439 : : * pointer is not retained, so the message content will not be modified
4440 : : * by this function.
4441 : : *
4442 : : * @funcprops \isr_ok
4443 : : *
4444 : : * @param msgq Address of the message queue.
4445 : : * @param data Pointer to the message.
4446 : : * @param timeout Non-negative waiting period to add the message,
4447 : : * or one of the special values K_NO_WAIT and
4448 : : * K_FOREVER.
4449 : : *
4450 : : * @retval 0 Message sent.
4451 : : * @retval -ENOMSG Returned without waiting or queue purged.
4452 : : * @retval -EAGAIN Waiting period timed out.
4453 : : */
4454 : : __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4455 : :
4456 : : /**
4457 : : * @brief Receive a message from a message queue.
4458 : : *
4459 : : * This routine receives a message from message queue @a q in a "first in,
4460 : : * first out" manner.
4461 : : *
4462 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4463 : : *
4464 : : * @funcprops \isr_ok
4465 : : *
4466 : : * @param msgq Address of the message queue.
4467 : : * @param data Address of area to hold the received message.
4468 : : * @param timeout Waiting period to receive the message,
4469 : : * or one of the special values K_NO_WAIT and
4470 : : * K_FOREVER.
4471 : : *
4472 : : * @retval 0 Message received.
4473 : : * @retval -ENOMSG Returned without waiting.
4474 : : * @retval -EAGAIN Waiting period timed out.
4475 : : */
4476 : : __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
4477 : :
4478 : : /**
4479 : : * @brief Peek/read a message from a message queue.
4480 : : *
4481 : : * This routine reads a message from message queue @a q in a "first in,
4482 : : * first out" manner and leaves the message in the queue.
4483 : : *
4484 : : * @funcprops \isr_ok
4485 : : *
4486 : : * @param msgq Address of the message queue.
4487 : : * @param data Address of area to hold the message read from the queue.
4488 : : *
4489 : : * @retval 0 Message read.
4490 : : * @retval -ENOMSG Returned when the queue has no message.
4491 : : */
4492 : : __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4493 : :
4494 : : /**
4495 : : * @brief Purge a message queue.
4496 : : *
4497 : : * This routine discards all unreceived messages in a message queue's ring
4498 : : * buffer. Any threads that are blocked waiting to send a message to the
4499 : : * message queue are unblocked and see an -ENOMSG error code.
4500 : : *
4501 : : * @param msgq Address of the message queue.
4502 : : */
4503 : : __syscall void k_msgq_purge(struct k_msgq *msgq);
4504 : :
4505 : : /**
4506 : : * @brief Get the amount of free space in a message queue.
4507 : : *
4508 : : * This routine returns the number of unused entries in a message queue's
4509 : : * ring buffer.
4510 : : *
4511 : : * @param msgq Address of the message queue.
4512 : : *
4513 : : * @return Number of unused ring buffer entries.
4514 : : */
4515 : : __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4516 : :
4517 : : /**
4518 : : * @brief Get basic attributes of a message queue.
4519 : : *
4520 : : * This routine fetches basic attributes of message queue into attr argument.
4521 : : *
4522 : : * @param msgq Address of the message queue.
4523 : : * @param attrs pointer to message queue attribute structure.
4524 : : */
4525 : : __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4526 : : struct k_msgq_attrs *attrs);
4527 : :
4528 : :
4529 : : static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4530 : : {
4531 : : return msgq->max_msgs - msgq->used_msgs;
4532 : : }
4533 : :
4534 : : /**
4535 : : * @brief Get the number of messages in a message queue.
4536 : : *
4537 : : * This routine returns the number of messages in a message queue's ring buffer.
4538 : : *
4539 : : * @param msgq Address of the message queue.
4540 : : *
4541 : : * @return Number of messages.
4542 : : */
4543 : : __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4544 : :
4545 : : static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4546 : : {
4547 : : return msgq->used_msgs;
4548 : : }
4549 : :
4550 : : /** @} */
4551 : :
4552 : : /**
4553 : : * @defgroup mailbox_apis Mailbox APIs
4554 : : * @ingroup kernel_apis
4555 : : * @{
4556 : : */
4557 : :
4558 : : /**
4559 : : * @brief Mailbox Message Structure
4560 : : *
4561 : : */
4562 : : struct k_mbox_msg {
4563 : : /** internal use only - needed for legacy API support */
4564 : : uint32_t _mailbox;
4565 : : /** size of message (in bytes) */
4566 : : size_t size;
4567 : : /** application-defined information value */
4568 : : uint32_t info;
4569 : : /** sender's message data buffer */
4570 : : void *tx_data;
4571 : : /** internal use only - needed for legacy API support */
4572 : : void *_rx_data;
4573 : : /** message data block descriptor */
4574 : : struct k_mem_block tx_block;
4575 : : /** source thread id */
4576 : : k_tid_t rx_source_thread;
4577 : : /** target thread id */
4578 : : k_tid_t tx_target_thread;
4579 : : /** internal use only - thread waiting on send (may be a dummy) */
4580 : : k_tid_t _syncing_thread;
4581 : : #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4582 : : /** internal use only - semaphore used during asynchronous send */
4583 : : struct k_sem *_async_sem;
4584 : : #endif
4585 : : };
4586 : : /**
4587 : : * @brief Mailbox Structure
4588 : : *
4589 : : */
4590 : : struct k_mbox {
4591 : : /** Transmit messages queue */
4592 : : _wait_q_t tx_msg_queue;
4593 : : /** Receive message queue */
4594 : : _wait_q_t rx_msg_queue;
4595 : : struct k_spinlock lock;
4596 : :
4597 : : SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4598 : : };
4599 : : /**
4600 : : * @cond INTERNAL_HIDDEN
4601 : : */
4602 : :
4603 : : #define Z_MBOX_INITIALIZER(obj) \
4604 : : { \
4605 : : .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4606 : : .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4607 : : }
4608 : :
4609 : : /**
4610 : : * INTERNAL_HIDDEN @endcond
4611 : : */
4612 : :
4613 : : /**
4614 : : * @brief Statically define and initialize a mailbox.
4615 : : *
4616 : : * The mailbox is to be accessed outside the module where it is defined using:
4617 : : *
4618 : : * @code extern struct k_mbox <name>; @endcode
4619 : : *
4620 : : * @param name Name of the mailbox.
4621 : : */
4622 : : #define K_MBOX_DEFINE(name) \
4623 : : STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4624 : : Z_MBOX_INITIALIZER(name) \
4625 : :
4626 : : /**
4627 : : * @brief Initialize a mailbox.
4628 : : *
4629 : : * This routine initializes a mailbox object, prior to its first use.
4630 : : *
4631 : : * @param mbox Address of the mailbox.
4632 : : */
4633 : : extern void k_mbox_init(struct k_mbox *mbox);
4634 : :
4635 : : /**
4636 : : * @brief Send a mailbox message in a synchronous manner.
4637 : : *
4638 : : * This routine sends a message to @a mbox and waits for a receiver to both
4639 : : * receive and process it. The message data may be in a buffer, in a memory
4640 : : * pool block, or non-existent (i.e. an empty message).
4641 : : *
4642 : : * @param mbox Address of the mailbox.
4643 : : * @param tx_msg Address of the transmit message descriptor.
4644 : : * @param timeout Waiting period for the message to be received,
4645 : : * or one of the special values K_NO_WAIT
4646 : : * and K_FOREVER. Once the message has been received,
4647 : : * this routine waits as long as necessary for the message
4648 : : * to be completely processed.
4649 : : *
4650 : : * @retval 0 Message sent.
4651 : : * @retval -ENOMSG Returned without waiting.
4652 : : * @retval -EAGAIN Waiting period timed out.
4653 : : */
4654 : : extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4655 : : k_timeout_t timeout);
4656 : :
4657 : : /**
4658 : : * @brief Send a mailbox message in an asynchronous manner.
4659 : : *
4660 : : * This routine sends a message to @a mbox without waiting for a receiver
4661 : : * to process it. The message data may be in a buffer, in a memory pool block,
4662 : : * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4663 : : * will be given when the message has been both received and completely
4664 : : * processed by the receiver.
4665 : : *
4666 : : * @param mbox Address of the mailbox.
4667 : : * @param tx_msg Address of the transmit message descriptor.
4668 : : * @param sem Address of a semaphore, or NULL if none is needed.
4669 : : */
4670 : : extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4671 : : struct k_sem *sem);
4672 : :
4673 : : /**
4674 : : * @brief Receive a mailbox message.
4675 : : *
4676 : : * This routine receives a message from @a mbox, then optionally retrieves
4677 : : * its data and disposes of the message.
4678 : : *
4679 : : * @param mbox Address of the mailbox.
4680 : : * @param rx_msg Address of the receive message descriptor.
4681 : : * @param buffer Address of the buffer to receive data, or NULL to defer data
4682 : : * retrieval and message disposal until later.
4683 : : * @param timeout Waiting period for a message to be received,
4684 : : * or one of the special values K_NO_WAIT and K_FOREVER.
4685 : : *
4686 : : * @retval 0 Message received.
4687 : : * @retval -ENOMSG Returned without waiting.
4688 : : * @retval -EAGAIN Waiting period timed out.
4689 : : */
4690 : : extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4691 : : void *buffer, k_timeout_t timeout);
4692 : :
4693 : : /**
4694 : : * @brief Retrieve mailbox message data into a buffer.
4695 : : *
4696 : : * This routine completes the processing of a received message by retrieving
4697 : : * its data into a buffer, then disposing of the message.
4698 : : *
4699 : : * Alternatively, this routine can be used to dispose of a received message
4700 : : * without retrieving its data.
4701 : : *
4702 : : * @param rx_msg Address of the receive message descriptor.
4703 : : * @param buffer Address of the buffer to receive data, or NULL to discard
4704 : : * the data.
4705 : : */
4706 : : extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
4707 : :
4708 : : /** @} */
4709 : :
4710 : : /**
4711 : : * @defgroup pipe_apis Pipe APIs
4712 : : * @ingroup kernel_apis
4713 : : * @{
4714 : : */
4715 : :
4716 : : /** Pipe Structure */
4717 : : struct k_pipe {
4718 : : unsigned char *buffer; /**< Pipe buffer: may be NULL */
4719 : : size_t size; /**< Buffer size */
4720 : : size_t bytes_used; /**< # bytes used in buffer */
4721 : : size_t read_index; /**< Where in buffer to read from */
4722 : : size_t write_index; /**< Where in buffer to write */
4723 : : struct k_spinlock lock; /**< Synchronization lock */
4724 : :
4725 : : struct {
4726 : : _wait_q_t readers; /**< Reader wait queue */
4727 : : _wait_q_t writers; /**< Writer wait queue */
4728 : : } wait_q; /** Wait queue */
4729 : :
4730 : : uint8_t flags; /**< Flags */
4731 : :
4732 : : SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
4733 : : };
4734 : :
4735 : : /**
4736 : : * @cond INTERNAL_HIDDEN
4737 : : */
4738 : : #define K_PIPE_FLAG_ALLOC BIT(0) /** Buffer was allocated */
4739 : :
4740 : : #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
4741 : : { \
4742 : : .buffer = pipe_buffer, \
4743 : : .size = pipe_buffer_size, \
4744 : : .bytes_used = 0, \
4745 : : .read_index = 0, \
4746 : : .write_index = 0, \
4747 : : .lock = {}, \
4748 : : .wait_q = { \
4749 : : .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4750 : : .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
4751 : : }, \
4752 : : .flags = 0 \
4753 : : }
4754 : :
4755 : : /**
4756 : : * INTERNAL_HIDDEN @endcond
4757 : : */
4758 : :
4759 : : /**
4760 : : * @brief Statically define and initialize a pipe.
4761 : : *
4762 : : * The pipe can be accessed outside the module where it is defined using:
4763 : : *
4764 : : * @code extern struct k_pipe <name>; @endcode
4765 : : *
4766 : : * @param name Name of the pipe.
4767 : : * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4768 : : * or zero if no ring buffer is used.
4769 : : * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
4770 : : *
4771 : : */
4772 : : #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
4773 : : static unsigned char __noinit __aligned(pipe_align) \
4774 : : _k_pipe_buf_##name[pipe_buffer_size]; \
4775 : : STRUCT_SECTION_ITERABLE(k_pipe, name) = \
4776 : : Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
4777 : :
4778 : : /**
4779 : : * @brief Initialize a pipe.
4780 : : *
4781 : : * This routine initializes a pipe object, prior to its first use.
4782 : : *
4783 : : * @param pipe Address of the pipe.
4784 : : * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4785 : : * is used.
4786 : : * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4787 : : * buffer is used.
4788 : : */
4789 : : void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4790 : :
4791 : : /**
4792 : : * @brief Release a pipe's allocated buffer
4793 : : *
4794 : : * If a pipe object was given a dynamically allocated buffer via
4795 : : * k_pipe_alloc_init(), this will free it. This function does nothing
4796 : : * if the buffer wasn't dynamically allocated.
4797 : : *
4798 : : * @param pipe Address of the pipe.
4799 : : * @retval 0 on success
4800 : : * @retval -EAGAIN nothing to cleanup
4801 : : */
4802 : : int k_pipe_cleanup(struct k_pipe *pipe);
4803 : :
4804 : : /**
4805 : : * @brief Initialize a pipe and allocate a buffer for it
4806 : : *
4807 : : * Storage for the buffer region will be allocated from the calling thread's
4808 : : * resource pool. This memory will be released if k_pipe_cleanup() is called,
4809 : : * or userspace is enabled and the pipe object loses all references to it.
4810 : : *
4811 : : * This function should only be called on uninitialized pipe objects.
4812 : : *
4813 : : * @param pipe Address of the pipe.
4814 : : * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4815 : : * buffer is used.
4816 : : * @retval 0 on success
4817 : : * @retval -ENOMEM if memory couldn't be allocated
4818 : : */
4819 : : __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
4820 : :
4821 : : /**
4822 : : * @brief Write data to a pipe.
4823 : : *
4824 : : * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
4825 : : *
4826 : : * @param pipe Address of the pipe.
4827 : : * @param data Address of data to write.
4828 : : * @param bytes_to_write Size of data (in bytes).
4829 : : * @param bytes_written Address of area to hold the number of bytes written.
4830 : : * @param min_xfer Minimum number of bytes to write.
4831 : : * @param timeout Waiting period to wait for the data to be written,
4832 : : * or one of the special values K_NO_WAIT and K_FOREVER.
4833 : : *
4834 : : * @retval 0 At least @a min_xfer bytes of data were written.
4835 : : * @retval -EIO Returned without waiting; zero data bytes were written.
4836 : : * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4837 : : * minus one data bytes were written.
4838 : : */
4839 : : __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4840 : : size_t bytes_to_write, size_t *bytes_written,
4841 : : size_t min_xfer, k_timeout_t timeout);
4842 : :
4843 : : /**
4844 : : * @brief Read data from a pipe.
4845 : : *
4846 : : * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
4847 : : *
4848 : : * @param pipe Address of the pipe.
4849 : : * @param data Address to place the data read from pipe.
4850 : : * @param bytes_to_read Maximum number of data bytes to read.
4851 : : * @param bytes_read Address of area to hold the number of bytes read.
4852 : : * @param min_xfer Minimum number of data bytes to read.
4853 : : * @param timeout Waiting period to wait for the data to be read,
4854 : : * or one of the special values K_NO_WAIT and K_FOREVER.
4855 : : *
4856 : : * @retval 0 At least @a min_xfer bytes of data were read.
4857 : : * @retval -EINVAL invalid parameters supplied
4858 : : * @retval -EIO Returned without waiting; zero data bytes were read.
4859 : : * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
4860 : : * minus one data bytes were read.
4861 : : */
4862 : : __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4863 : : size_t bytes_to_read, size_t *bytes_read,
4864 : : size_t min_xfer, k_timeout_t timeout);
4865 : :
4866 : : /**
4867 : : * @brief Query the number of bytes that may be read from @a pipe.
4868 : : *
4869 : : * @param pipe Address of the pipe.
4870 : : *
4871 : : * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4872 : : * result is zero for unbuffered pipes.
4873 : : */
4874 : : __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4875 : :
4876 : : /**
4877 : : * @brief Query the number of bytes that may be written to @a pipe
4878 : : *
4879 : : * @param pipe Address of the pipe.
4880 : : *
4881 : : * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4882 : : * result is zero for unbuffered pipes.
4883 : : */
4884 : : __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4885 : :
4886 : : /**
4887 : : * @brief Flush the pipe of write data
4888 : : *
4889 : : * This routine flushes the pipe. Flushing the pipe is equivalent to reading
4890 : : * both all the data in the pipe's buffer and all the data waiting to go into
4891 : : * that pipe into a large temporary buffer and discarding the buffer. Any
4892 : : * writers that were previously pended become unpended.
4893 : : *
4894 : : * @param pipe Address of the pipe.
4895 : : */
4896 : : __syscall void k_pipe_flush(struct k_pipe *pipe);
4897 : :
4898 : : /**
4899 : : * @brief Flush the pipe's internal buffer
4900 : : *
4901 : : * This routine flushes the pipe's internal buffer. This is equivalent to
4902 : : * reading up to N bytes from the pipe (where N is the size of the pipe's
4903 : : * buffer) into a temporary buffer and then discarding that buffer. If there
4904 : : * were writers previously pending, then some may unpend as they try to fill
4905 : : * up the pipe's emptied buffer.
4906 : : *
4907 : : * @param pipe Address of the pipe.
4908 : : */
4909 : : __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
4910 : :
4911 : : /** @} */
4912 : :
4913 : : /**
4914 : : * @cond INTERNAL_HIDDEN
4915 : : */
4916 : :
4917 : : struct k_mem_slab {
4918 : : _wait_q_t wait_q;
4919 : : struct k_spinlock lock;
4920 : : uint32_t num_blocks;
4921 : : size_t block_size;
4922 : : char *buffer;
4923 : : char *free_list;
4924 : : uint32_t num_used;
4925 : : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4926 : : uint32_t max_used;
4927 : : #endif
4928 : :
4929 : : SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
4930 : : };
4931 : :
4932 : : #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
4933 : : slab_num_blocks) \
4934 : : { \
4935 : : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4936 : : .lock = {}, \
4937 : : .num_blocks = slab_num_blocks, \
4938 : : .block_size = slab_block_size, \
4939 : : .buffer = slab_buffer, \
4940 : : .free_list = NULL, \
4941 : : .num_used = 0, \
4942 : : }
4943 : :
4944 : :
4945 : : /**
4946 : : * INTERNAL_HIDDEN @endcond
4947 : : */
4948 : :
4949 : : /**
4950 : : * @defgroup mem_slab_apis Memory Slab APIs
4951 : : * @ingroup kernel_apis
4952 : : * @{
4953 : : */
4954 : :
4955 : : /**
4956 : : * @brief Statically define and initialize a memory slab in a public (non-static) scope.
4957 : : *
4958 : : * The memory slab's buffer contains @a slab_num_blocks memory blocks
4959 : : * that are @a slab_block_size bytes long. The buffer is aligned to a
4960 : : * @a slab_align -byte boundary. To ensure that each memory block is similarly
4961 : : * aligned to this boundary, @a slab_block_size must also be a multiple of
4962 : : * @a slab_align.
4963 : : *
4964 : : * The memory slab can be accessed outside the module where it is defined
4965 : : * using:
4966 : : *
4967 : : * @code extern struct k_mem_slab <name>; @endcode
4968 : : *
4969 : : * @note This macro cannot be used together with a static keyword.
4970 : : * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
4971 : : * instead.
4972 : : *
4973 : : * @param name Name of the memory slab.
4974 : : * @param slab_block_size Size of each memory block (in bytes).
4975 : : * @param slab_num_blocks Number memory blocks.
4976 : : * @param slab_align Alignment of the memory slab's buffer (power of 2).
4977 : : */
4978 : : #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
4979 : : char __noinit_named(k_mem_slab_buf_##name) \
4980 : : __aligned(WB_UP(slab_align)) \
4981 : : _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
4982 : : STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
4983 : : Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
4984 : : WB_UP(slab_block_size), slab_num_blocks)
4985 : :
4986 : : /**
4987 : : * @brief Statically define and initialize a memory slab in a private (static) scope.
4988 : : *
4989 : : * The memory slab's buffer contains @a slab_num_blocks memory blocks
4990 : : * that are @a slab_block_size bytes long. The buffer is aligned to a
4991 : : * @a slab_align -byte boundary. To ensure that each memory block is similarly
4992 : : * aligned to this boundary, @a slab_block_size must also be a multiple of
4993 : : * @a slab_align.
4994 : : *
4995 : : * @param name Name of the memory slab.
4996 : : * @param slab_block_size Size of each memory block (in bytes).
4997 : : * @param slab_num_blocks Number memory blocks.
4998 : : * @param slab_align Alignment of the memory slab's buffer (power of 2).
4999 : : */
5000 : : #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5001 : : static char __noinit_named(k_mem_slab_buf_##name) \
5002 : : __aligned(WB_UP(slab_align)) \
5003 : : _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5004 : : static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5005 : : Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5006 : : WB_UP(slab_block_size), slab_num_blocks)
5007 : :
5008 : : /**
5009 : : * @brief Initialize a memory slab.
5010 : : *
5011 : : * Initializes a memory slab, prior to its first use.
5012 : : *
5013 : : * The memory slab's buffer contains @a slab_num_blocks memory blocks
5014 : : * that are @a slab_block_size bytes long. The buffer must be aligned to an
5015 : : * N-byte boundary matching a word boundary, where N is a power of 2
5016 : : * (i.e. 4 on 32-bit systems, 8, 16, ...).
5017 : : * To ensure that each memory block is similarly aligned to this boundary,
5018 : : * @a slab_block_size must also be a multiple of N.
5019 : : *
5020 : : * @param slab Address of the memory slab.
5021 : : * @param buffer Pointer to buffer used for the memory blocks.
5022 : : * @param block_size Size of each memory block (in bytes).
5023 : : * @param num_blocks Number of memory blocks.
5024 : : *
5025 : : * @retval 0 on success
5026 : : * @retval -EINVAL invalid data supplied
5027 : : *
5028 : : */
5029 : : extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5030 : : size_t block_size, uint32_t num_blocks);
5031 : :
5032 : : /**
5033 : : * @brief Allocate memory from a memory slab.
5034 : : *
5035 : : * This routine allocates a memory block from a memory slab.
5036 : : *
5037 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5038 : : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5039 : : *
5040 : : * @funcprops \isr_ok
5041 : : *
5042 : : * @param slab Address of the memory slab.
5043 : : * @param mem Pointer to block address area.
5044 : : * @param timeout Non-negative waiting period to wait for operation to complete.
5045 : : * Use K_NO_WAIT to return without waiting,
5046 : : * or K_FOREVER to wait as long as necessary.
5047 : : *
5048 : : * @retval 0 Memory allocated. The block address area pointed at by @a mem
5049 : : * is set to the starting address of the memory block.
5050 : : * @retval -ENOMEM Returned without waiting.
5051 : : * @retval -EAGAIN Waiting period timed out.
5052 : : * @retval -EINVAL Invalid data supplied
5053 : : */
5054 : : extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5055 : : k_timeout_t timeout);
5056 : :
5057 : : /**
5058 : : * @brief Free memory allocated from a memory slab.
5059 : : *
5060 : : * This routine releases a previously allocated memory block back to its
5061 : : * associated memory slab.
5062 : : *
5063 : : * @param slab Address of the memory slab.
5064 : : * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
5065 : : */
5066 : : extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
5067 : :
5068 : : /**
5069 : : * @brief Get the number of used blocks in a memory slab.
5070 : : *
5071 : : * This routine gets the number of memory blocks that are currently
5072 : : * allocated in @a slab.
5073 : : *
5074 : : * @param slab Address of the memory slab.
5075 : : *
5076 : : * @return Number of allocated memory blocks.
5077 : : */
5078 : : static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5079 : : {
5080 : : return slab->num_used;
5081 : : }
5082 : :
5083 : : /**
5084 : : * @brief Get the number of maximum used blocks so far in a memory slab.
5085 : : *
5086 : : * This routine gets the maximum number of memory blocks that were
5087 : : * allocated in @a slab.
5088 : : *
5089 : : * @param slab Address of the memory slab.
5090 : : *
5091 : : * @return Maximum number of allocated memory blocks.
5092 : : */
5093 : : static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5094 : : {
5095 : : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5096 : : return slab->max_used;
5097 : : #else
5098 : : ARG_UNUSED(slab);
5099 : : return 0;
5100 : : #endif
5101 : : }
5102 : :
5103 : : /**
5104 : : * @brief Get the number of unused blocks in a memory slab.
5105 : : *
5106 : : * This routine gets the number of memory blocks that are currently
5107 : : * unallocated in @a slab.
5108 : : *
5109 : : * @param slab Address of the memory slab.
5110 : : *
5111 : : * @return Number of unallocated memory blocks.
5112 : : */
5113 : : static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5114 : : {
5115 : : return slab->num_blocks - slab->num_used;
5116 : : }
5117 : :
5118 : : /** @} */
5119 : :
5120 : : /**
5121 : : * @addtogroup heap_apis
5122 : : * @{
5123 : : */
5124 : :
5125 : : /* kernel synchronized heap struct */
5126 : :
5127 : : struct k_heap {
5128 : : struct sys_heap heap;
5129 : : _wait_q_t wait_q;
5130 : : struct k_spinlock lock;
5131 : : };
5132 : :
5133 : : /**
5134 : : * @brief Initialize a k_heap
5135 : : *
5136 : : * This constructs a synchronized k_heap object over a memory region
5137 : : * specified by the user. Note that while any alignment and size can
5138 : : * be passed as valid parameters, internal alignment restrictions
5139 : : * inside the inner sys_heap mean that not all bytes may be usable as
5140 : : * allocated memory.
5141 : : *
5142 : : * @param h Heap struct to initialize
5143 : : * @param mem Pointer to memory.
5144 : : * @param bytes Size of memory region, in bytes
5145 : : */
5146 : : void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
5147 : :
5148 : : /** @brief Allocate aligned memory from a k_heap
5149 : : *
5150 : : * Behaves in all ways like k_heap_alloc(), except that the returned
5151 : : * memory (if available) will have a starting address in memory which
5152 : : * is a multiple of the specified power-of-two alignment value in
5153 : : * bytes. The resulting memory can be returned to the heap using
5154 : : * k_heap_free().
5155 : : *
5156 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5157 : : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5158 : : *
5159 : : * @funcprops \isr_ok
5160 : : *
5161 : : * @param h Heap from which to allocate
5162 : : * @param align Alignment in bytes, must be a power of two
5163 : : * @param bytes Number of bytes requested
5164 : : * @param timeout How long to wait, or K_NO_WAIT
5165 : : * @return Pointer to memory the caller can now use
5166 : : */
5167 : : void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5168 : : k_timeout_t timeout);
5169 : :
5170 : : /**
5171 : : * @brief Allocate memory from a k_heap
5172 : : *
5173 : : * Allocates and returns a memory buffer from the memory region owned
5174 : : * by the heap. If no memory is available immediately, the call will
5175 : : * block for the specified timeout (constructed via the standard
5176 : : * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5177 : : * freed. If the allocation cannot be performed by the expiration of
5178 : : * the timeout, NULL will be returned.
5179 : : *
5180 : : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5181 : : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5182 : : *
5183 : : * @funcprops \isr_ok
5184 : : *
5185 : : * @param h Heap from which to allocate
5186 : : * @param bytes Desired size of block to allocate
5187 : : * @param timeout How long to wait, or K_NO_WAIT
5188 : : * @return A pointer to valid heap memory, or NULL
5189 : : */
5190 : : void *k_heap_alloc(struct k_heap *h, size_t bytes,
5191 : : k_timeout_t timeout);
5192 : :
5193 : : /**
5194 : : * @brief Free memory allocated by k_heap_alloc()
5195 : : *
5196 : : * Returns the specified memory block, which must have been returned
5197 : : * from k_heap_alloc(), to the heap for use by other callers. Passing
5198 : : * a NULL block is legal, and has no effect.
5199 : : *
5200 : : * @param h Heap to which to return the memory
5201 : : * @param mem A valid memory block, or NULL
5202 : : */
5203 : : void k_heap_free(struct k_heap *h, void *mem);
5204 : :
5205 : : /* Hand-calculated minimum heap sizes needed to return a successful
5206 : : * 1-byte allocation. See details in lib/os/heap.[ch]
5207 : : */
5208 : : #define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5209 : :
5210 : : /**
5211 : : * @brief Define a static k_heap in the specified linker section
5212 : : *
5213 : : * This macro defines and initializes a static memory region and
5214 : : * k_heap of the requested size in the specified linker section.
5215 : : * After kernel start, &name can be used as if k_heap_init() had
5216 : : * been called.
5217 : : *
5218 : : * Note that this macro enforces a minimum size on the memory region
5219 : : * to accommodate metadata requirements. Very small heaps will be
5220 : : * padded to fit.
5221 : : *
5222 : : * @param name Symbol name for the struct k_heap object
5223 : : * @param bytes Size of memory region, in bytes
5224 : : * @param in_section __attribute__((section(name))
5225 : : */
5226 : : #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5227 : : char in_section \
5228 : : __aligned(8) /* CHUNK_UNIT */ \
5229 : : kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5230 : : STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5231 : : .heap = { \
5232 : : .init_mem = kheap_##name, \
5233 : : .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5234 : : }, \
5235 : : }
5236 : :
5237 : : /**
5238 : : * @brief Define a static k_heap
5239 : : *
5240 : : * This macro defines and initializes a static memory region and
5241 : : * k_heap of the requested size. After kernel start, &name can be
5242 : : * used as if k_heap_init() had been called.
5243 : : *
5244 : : * Note that this macro enforces a minimum size on the memory region
5245 : : * to accommodate metadata requirements. Very small heaps will be
5246 : : * padded to fit.
5247 : : *
5248 : : * @param name Symbol name for the struct k_heap object
5249 : : * @param bytes Size of memory region, in bytes
5250 : : */
5251 : : #define K_HEAP_DEFINE(name, bytes) \
5252 : : Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5253 : : __noinit_named(kheap_buf_##name))
5254 : :
5255 : : /**
5256 : : * @brief Define a static k_heap in uncached memory
5257 : : *
5258 : : * This macro defines and initializes a static memory region and
5259 : : * k_heap of the requested size in uncached memory. After kernel
5260 : : * start, &name can be used as if k_heap_init() had been called.
5261 : : *
5262 : : * Note that this macro enforces a minimum size on the memory region
5263 : : * to accommodate metadata requirements. Very small heaps will be
5264 : : * padded to fit.
5265 : : *
5266 : : * @param name Symbol name for the struct k_heap object
5267 : : * @param bytes Size of memory region, in bytes
5268 : : */
5269 : : #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5270 : : Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5271 : :
5272 : : /**
5273 : : * @}
5274 : : */
5275 : :
5276 : : /**
5277 : : * @defgroup heap_apis Heap APIs
5278 : : * @ingroup kernel_apis
5279 : : * @{
5280 : : */
5281 : :
5282 : : /**
5283 : : * @brief Allocate memory from the heap with a specified alignment.
5284 : : *
5285 : : * This routine provides semantics similar to aligned_alloc(); memory is
5286 : : * allocated from the heap with a specified alignment. However, one minor
5287 : : * difference is that k_aligned_alloc() accepts any non-zero @p size,
5288 : : * whereas aligned_alloc() only accepts a @p size that is an integral
5289 : : * multiple of @p align.
5290 : : *
5291 : : * Above, aligned_alloc() refers to:
5292 : : * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5293 : : * The aligned_alloc function (p: 347-348)
5294 : : *
5295 : : * @param align Alignment of memory requested (in bytes).
5296 : : * @param size Amount of memory requested (in bytes).
5297 : : *
5298 : : * @return Address of the allocated memory if successful; otherwise NULL.
5299 : : */
5300 : : extern void *k_aligned_alloc(size_t align, size_t size);
5301 : :
5302 : : /**
5303 : : * @brief Allocate memory from the heap.
5304 : : *
5305 : : * This routine provides traditional malloc() semantics. Memory is
5306 : : * allocated from the heap memory pool.
5307 : : *
5308 : : * @param size Amount of memory requested (in bytes).
5309 : : *
5310 : : * @return Address of the allocated memory if successful; otherwise NULL.
5311 : : */
5312 : : extern void *k_malloc(size_t size);
5313 : :
5314 : : /**
5315 : : * @brief Free memory allocated from heap.
5316 : : *
5317 : : * This routine provides traditional free() semantics. The memory being
5318 : : * returned must have been allocated from the heap memory pool or
5319 : : * k_mem_pool_malloc().
5320 : : *
5321 : : * If @a ptr is NULL, no operation is performed.
5322 : : *
5323 : : * @param ptr Pointer to previously allocated memory.
5324 : : */
5325 : : extern void k_free(void *ptr);
5326 : :
5327 : : /**
5328 : : * @brief Allocate memory from heap, array style
5329 : : *
5330 : : * This routine provides traditional calloc() semantics. Memory is
5331 : : * allocated from the heap memory pool and zeroed.
5332 : : *
5333 : : * @param nmemb Number of elements in the requested array
5334 : : * @param size Size of each array element (in bytes).
5335 : : *
5336 : : * @return Address of the allocated memory if successful; otherwise NULL.
5337 : : */
5338 : : extern void *k_calloc(size_t nmemb, size_t size);
5339 : :
5340 : : /** @} */
5341 : :
5342 : : /* polling API - PRIVATE */
5343 : :
5344 : : #ifdef CONFIG_POLL
5345 : : #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5346 : : #else
5347 : : #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5348 : : #endif
5349 : :
5350 : : /* private - types bit positions */
5351 : : enum _poll_types_bits {
5352 : : /* can be used to ignore an event */
5353 : : _POLL_TYPE_IGNORE,
5354 : :
5355 : : /* to be signaled by k_poll_signal_raise() */
5356 : : _POLL_TYPE_SIGNAL,
5357 : :
5358 : : /* semaphore availability */
5359 : : _POLL_TYPE_SEM_AVAILABLE,
5360 : :
5361 : : /* queue/FIFO/LIFO data availability */
5362 : : _POLL_TYPE_DATA_AVAILABLE,
5363 : :
5364 : : /* msgq data availability */
5365 : : _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5366 : :
5367 : : _POLL_NUM_TYPES
5368 : : };
5369 : :
5370 : : #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5371 : :
5372 : : /* private - states bit positions */
5373 : : enum _poll_states_bits {
5374 : : /* default state when creating event */
5375 : : _POLL_STATE_NOT_READY,
5376 : :
5377 : : /* signaled by k_poll_signal_raise() */
5378 : : _POLL_STATE_SIGNALED,
5379 : :
5380 : : /* semaphore is available */
5381 : : _POLL_STATE_SEM_AVAILABLE,
5382 : :
5383 : : /* data is available to read on queue/FIFO/LIFO */
5384 : : _POLL_STATE_DATA_AVAILABLE,
5385 : :
5386 : : /* queue/FIFO/LIFO wait was cancelled */
5387 : : _POLL_STATE_CANCELLED,
5388 : :
5389 : : /* data is available to read on a message queue */
5390 : : _POLL_STATE_MSGQ_DATA_AVAILABLE,
5391 : :
5392 : : _POLL_NUM_STATES
5393 : : };
5394 : :
5395 : : #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5396 : :
5397 : : #define _POLL_EVENT_NUM_UNUSED_BITS \
5398 : : (32 - (0 \
5399 : : + 8 /* tag */ \
5400 : : + _POLL_NUM_TYPES \
5401 : : + _POLL_NUM_STATES \
5402 : : + 1 /* modes */ \
5403 : : ))
5404 : :
5405 : : /* end of polling API - PRIVATE */
5406 : :
5407 : :
5408 : : /**
5409 : : * @defgroup poll_apis Async polling APIs
5410 : : * @ingroup kernel_apis
5411 : : * @{
5412 : : */
5413 : :
5414 : : /* Public polling API */
5415 : :
5416 : : /* public - values for k_poll_event.type bitfield */
5417 : : #define K_POLL_TYPE_IGNORE 0
5418 : : #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5419 : : #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5420 : : #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5421 : : #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5422 : : #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5423 : :
5424 : : /* public - polling modes */
5425 : : enum k_poll_modes {
5426 : : /* polling thread does not take ownership of objects when available */
5427 : : K_POLL_MODE_NOTIFY_ONLY = 0,
5428 : :
5429 : : K_POLL_NUM_MODES
5430 : : };
5431 : :
5432 : : /* public - values for k_poll_event.state bitfield */
5433 : : #define K_POLL_STATE_NOT_READY 0
5434 : : #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5435 : : #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5436 : : #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5437 : : #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5438 : : #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5439 : : #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5440 : :
5441 : : /* public - poll signal object */
5442 : : struct k_poll_signal {
5443 : : /** PRIVATE - DO NOT TOUCH */
5444 : : sys_dlist_t poll_events;
5445 : :
5446 : : /**
5447 : : * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5448 : : * user resets it to 0.
5449 : : */
5450 : : unsigned int signaled;
5451 : :
5452 : : /** custom result value passed to k_poll_signal_raise() if needed */
5453 : : int result;
5454 : : };
5455 : :
5456 : : #define K_POLL_SIGNAL_INITIALIZER(obj) \
5457 : : { \
5458 : : .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5459 : : .signaled = 0, \
5460 : : .result = 0, \
5461 : : }
5462 : : /**
5463 : : * @brief Poll Event
5464 : : *
5465 : : */
5466 : : struct k_poll_event {
5467 : : /** PRIVATE - DO NOT TOUCH */
5468 : : sys_dnode_t _node;
5469 : :
5470 : : /** PRIVATE - DO NOT TOUCH */
5471 : : struct z_poller *poller;
5472 : :
5473 : : /** optional user-specified tag, opaque, untouched by the API */
5474 : : uint32_t tag:8;
5475 : :
5476 : : /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5477 : : uint32_t type:_POLL_NUM_TYPES;
5478 : :
5479 : : /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5480 : : uint32_t state:_POLL_NUM_STATES;
5481 : :
5482 : : /** mode of operation, from enum k_poll_modes */
5483 : : uint32_t mode:1;
5484 : :
5485 : : /** unused bits in 32-bit word */
5486 : : uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5487 : :
5488 : : /** per-type data */
5489 : : union {
5490 : : void *obj;
5491 : : struct k_poll_signal *signal;
5492 : : struct k_sem *sem;
5493 : : struct k_fifo *fifo;
5494 : : struct k_queue *queue;
5495 : : struct k_msgq *msgq;
5496 : : };
5497 : : };
5498 : :
5499 : : #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5500 : : { \
5501 : : .poller = NULL, \
5502 : : .type = _event_type, \
5503 : : .state = K_POLL_STATE_NOT_READY, \
5504 : : .mode = _event_mode, \
5505 : : .unused = 0, \
5506 : : { \
5507 : : .obj = _event_obj, \
5508 : : }, \
5509 : : }
5510 : :
5511 : : #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5512 : : event_tag) \
5513 : : { \
5514 : : .tag = event_tag, \
5515 : : .type = _event_type, \
5516 : : .state = K_POLL_STATE_NOT_READY, \
5517 : : .mode = _event_mode, \
5518 : : .unused = 0, \
5519 : : { \
5520 : : .obj = _event_obj, \
5521 : : }, \
5522 : : }
5523 : :
5524 : : /**
5525 : : * @brief Initialize one struct k_poll_event instance
5526 : : *
5527 : : * After this routine is called on a poll event, the event it ready to be
5528 : : * placed in an event array to be passed to k_poll().
5529 : : *
5530 : : * @param event The event to initialize.
5531 : : * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5532 : : * values. Only values that apply to the same object being polled
5533 : : * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5534 : : * event.
5535 : : * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
5536 : : * @param obj Kernel object or poll signal.
5537 : : */
5538 : :
5539 : : extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5540 : : int mode, void *obj);
5541 : :
5542 : : /**
5543 : : * @brief Wait for one or many of multiple poll events to occur
5544 : : *
5545 : : * This routine allows a thread to wait concurrently for one or many of
5546 : : * multiple poll events to have occurred. Such events can be a kernel object
5547 : : * being available, like a semaphore, or a poll signal event.
5548 : : *
5549 : : * When an event notifies that a kernel object is available, the kernel object
5550 : : * is not "given" to the thread calling k_poll(): it merely signals the fact
5551 : : * that the object was available when the k_poll() call was in effect. Also,
5552 : : * all threads trying to acquire an object the regular way, i.e. by pending on
5553 : : * the object, have precedence over the thread polling on the object. This
5554 : : * means that the polling thread will never get the poll event on an object
5555 : : * until the object becomes available and its pend queue is empty. For this
5556 : : * reason, the k_poll() call is more effective when the objects being polled
5557 : : * only have one thread, the polling thread, trying to acquire them.
5558 : : *
5559 : : * When k_poll() returns 0, the caller should loop on all the events that were
5560 : : * passed to k_poll() and check the state field for the values that were
5561 : : * expected and take the associated actions.
5562 : : *
5563 : : * Before being reused for another call to k_poll(), the user has to reset the
5564 : : * state field to K_POLL_STATE_NOT_READY.
5565 : : *
5566 : : * When called from user mode, a temporary memory allocation is required from
5567 : : * the caller's resource pool.
5568 : : *
5569 : : * @param events An array of events to be polled for.
5570 : : * @param num_events The number of events in the array.
5571 : : * @param timeout Waiting period for an event to be ready,
5572 : : * or one of the special values K_NO_WAIT and K_FOREVER.
5573 : : *
5574 : : * @retval 0 One or more events are ready.
5575 : : * @retval -EAGAIN Waiting period timed out.
5576 : : * @retval -EINTR Polling has been interrupted, e.g. with
5577 : : * k_queue_cancel_wait(). All output events are still set and valid,
5578 : : * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5579 : : * words, -EINTR status means that at least one of output events is
5580 : : * K_POLL_STATE_CANCELLED.
5581 : : * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5582 : : * @retval -EINVAL Bad parameters (user mode only)
5583 : : */
5584 : :
5585 : : __syscall int k_poll(struct k_poll_event *events, int num_events,
5586 : : k_timeout_t timeout);
5587 : :
5588 : : /**
5589 : : * @brief Initialize a poll signal object.
5590 : : *
5591 : : * Ready a poll signal object to be signaled via k_poll_signal_raise().
5592 : : *
5593 : : * @param sig A poll signal.
5594 : : */
5595 : :
5596 : : __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5597 : :
5598 : : /*
5599 : : * @brief Reset a poll signal object's state to unsignaled.
5600 : : *
5601 : : * @param sig A poll signal object
5602 : : */
5603 : : __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5604 : :
5605 : : /**
5606 : : * @brief Fetch the signaled state and result value of a poll signal
5607 : : *
5608 : : * @param sig A poll signal object
5609 : : * @param signaled An integer buffer which will be written nonzero if the
5610 : : * object was signaled
5611 : : * @param result An integer destination buffer which will be written with the
5612 : : * result value if the object was signaled, or an undefined
5613 : : * value if it was not.
5614 : : */
5615 : : __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5616 : : unsigned int *signaled, int *result);
5617 : :
5618 : : /**
5619 : : * @brief Signal a poll signal object.
5620 : : *
5621 : : * This routine makes ready a poll signal, which is basically a poll event of
5622 : : * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5623 : : * made ready to run. A @a result value can be specified.
5624 : : *
5625 : : * The poll signal contains a 'signaled' field that, when set by
5626 : : * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5627 : : * k_poll_signal_reset(). It thus has to be reset by the user before being
5628 : : * passed again to k_poll() or k_poll() will consider it being signaled, and
5629 : : * will return immediately.
5630 : : *
5631 : : * @note The result is stored and the 'signaled' field is set even if
5632 : : * this function returns an error indicating that an expiring poll was
5633 : : * not notified. The next k_poll() will detect the missed raise.
5634 : : *
5635 : : * @param sig A poll signal.
5636 : : * @param result The value to store in the result field of the signal.
5637 : : *
5638 : : * @retval 0 The signal was delivered successfully.
5639 : : * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5640 : : */
5641 : :
5642 : : __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
5643 : :
5644 : : /**
5645 : : * @internal
5646 : : */
5647 : : extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
5648 : :
5649 : : /** @} */
5650 : :
5651 : : /**
5652 : : * @defgroup cpu_idle_apis CPU Idling APIs
5653 : : * @ingroup kernel_apis
5654 : : * @{
5655 : : */
5656 : : /**
5657 : : * @brief Make the CPU idle.
5658 : : *
5659 : : * This function makes the CPU idle until an event wakes it up.
5660 : : *
5661 : : * In a regular system, the idle thread should be the only thread responsible
5662 : : * for making the CPU idle and triggering any type of power management.
5663 : : * However, in some more constrained systems, such as a single-threaded system,
5664 : : * the only thread would be responsible for this if needed.
5665 : : *
5666 : : * @note In some architectures, before returning, the function unmasks interrupts
5667 : : * unconditionally.
5668 : : */
5669 : 0 : static inline void k_cpu_idle(void)
5670 : : {
5671 : 0 : arch_cpu_idle();
5672 : 0 : }
5673 : :
5674 : : /**
5675 : : * @brief Make the CPU idle in an atomic fashion.
5676 : : *
5677 : : * Similar to k_cpu_idle(), but must be called with interrupts locked.
5678 : : *
5679 : : * Enabling interrupts and entering a low-power mode will be atomic,
5680 : : * i.e. there will be no period of time where interrupts are enabled before
5681 : : * the processor enters a low-power mode.
5682 : : *
5683 : : * After waking up from the low-power mode, the interrupt lockout state will
5684 : : * be restored as if by irq_unlock(key).
5685 : : *
5686 : : * @param key Interrupt locking key obtained from irq_lock().
5687 : : */
5688 : 2 : static inline void k_cpu_atomic_idle(unsigned int key)
5689 : : {
5690 : 2 : arch_cpu_atomic_idle(key);
5691 : 2 : }
5692 : :
5693 : : /**
5694 : : * @}
5695 : : */
5696 : :
5697 : : /**
5698 : : * @internal
5699 : : */
5700 : : #ifdef ARCH_EXCEPT
5701 : : /* This architecture has direct support for triggering a CPU exception */
5702 : : #define z_except_reason(reason) ARCH_EXCEPT(reason)
5703 : : #else
5704 : :
5705 : : #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5706 : : #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5707 : : #else
5708 : : #define __EXCEPT_LOC()
5709 : : #endif
5710 : :
5711 : : /* NOTE: This is the implementation for arches that do not implement
5712 : : * ARCH_EXCEPT() to generate a real CPU exception.
5713 : : *
5714 : : * We won't have a real exception frame to determine the PC value when
5715 : : * the oops occurred, so print file and line number before we jump into
5716 : : * the fatal error handler.
5717 : : */
5718 : : #define z_except_reason(reason) do { \
5719 : : __EXCEPT_LOC(); \
5720 : : z_fatal_error(reason, NULL); \
5721 : : } while (false)
5722 : :
5723 : : #endif /* _ARCH__EXCEPT */
5724 : :
5725 : : /**
5726 : : * @brief Fatally terminate a thread
5727 : : *
5728 : : * This should be called when a thread has encountered an unrecoverable
5729 : : * runtime condition and needs to terminate. What this ultimately
5730 : : * means is determined by the _fatal_error_handler() implementation, which
5731 : : * will be called will reason code K_ERR_KERNEL_OOPS.
5732 : : *
5733 : : * If this is called from ISR context, the default system fatal error handler
5734 : : * will treat it as an unrecoverable system error, just like k_panic().
5735 : : */
5736 : : #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
5737 : :
5738 : : /**
5739 : : * @brief Fatally terminate the system
5740 : : *
5741 : : * This should be called when the Zephyr kernel has encountered an
5742 : : * unrecoverable runtime condition and needs to terminate. What this ultimately
5743 : : * means is determined by the _fatal_error_handler() implementation, which
5744 : : * will be called will reason code K_ERR_KERNEL_PANIC.
5745 : : */
5746 : : #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
5747 : :
5748 : : /*
5749 : : * private APIs that are utilized by one or more public APIs
5750 : : */
5751 : :
5752 : : /**
5753 : : * @internal
5754 : : */
5755 : : extern void z_init_thread_base(struct _thread_base *thread_base,
5756 : : int priority, uint32_t initial_state,
5757 : : unsigned int options);
5758 : :
5759 : : #ifdef CONFIG_MULTITHREADING
5760 : : /**
5761 : : * @internal
5762 : : */
5763 : : extern void z_init_static_threads(void);
5764 : : #else
5765 : : /**
5766 : : * @internal
5767 : : */
5768 : : #define z_init_static_threads() do { } while (false)
5769 : : #endif
5770 : :
5771 : : /**
5772 : : * @internal
5773 : : */
5774 : : extern bool z_is_thread_essential(void);
5775 : :
5776 : : #ifdef CONFIG_SMP
5777 : : void z_smp_thread_init(void *arg, struct k_thread *thread);
5778 : : void z_smp_thread_swap(void);
5779 : : #endif
5780 : :
5781 : : /**
5782 : : * @internal
5783 : : */
5784 : : extern void z_timer_expiration_handler(struct _timeout *t);
5785 : :
5786 : : #ifdef CONFIG_PRINTK
5787 : : /**
5788 : : * @brief Emit a character buffer to the console device
5789 : : *
5790 : : * @param c String of characters to print
5791 : : * @param n The length of the string
5792 : : *
5793 : : */
5794 : : __syscall void k_str_out(char *c, size_t n);
5795 : : #endif
5796 : :
5797 : : /**
5798 : : * @brief Disable preservation of floating point context information.
5799 : : *
5800 : : * This routine informs the kernel that the specified thread
5801 : : * will no longer be using the floating point registers.
5802 : : *
5803 : : * @warning
5804 : : * Some architectures apply restrictions on how the disabling of floating
5805 : : * point preservation may be requested, see arch_float_disable.
5806 : : *
5807 : : * @warning
5808 : : * This routine should only be used to disable floating point support for
5809 : : * a thread that currently has such support enabled.
5810 : : *
5811 : : * @param thread ID of thread.
5812 : : *
5813 : : * @retval 0 On success.
5814 : : * @retval -ENOTSUP If the floating point disabling is not implemented.
5815 : : * -EINVAL If the floating point disabling could not be performed.
5816 : : */
5817 : : __syscall int k_float_disable(struct k_thread *thread);
5818 : :
5819 : : /**
5820 : : * @brief Enable preservation of floating point context information.
5821 : : *
5822 : : * This routine informs the kernel that the specified thread
5823 : : * will use the floating point registers.
5824 : :
5825 : : * Invoking this routine initializes the thread's floating point context info
5826 : : * to that of an FPU that has been reset. The next time the thread is scheduled
5827 : : * by z_swap() it will either inherit an FPU that is guaranteed to be in a
5828 : : * "sane" state (if the most recent user of the FPU was cooperatively swapped
5829 : : * out) or the thread's own floating point context will be loaded (if the most
5830 : : * recent user of the FPU was preempted, or if this thread is the first user
5831 : : * of the FPU). Thereafter, the kernel will protect the thread's FP context
5832 : : * so that it is not altered during a preemptive context switch.
5833 : : *
5834 : : * The @a options parameter indicates which floating point register sets will
5835 : : * be used by the specified thread.
5836 : : *
5837 : : * For x86 options:
5838 : : *
5839 : : * - K_FP_REGS indicates x87 FPU and MMX registers only
5840 : : * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
5841 : : *
5842 : : * @warning
5843 : : * Some architectures apply restrictions on how the enabling of floating
5844 : : * point preservation may be requested, see arch_float_enable.
5845 : : *
5846 : : * @warning
5847 : : * This routine should only be used to enable floating point support for
5848 : : * a thread that currently has such support enabled.
5849 : : *
5850 : : * @param thread ID of thread.
5851 : : * @param options architecture dependent options
5852 : : *
5853 : : * @retval 0 On success.
5854 : : * @retval -ENOTSUP If the floating point enabling is not implemented.
5855 : : * -EINVAL If the floating point enabling could not be performed.
5856 : : */
5857 : : __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
5858 : :
5859 : : /**
5860 : : * @brief Get the runtime statistics of a thread
5861 : : *
5862 : : * @param thread ID of thread.
5863 : : * @param stats Pointer to struct to copy statistics into.
5864 : : * @return -EINVAL if null pointers, otherwise 0
5865 : : */
5866 : : int k_thread_runtime_stats_get(k_tid_t thread,
5867 : : k_thread_runtime_stats_t *stats);
5868 : :
5869 : : /**
5870 : : * @brief Get the runtime statistics of all threads
5871 : : *
5872 : : * @param stats Pointer to struct to copy statistics into.
5873 : : * @return -EINVAL if null pointers, otherwise 0
5874 : : */
5875 : : int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
5876 : :
5877 : : /**
5878 : : * @brief Enable gathering of runtime statistics for specified thread
5879 : : *
5880 : : * This routine enables the gathering of runtime statistics for the specified
5881 : : * thread.
5882 : : *
5883 : : * @param thread ID of thread
5884 : : * @return -EINVAL if invalid thread ID, otherwise 0
5885 : : */
5886 : : extern int k_thread_runtime_stats_enable(k_tid_t thread);
5887 : :
5888 : : /**
5889 : : * @brief Disable gathering of runtime statistics for specified thread
5890 : : *
5891 : : * This routine disables the gathering of runtime statistics for the specified
5892 : : * thread.
5893 : : *
5894 : : * @param thread ID of thread
5895 : : * @return -EINVAL if invalid thread ID, otherwise 0
5896 : : */
5897 : : extern int k_thread_runtime_stats_disable(k_tid_t thread);
5898 : :
5899 : : /**
5900 : : * @brief Enable gathering of system runtime statistics
5901 : : *
5902 : : * This routine enables the gathering of system runtime statistics. Note that
5903 : : * it does not affect the gathering of similar statistics for individual
5904 : : * threads.
5905 : : */
5906 : : extern void k_sys_runtime_stats_enable(void);
5907 : :
5908 : : /**
5909 : : * @brief Disable gathering of system runtime statistics
5910 : : *
5911 : : * This routine disables the gathering of system runtime statistics. Note that
5912 : : * it does not affect the gathering of similar statistics for individual
5913 : : * threads.
5914 : : */
5915 : : extern void k_sys_runtime_stats_disable(void);
5916 : :
5917 : : #ifdef __cplusplus
5918 : : }
5919 : : #endif
5920 : :
5921 : : #include <tracing/tracing.h>
5922 : : #include <syscalls/kernel.h>
5923 : :
5924 : : #endif /* !_ASMLANGUAGE */
5925 : :
5926 : : #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
|