LCOV - code coverage report

Current view: top level - kernel/include - kswap.h (source / functions)
Test:         coverage.info
Date:         2022-08-18 11:36:24

                     Hit    Total    Coverage
  Lines:              14       14     100.0 %
  Functions:           3        3     100.0 %
  Branches:            0        0           -

Legend: Lines: hit  not hit | Branches: + taken  - not taken  # not executed

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * Copyright (c) 2018 Intel Corporation
       3                 :            :  *
       4                 :            :  * SPDX-License-Identifier: Apache-2.0
       5                 :            :  */
       6                 :            : #ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       7                 :            : #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       8                 :            : 
       9                 :            : #include <ksched.h>
      10                 :            : #include <spinlock.h>
      11                 :            : #include <kernel_arch_func.h>
      12                 :            : 
      13                 :            : #ifdef CONFIG_STACK_SENTINEL
      14                 :            : extern void z_check_stack_sentinel(void);
      15                 :            : #else
      16                 :            : #define z_check_stack_sentinel() /**/
      17                 :            : #endif
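
/* For context, a hedged aside (not part of this header): with the
 * option enabled, the kernel plants a known word at the far end of
 * each thread stack and z_check_stack_sentinel() verifies it on every
 * swap to catch overflows; with it disabled, the empty macro above
 * makes the unconditional call sites below compile to nothing.
 */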
      18                 :            : 
      19                 :            : extern struct k_spinlock sched_spinlock;
      20                 :            : 
      21                 :            : /* In SMP, the irq_lock() is a spinlock which is implicitly released
      22                 :            :  * and reacquired on context switch to preserve the existing
      23                 :            :  * semantics.  This means that whenever we are about to return to a
      24                 :            :  * thread (via either z_swap() or interrupt/exception return!) we need
      25                 :            :  * to restore the lock state to whatever the thread's counter
      26                 :            :  * expects.
      27                 :            :  */
      28                 :            : void z_smp_release_global_lock(struct k_thread *thread);
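
/* Illustrative only (not this header's code): the preserved semantics
 * are that a thread may block while holding the legacy global
 * irq_lock(); the swap path drops the outgoing thread's share of the
 * lock via z_smp_release_global_lock(), and the lock is reacquired
 * before that thread next runs, e.g.:
 *
 *     unsigned int key = irq_lock();   // takes the global spinlock
 *     k_sleep(K_MSEC(10));             // released across the swap,
 *                                      // held again when we resume
 *     irq_unlock(key);
 */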
      29                 :            : 
      30                 :            : /* context switching and scheduling-related routines */
      31                 :            : #ifdef CONFIG_USE_SWITCH
      32                 :            : 
      33                 :            : /* There is an unavoidable SMP race when threads swap -- their thread
      34                 :            :  * record is in the queue (and visible to other CPUs) before
      35                 :            :  * arch_switch() finishes saving state.  We must spin for the switch
      36                 :            :  * handle before entering a new thread.  See docs on arch_switch().
      37                 :            :  *
      38                 :            :  * Note: future SMP architectures may need a fence/barrier or cache
      39                 :            :  * invalidation here.  Current ones don't, and sadly Zephyr doesn't
      40                 :            :  * have a framework for that yet.
      41                 :            :  */
      42                 :            : static inline void wait_for_switch(struct k_thread *thread)
      43                 :            : {
      44                 :            : #ifdef CONFIG_SMP
      45                 :            :         volatile void **shp = (void *)&thread->switch_handle;
      46                 :            : 
      47                 :            :         while (*shp == NULL) {
      48                 :            :                 k_busy_wait(1);
      49                 :            :         }
      50                 :            : #endif
      51                 :            : }
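
/* Sketch only, per the note above (not current Zephyr code): if a
 * future SMP port did need explicit ordering, and the handle were
 * declared _Atomic, the handoff could be phrased with C11 atomics:
 *
 *     #include <stdatomic.h>
 *
 *     // writer side, last step of the arch_switch() state save:
 *     atomic_store_explicit(&old->switch_handle, handle,
 *                           memory_order_release);
 *
 *     // reader side, replacing the plain spin above:
 *     while (atomic_load_explicit(&thread->switch_handle,
 *                                 memory_order_acquire) == NULL) {
 *             k_busy_wait(1);
 *     }
 */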
      52                 :            : 
      53                 :            : /* New style context switching.  arch_switch() is a lower level
      54                 :            :  * primitive that doesn't know about the scheduler or return value.
      55                 :            :  * Needed for SMP, where the scheduler requires spinlocking that we
      56                 :            :  * don't want to have to do in per-architecture assembly.
      57                 :            :  *
      58                 :            :  * Note that is_spinlock is a compile-time construct which will be
      59                 :            :  * optimized out when this function is expanded.
      60                 :            :  */
      61                 :            : static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
      62                 :            :                                           struct k_spinlock *lock,
      63                 :            :                                           int is_spinlock)
      64                 :            : {
      65                 :            :         ARG_UNUSED(lock);
      66                 :            :         struct k_thread *new_thread, *old_thread;
      67                 :            : 
      68                 :            : #ifdef CONFIG_SPIN_VALIDATE
      69                 :            :         /* Make sure the key acts to unmask interrupts, if it doesn't,
      70                 :            :          * then we are context switching out of a nested lock
      71                 :            :          * (i.e. breaking the lock of someone up the stack) which is
       72                 :            :  * forbidden!  The sole exceptions are dummy threads used
      73                 :            :          * during initialization (where we start with interrupts
      74                 :            :          * masked and switch away to begin scheduling) and the case of
      75                 :            :          * a dead current thread that was just aborted (where the
      76                 :            :          * damage was already done by the abort anyway).
      77                 :            :          *
      78                 :            :          * (Note that this is disabled on ARM64, where system calls
      79                 :            :          * can sometimes run with interrupts masked in ways that don't
      80                 :            :          * represent lock state.  See #35307)
      81                 :            :          */
      82                 :            : # ifndef CONFIG_ARM64
      83                 :            :         __ASSERT(arch_irq_unlocked(key) ||
      84                 :            :                  _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
      85                 :            :                  "Context switching while holding lock!");
      86                 :            : # endif
      87                 :            : #endif
      88                 :            : 
      89                 :            :         old_thread = _current;
      90                 :            : 
      91                 :            :         z_check_stack_sentinel();
      92                 :            : 
      93                 :            :         old_thread->swap_retval = -EAGAIN;
      94                 :            : 
      95                 :            :         /* We always take the scheduler spinlock if we don't already
      96                 :            :          * have it.  We "release" other spinlocks here.  But we never
      97                 :            :          * drop the interrupt lock.
      98                 :            :          */
      99                 :            :         if (is_spinlock && lock != NULL && lock != &sched_spinlock) {
     100                 :            :                 k_spin_release(lock);
     101                 :            :         }
     102                 :            :         if (!is_spinlock || lock != &sched_spinlock) {
     103                 :            :                 (void) k_spin_lock(&sched_spinlock);
     104                 :            :         }
     105                 :            : 
     106                 :            :         new_thread = z_swap_next_thread();
     107                 :            : 
     108                 :            :         if (new_thread != old_thread) {
     109                 :            :                 z_sched_usage_switch(new_thread);
     110                 :            : 
     111                 :            : #ifdef CONFIG_SMP
     112                 :            :                 _current_cpu->swap_ok = 0;
     113                 :            :                 new_thread->base.cpu = arch_curr_cpu()->id;
     114                 :            : 
     115                 :            :                 if (!is_spinlock) {
     116                 :            :                         z_smp_release_global_lock(new_thread);
     117                 :            :                 }
     118                 :            : #endif
     119                 :            :                 z_thread_mark_switched_out();
     120                 :            :                 wait_for_switch(new_thread);
     121                 :            :                 _current_cpu->current = new_thread;
     122                 :            : 
     123                 :            : #ifdef CONFIG_TIMESLICING
     124                 :            :                 z_reset_time_slice(new_thread);
     125                 :            : #endif
     126                 :            : 
     127                 :            : #ifdef CONFIG_SPIN_VALIDATE
     128                 :            :                 z_spin_lock_set_owner(&sched_spinlock);
     129                 :            : #endif
     130                 :            : 
     131                 :            :                 arch_cohere_stacks(old_thread, NULL, new_thread);
     132                 :            : 
     133                 :            : #ifdef CONFIG_SMP
     134                 :            :                 /* Add _current back to the run queue HERE. After
     135                 :            :                  * wait_for_switch() we are guaranteed to reach the
     136                 :            :                  * context switch in finite time, avoiding a potential
     137                 :            :                  * deadlock.
     138                 :            :                  */
     139                 :            :                 z_requeue_current(old_thread);
     140                 :            : #endif
     141                 :            :                 void *newsh = new_thread->switch_handle;
     142                 :            : 
     143                 :            :                 if (IS_ENABLED(CONFIG_SMP)) {
     144                 :            :                         /* Active threads MUST have a null here */
     145                 :            :                         new_thread->switch_handle = NULL;
     146                 :            :                 }
     147                 :            :                 k_spin_release(&sched_spinlock);
     148                 :            :                 arch_switch(newsh, &old_thread->switch_handle);
     149                 :            :         } else {
     150                 :            :                 k_spin_release(&sched_spinlock);
     151                 :            :         }
     152                 :            : 
     153                 :            :         if (is_spinlock) {
     154                 :            :                 arch_irq_unlock(key);
     155                 :            :         } else {
     156                 :            :                 irq_unlock(key);
     157                 :            :         }
     158                 :            : 
     159                 :            :         return _current->swap_retval;
     160                 :            : }
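
/* The arch_switch() contract do_swap() relies on, in outline
 * (pseudo-code for a hypothetical port, not any real backend):
 *
 *     void arch_switch(void *switch_to, void **switched_from)
 *     {
 *             // 1. save the outgoing thread's register state
 *             // 2. publish its handle LAST, so wait_for_switch()
 *             //    on another CPU cannot resume this thread before
 *             //    the save is complete:
 *             *switched_from = outgoing_handle;
 *             // 3. restore the context identified by switch_to
 *     }
 */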
     161                 :            : 
     162                 :            : static inline int z_swap_irqlock(unsigned int key)
     163                 :            : {
     164                 :            :         return do_swap(key, NULL, 0);
     165                 :            : }
     166                 :            : 
     167                 :            : static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     168                 :            : {
     169                 :            :         return do_swap(key.key, lock, 1);
     170                 :            : }
     171                 :            : 
     172                 :            : static inline void z_swap_unlocked(void)
     173                 :            : {
     174                 :            :         (void) do_swap(arch_irq_lock(), NULL, 1);
     175                 :            : }
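
/* Typical use, sketched with a hypothetical object (not kernel code):
 * a blocking primitive queues the current thread under its own lock
 * and swaps away atomically; swap_retval, pre-set to -EAGAIN above,
 * is what z_swap() returns unless a waker stores a different value:
 *
 *     k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *     // ...append _current to obj's wait queue...
 *     return z_swap(&obj->lock, key);
 */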
     176                 :            : 
     177                 :            : #else /* !CONFIG_USE_SWITCH */
     178                 :            : 
     179                 :            : extern int arch_swap(unsigned int key);
     180                 :            : 
     181                 :          3 : static inline int z_swap_irqlock(unsigned int key)
     182                 :            : {
     183                 :            :         int ret;
     184                 :            :         z_check_stack_sentinel();
     185                 :          3 :         ret = arch_swap(key);
     186                 :          2 :         return ret;
     187                 :            : }
     188                 :            : 
     189                 :            : /* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
     190                 :            :  * can't be in SMP.  The k_spin_release() call is just for validation
     191                 :            :  * handling.
     192                 :            :  */
     193                 :          2 : static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     194                 :            : {
     195                 :          2 :         k_spin_release(lock);
     196                 :          2 :         return z_swap_irqlock(key.key);
     197                 :            : }
     198                 :            : 
     199                 :            : static inline void z_swap_unlocked(void)
     200                 :            : {
     201                 :            :         (void) z_swap_irqlock(arch_irq_lock());
     202                 :            : }
     203                 :            : 
     204                 :            : #endif /* !CONFIG_USE_SWITCH */
     205                 :            : 
     206                 :            : /**
     207                 :            :  * Set up a "dummy" thread, used at early initialization to launch the
     208                 :            :  * first thread on a CPU.
     209                 :            :  *
     210                 :            :  * Needs to set enough fields such that the context switching code can
     211                 :            :  * use it to properly store state, which will just be discarded.
     212                 :            :  *
     213                 :            :  * The memory of the dummy thread can be completely uninitialized.
     214                 :            :  */
     215                 :          1 : static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
     216                 :            : {
     217                 :          1 :         dummy_thread->base.thread_state = _THREAD_DUMMY;
     218                 :            : #ifdef CONFIG_SCHED_CPU_MASK
     219                 :            :         dummy_thread->base.cpu_mask = -1;
     220                 :            : #endif
     221                 :          1 :         dummy_thread->base.user_options = K_ESSENTIAL;
     222                 :            : #ifdef CONFIG_THREAD_STACK_INFO
     223                 :          1 :         dummy_thread->stack_info.start = 0U;
     224                 :          1 :         dummy_thread->stack_info.size = 0U;
     225                 :            : #endif
     226                 :            : #ifdef CONFIG_USERSPACE
     227                 :            :         dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
     228                 :            : #endif
     229                 :            : #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
     230                 :            :         k_thread_system_pool_assign(dummy_thread);
     231                 :            : #else
     232                 :          1 :         dummy_thread->resource_pool = NULL;
     233                 :            : #endif
     234                 :            : 
     235                 :          1 :         _current_cpu->current = dummy_thread;
     236                 :          1 : }
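
/* Boot-time use, sketched (variable name illustrative): the first
 * real context switch needs an outgoing context to save into, so
 * early init sets up a throwaway thread and then swaps away from it:
 *
 *     struct k_thread dummy;      // contents may be uninitialized
 *
 *     z_dummy_thread_init(&dummy);
 *     // ...create and ready the first real thread...
 *     z_swap_unlocked();          // dummy's saved state is discarded
 */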
     237                 :            : #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */

Generated by: LCOV version 1.14