LCOV - code coverage report
Current view: top level - kernel - idle.c (source / functions)
Test:   coverage.info
Date:   2022-08-18 11:36:24
Legend: Lines: hit  not hit | Branches: + taken  - not taken  # not executed

              Hit   Total   Coverage
Lines:          0      11      0.0 %
Functions:      0       2      0.0 %
Branches:       0       6      0.0 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * Copyright (c) 2016 Wind River Systems, Inc.
       3                 :            :  *
       4                 :            :  * SPDX-License-Identifier: Apache-2.0
       5                 :            :  */
       6                 :            : 
       7                 :            : #include <kernel.h>
       8                 :            : #include <toolchain.h>
       9                 :            : #include <linker/sections.h>
      10                 :            : #include <drivers/timer/system_timer.h>
      11                 :            : #include <wait_q.h>
      12                 :            : #include <pm/pm.h>
      13                 :            : #include <stdbool.h>
      14                 :            : #include <logging/log.h>
      15                 :            : #include <ksched.h>
      16                 :            : #include <kswap.h>
      17                 :            : 
      18                 :            : LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
      19                 :            : 
      20                 :          0 : void z_pm_save_idle_exit(void)
      21                 :            : {
      22                 :            : #ifdef CONFIG_PM
      23                 :            :         /* Some CPU low power states require notification at the ISR
       24                 :            :          * to allow any operations that need to be done before the
       25                 :            :          * kernel switches tasks or processes nested interrupts.
       26                 :            :          * This can simply be ignored if not required.
      27                 :            :          */
      28                 :          0 :         pm_system_resume();
      29                 :            : #endif  /* CONFIG_PM */
      30                 :          0 :         sys_clock_idle_exit();
      31                 :          0 : }
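/*
 * Illustrative sketch (editor's addition, not part of idle.c): a
 * plausible caller of z_pm_save_idle_exit().  Per the comment above,
 * some low power states need ISR-level notification before the
 * kernel switches tasks or processes nested interrupts, so a timer
 * or wakeup interrupt handler would invoke it first.  The handler
 * name and the #if 0 guard are assumptions for illustration only.
 */
#if 0
static void hypothetical_wakeup_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Run the PM resume hook and tell the system timer the idle
	 * period is over, before any scheduling work happens.
	 */
	z_pm_save_idle_exit();

	/* ... device-specific interrupt handling follows ... */
}
#endif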
      32                 :            : 
      33                 :          0 : void idle(void *unused1, void *unused2, void *unused3)
      34                 :            : {
      35                 :            :         ARG_UNUSED(unused1);
      36                 :            :         ARG_UNUSED(unused2);
      37                 :            :         ARG_UNUSED(unused3);
      38                 :            : 
      39         [ #  # ]:          0 :         __ASSERT_NO_MSG(_current->base.prio >= 0);
      40                 :            : 
      41                 :            :         while (true) {
      42                 :            :                 /* SMP systems without a working IPI can't
       43                 :            :                  * actually enter an idle state, because they
      44                 :            :                  * can't be notified of scheduler changes
      45                 :            :                  * (i.e. threads they should run).  They just
      46                 :            :                  * spin in a yield loop.  This is intended as
      47                 :            :                  * a fallback configuration for new platform
      48                 :            :                  * bringup.
      49                 :            :                  */
      50                 :          0 :                 if (IS_ENABLED(CONFIG_SMP) &&
      51                 :            :                     !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
      52                 :            :                         k_busy_wait(100);
      53                 :            :                         k_yield();
      54                 :            :                         continue;
      55                 :            :                 }
      56                 :            : 
      57                 :            :                 /* Note weird API: k_cpu_idle() is called with local
      58                 :            :                  * CPU interrupts masked, and returns with them
      59                 :            :                  * unmasked.  It does not take a spinlock or other
      60                 :            :                  * higher level construct.
      61                 :            :                  */
      62                 :          0 :                 (void) arch_irq_lock();
      63                 :            : 
      64                 :            : #ifdef CONFIG_PM
      65                 :          0 :                 _kernel.idle = z_get_next_timeout_expiry();
      66                 :            : 
      67                 :            :                 /*
       68                 :            :                  * Call the suspend hook function of the SoC interface
       69                 :            :                  * to allow entry into a low power state. The function
       70                 :            :                  * returns false if a low power state was not entered,
       71                 :            :                  * in which case the kernel does normal idle processing.
      72                 :            :                  *
      73                 :            :                  * This function is entered with interrupts disabled.
      74                 :            :                  * If a low power state was entered, then the hook
       75                 :            :                  * function should enable interrupts before exiting.
       76                 :            :                  * This is because the kernel does not do its own idle
       77                 :            :                  * processing in those cases, i.e. it skips k_cpu_idle().
       78                 :            :                  * The kernel's idle processing re-enables interrupts,
       79                 :            :                  * which is essential for the kernel's scheduling
      80                 :            :                  * logic.
      81                 :            :                  */
      82   [ #  #  #  # ]:          0 :                 if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
      83                 :          0 :                         k_cpu_idle();
      84                 :            :                 }
      85                 :            : #else
      86                 :            :                 k_cpu_idle();
      87                 :            : #endif
      88                 :            : 
      89                 :            : #if !defined(CONFIG_PREEMPT_ENABLED)
      90                 :            : # if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
      91                 :            :                 /* A legacy mess: the idle thread is by definition
      92                 :            :                  * preemptible as far as the modern scheduler is
      93                 :            :                  * concerned, but older platforms use
      94                 :            :                  * CONFIG_PREEMPT_ENABLED=n as an optimization hint
      95                 :            :                  * that interrupt exit always returns to the
      96                 :            :                  * interrupted context.  So in that setup we need to
       97                 :            :                  * explicitly yield in the idle thread; otherwise
      98                 :            :                  * nothing else will run once it starts.
      99                 :            :                  */
     100                 :            :                 if (_kernel.ready_q.cache != _current) {
     101                 :            :                         z_swap_unlocked();
     102                 :            :                 }
     103                 :            : # endif
     104                 :            : #endif
     105                 :            :         }
     106                 :            : }
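/*
 * Illustrative sketch (editor's addition, not part of idle.c): the
 * interrupt contract described in the comment above the
 * pm_system_suspend() call, written out as a hypothetical SoC-level
 * suspend hook.  It is entered with interrupts masked.  All
 * hypothetical_* names are placeholders, not Zephyr API.
 */
#if 0
static bool hypothetical_soc_suspend(int32_t ticks)
{
	if (!hypothetical_low_power_state_available(ticks)) {
		/* No state entered: leave interrupts masked and
		 * return false, so idle() falls back to k_cpu_idle(),
		 * which unmasks them itself.
		 */
		return false;
	}

	hypothetical_enter_low_power_state(ticks);

	/* A state was entered, so idle() skips k_cpu_idle(); the
	 * hook must unmask interrupts itself before returning, or
	 * the scheduler could never run again.
	 */
	hypothetical_irq_unmask();
	return true;
}
#endif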

Generated by: LCOV version 1.14
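
The 0.0 % figures above mean idle.c was never executed in the run that
produced coverage.info.  A report in this format typically comes from
the standard LCOV workflow, e.g. (build directory and output paths are
illustrative):

    lcov --capture --directory build --output-file coverage.info
    genhtml coverage.info --output-directory coverage_html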