LCOV - code coverage report
Current view: top level - include/zephyr - spinlock.h (source / functions)
Test: coverage.info | Date: 2022-08-18 11:36:24
Coverage: Lines 12/12 (100.0 %) | Functions 3/3 (100.0 %) | Branches 3/6 (50.0 %)
Per-line annotations, summarized: k_spin_lock() was hit 37 times, k_spin_unlock() 35 times, and k_spin_release() twice; each of the three CONFIG_SPIN_VALIDATE assertions shows one taken and one not-taken branch, which accounts for the 50.0 % branch figure.

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Public interface for spinlocks
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Spinlock APIs
 * @defgroup spinlock_apis Spinlock APIs
 * @ingroup kernel_apis
 * @{
 */

struct z_spinlock_key {
        int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock().  Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
        atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
        /* Stores the thread that holds the lock with the locking CPU
         * ID in the bottom two bits.
         */
        uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
        !defined(CONFIG_SPIN_VALIDATE)
        /* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
         * the k_spinlock struct will have no members. The result
         * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
         *
         * This size difference causes problems when the k_spinlock
         * is embedded into another struct like k_msgq, because C and
         * C++ will have different ideas on the offsets of the members
         * that come after the k_spinlock member.
         *
         * To prevent this we add a 1 byte dummy member to k_spinlock
         * when the user selects C++ support and k_spinlock would
         * otherwise be empty.
         */
        char dummy;
#endif
};
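
/* To make the note above concrete, a minimal sketch of the embedding
 * problem (the struct below is a hypothetical example, not Zephyr
 * code).  Without the dummy member, sizeof(struct k_spinlock) would
 * be 0 in C but 1 in C++, so C and C++ translation units sharing
 * this header would disagree about offsetof(struct msgq_like, count):
 *
 *     struct msgq_like {
 *             struct k_spinlock lock;
 *             uint32_t count;
 *     };
 */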

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
/* thread_cpu packs the locking CPU's ID into its bottom two bits,
 * so at most four CPUs can be encoded.
 */
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock().  It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time.  Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock().  The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time.  Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested.  It is legal to lock an
 * (unlocked) spin lock while holding a different lock.  Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place.  Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
        ARG_UNUSED(l);
        k_spinlock_key_t k;

        /* Note that we need to use the underlying arch-specific lock
         * implementation.  The "irq_lock()" API in SMP context is
         * actually a wrapper for a global spinlock!
         */
        k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
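        /* Spin until the compare-and-set succeeds, i.e. until this
         * CPU atomically observes the lock as 0 (unlocked) and
         * changes it to 1 (locked).
         */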
        while (!atomic_cas(&l->locked, 0, 1)) {
        }
#endif

#ifdef CONFIG_SPIN_VALIDATE
        z_spin_lock_set_owner(l);
#endif
        return k;
}
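
/* For illustration, a minimal usage sketch of the lock/unlock pair;
 * the lock, the counter, and the function below are hypothetical
 * application code, not part of this API:
 *
 *     static struct k_spinlock my_lock;
 *     static int shared_counter;
 *
 *     void increment_counter(void)
 *     {
 *             k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *             shared_counter++;
 *             k_spin_unlock(&my_lock, key);
 *     }
 */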

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock().  After this
 * function is called, any CPU will be able to acquire the lock.  If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested.  A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned.  Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock() are illegal.  When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
                                        k_spinlock_key_t key)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
        /* Strictly we don't need atomic_clear() here (which is an
         * exchange operation that returns the old value).  We are always
         * setting a zero and (because we hold the lock) know the existing
         * state won't change due to a race.  But some architectures need
         * a memory barrier when used like this, and we don't have a
         * Zephyr framework for that.
         */
        atomic_clear(&l->locked);
#endif
        arch_irq_unlock(key.key);
}
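
/* To illustrate the nesting rule above: with two locks held at once,
 * each unlock must use the matching lock's own key, and the most
 * recently taken lock is released first.  The locks and function
 * below are hypothetical:
 *
 *     static struct k_spinlock lock_a;
 *     static struct k_spinlock lock_b;
 *
 *     void nested_critical_section(void)
 *     {
 *             k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 *             k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 *             ... access data guarded by both locks ...
 *
 *             k_spin_unlock(&lock_b, key_b);
 *             k_spin_unlock(&lock_a, key_a);
 *     }
 */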

/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
        atomic_clear(&l->locked);
#endif
}
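
/* A sketch of where k_spin_release() fits; the lock name and flow
 * below are hypothetical, for illustration only.  The lock is
 * dropped so other CPUs may acquire it, but interrupts stay masked
 * until the caller restores them from the saved key:
 *
 *     k_spinlock_key_t key = k_spin_lock(&sched_lock);
 *
 *     ... choose the next thread to run ...
 *
 *     k_spin_release(&sched_lock);
 *
 *     ... code that must still run with interrupts masked ...
 *
 *     arch_irq_unlock(key.key);
 */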

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */

Generated by: LCOV version 1.14