/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/
ttm_lock.c
     63  bool locked = false;  [__ttm_read_lock(), local]
     68  locked = true;  [__ttm_read_lock()]
     71  return locked;  [__ttm_read_lock()]
     86  static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)  [argument]
     90  *locked = false;  [__ttm_read_trylock()]
     96  *locked = true;  [__ttm_read_trylock()]
    108  bool locked;  [ttm_read_trylock(), local]
    112  (lock->queue, __ttm_read_trylock(lock, &locked));  [ttm_read_trylock()]
    114  wait_event(lock->queue, __ttm_read_trylock(lock, &locked));  [ttm_read_trylock()]
    117  BUG_ON(locked);  [ttm_read_trylock()]
    134  bool locked = false;  [__ttm_write_lock(), local]
    177  bool locked = false;  [__ttm_suspend_lock(), local]
    ...
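The matches above show a recurring shape: a trylock helper that doubles as the wait_event() condition and reports acquisition through a bool pointer. A minimal kernel-context sketch of that pattern follows; all names (my_lock, my_trylock_step, my_read_lock) are hypothetical, and only the shape matches the driver code.

/* assumes kernel context: <linux/wait.h>, <linux/spinlock.h> */
struct my_lock {
	spinlock_t lock;
	wait_queue_head_t queue;
	int rw;			/* >= 0: reader count; < 0: write-locked */
};

/*
 * Condition callback for wait_event(): returns true when the wait
 * should end, and reports actual acquisition through *locked.
 */
static bool my_trylock_step(struct my_lock *l, bool *locked)
{
	bool stop = false;

	*locked = false;
	spin_lock(&l->lock);
	if (l->rw >= 0) {
		l->rw++;
		*locked = true;
		stop = true;
	}
	spin_unlock(&l->lock);
	return stop;
}

static int my_read_lock(struct my_lock *l, bool interruptible)
{
	bool locked = false;
	int ret = 0;

	if (interruptible)
		ret = wait_event_interruptible(l->queue,
					       my_trylock_step(l, &locked));
	else
		wait_event(l->queue, my_trylock_step(l, &locked));

	if (ret)
		BUG_ON(locked);	/* interrupted: we must not hold the lock */
	return ret;
}

The unlock/wake side (which would call wake_up() on the queue) is omitted; the BUG_ON() mirrors line 117 above, asserting that an interrupted waiter never holds the lock.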
/kernel/linux/linux-5.10/kernel/locking/ |
qspinlock_paravirt.h
     91  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {  [pv_hybrid_queued_unfair_trylock()]
    121  return !READ_ONCE(lock->locked) &&  [trylock_clear_pending()]
    142  * Try to clear pending bit & set locked bit  [trylock_clear_pending()]
    289  * Wait for node->locked to become true, halt the vcpu after a short spin.
    302  if (READ_ONCE(node->locked))  [pv_wait_node()]
    312  * Order pn->state vs pn->locked thusly:  [pv_wait_node()]
    314  * [S] pn->state = vcpu_halted    [S] next->locked = 1
    316  * [L] pn->locked                 [RmW] pn->state = vcpu_hashed
    322  if (!READ_ONCE(node->locked)) {  [pv_wait_node()]
    336  * If the locked flag ...  [pv_wait_node()]
    493  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)  [argument]
    549  u8 locked;  [__pv_queued_spin_unlock(), local]
    ...
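The [S]/[L]/[RmW] diagram at lines 312-316 describes a classic two-sided barrier pairing. A self-contained sketch of that pairing follows; the struct and both functions are hypothetical stand-ins (the real transitions live in pv_wait_node() and pv_kick_node()), and only the ordering pattern is taken from the comment.

/* assumes kernel context: <linux/atomic.h>; vcpu_* as in the header above */
struct pv_sketch_node {
	u8 locked;	/* set by the previous lock holder */
	u8 state;	/* vcpu_running / vcpu_halted / vcpu_hashed */
	int cpu;
};

/* waiter side: the vCPU about to halt */
static void waiter_side(struct pv_sketch_node *pn)
{
	/* [S] pn->state = vcpu_halted, followed by a full barrier ... */
	smp_store_mb(pn->state, vcpu_halted);

	/* ... so this [L] load cannot move before the store above */
	if (!READ_ONCE(pn->locked))
		pv_wait(&pn->state, vcpu_halted);	/* halt until kicked */
}

/* waker side: the lock holder handing the lock over */
static void waker_side(struct pv_sketch_node *pn)
{
	/* [S] pn->locked = 1 hands the lock over ... */
	WRITE_ONCE(pn->locked, 1);

	/* ... kept ordered by the full barrier implied by the cmpxchg() RmW */
	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) == vcpu_halted)
		pv_kick(pn->cpu);	/* wake the halted vCPU */
}

With both sides barriered, one of two outcomes is guaranteed: the waiter observes locked == 1 and skips halting, or the waker's cmpxchg() observes vcpu_halted and issues the kick.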
mcs_spinlock.h
     20  int locked; /* 1 if lock acquired */  [member]
     61  * on this node->locked until the previous lock holder sets the node->locked
     70  node->locked = 0;  [mcs_spin_lock()]
     82  * Lock acquired, don't need to set node->locked to 1. Threads  [mcs_spin_lock()]
     83  * only spin on its own node->locked value for lock acquisition.  [mcs_spin_lock()]
     85  * and does not proceed to spin on its own node->locked, this  [mcs_spin_lock()]
     87  * audit lock status, then set node->locked value here.  [mcs_spin_lock()]
     94  arch_mcs_spin_lock_contended(&node->locked);  [mcs_spin_lock()]
    118  arch_mcs_spin_unlock_contended(&next->locked);  [mcs_spin_unlock()]
    ...
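A teaching-sized MCS lock, following the snippets above but without the kernel's per-CPU node management or the arch_mcs_* indirection; a minimal sketch, not the kernel implementation.

/* assumes kernel context: <linux/atomic.h>, <linux/processor.h> */
struct mcs_node {
	struct mcs_node *next;
	int locked;		/* 1 once the predecessor hands the lock over */
};

static void mcs_lock(struct mcs_node **tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	node->next = NULL;
	node->locked = 0;

	/* Atomically enqueue ourselves; the old tail is our predecessor. */
	prev = xchg(tail, node);
	if (!prev)
		return;		/* queue was empty: lock acquired */

	WRITE_ONCE(prev->next, node);
	/* Spin on OUR OWN node until the predecessor releases us. */
	smp_cond_load_acquire(&node->locked, VAL);
}

static void mcs_unlock(struct mcs_node **tail, struct mcs_node *node)
{
	struct mcs_node *next = READ_ONCE(node->next);

	if (!next) {
		/* No known successor: try to swing the tail back to NULL. */
		if (cmpxchg_release(tail, node, NULL) == node)
			return;
		/* A successor is mid-enqueue; wait for its ->next store. */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}
	/* Hand over; pairs with the acquire spin in mcs_lock(). */
	smp_store_release(&next->locked, 1);
}

Each CPU spins only on its own node->locked, which is the point of the design: contention never ping-pongs a shared lock word between caches.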
osq_lock.c
     97  node->locked = 0;  [osq_lock()]
    143  if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||  [osq_lock()]
    167  * in which case we should observe @node->locked becomming  [osq_lock()]
    170  if (smp_load_acquire(&node->locked))  [osq_lock()]
    225  WRITE_ONCE(next->locked, 1);  [osq_unlock()]
    231  WRITE_ONCE(next->locked, 1);  [osq_unlock()]
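Line 143 shows the abortable variant of the MCS-style spin: wait for the handoff, but give up if the scheduler wants the CPU. A hedged sketch of just that idiom, with a hypothetical node type in place of the osq node:

struct osq_sketch_node {
	int locked;
};

static bool spin_until_handed_off(struct osq_sketch_node *node)
{
	/*
	 * smp_cond_load_relaxed() re-reads node->locked as VAL on each
	 * iteration; the condition may mix VAL with unrelated predicates,
	 * so the wait also ends when the scheduler needs this CPU.
	 */
	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched()))
		return true;	/* nonzero VAL: the lock was handed to us */

	return false;	/* aborted by need_resched(): caller must unqueue */
}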
qspinlock.c
     46  * unlock the next pending (next->locked), we compress both these: {tail,
     47  * next->locked} into a single u32 value.
    263  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  [set_locked()]
    330  * Wait for in-progress pending->locked hand-overs with a bounded  [queued_spin_lock_slowpath()]
    376  * store-release that clears the locked bit and create lock  [queued_spin_lock_slowpath()]
    434  node->locked = 0;  [queued_spin_lock_slowpath()]
    474  arch_mcs_spin_lock_contended(&node->locked);  [queued_spin_lock_slowpath()]
    494  * store-release that clears the locked bit and create lock  [queued_spin_lock_slowpath()]
    501  * been designated yet, there is no way for the locked value to become  [queued_spin_lock_slowpath()]
    509  goto locked;  [queued_spin_lock_slowpath()]
    ...
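The "compress {tail, next->locked} into a single u32" comment and the byte-wide store in set_locked() (line 263) both rest on the qspinlock word layout. Paraphrased below from qspinlock_types.h for the little-endian, NR_CPUS < 16K case: because the locked byte has the word's low byte to itself, set_locked() can do a single-byte WRITE_ONCE() without disturbing the tail.

/* paraphrase of the qspinlock_types.h layout; kernel context assumed */
typedef struct qspinlock {
	union {
		atomic_t val;			/* the whole 32-bit lock word */
		struct {
			u8 locked;		/* bits  0- 7: lock holder byte */
			u8 pending;		/* bits  8-15: one spinner's flag */
		};
		struct {
			u16 locked_pending;	/* bits  0-15 */
			u16 tail;		/* bits 16-31: MCS queue tail */
		};
	};
} arch_spinlock_t;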
/kernel/linux/linux-6.6/kernel/locking/ |
qspinlock_paravirt.h
     91  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {  [pv_hybrid_queued_unfair_trylock()]
    121  return !READ_ONCE(lock->locked) &&  [trylock_clear_pending()]
    142  * Try to clear pending bit & set locked bit  [trylock_clear_pending()]
    289  * Wait for node->locked to become true, halt the vcpu after a short spin.
    302  if (READ_ONCE(node->locked))  [pv_wait_node()]
    312  * Order pn->state vs pn->locked thusly:  [pv_wait_node()]
    314  * [S] pn->state = vcpu_halted    [S] next->locked = 1
    316  * [L] pn->locked                 [RmW] pn->state = vcpu_hashed
    322  if (!READ_ONCE(node->locked)) {  [pv_wait_node()]
    336  * If the locked flag ...  [pv_wait_node()]
    503  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)  [argument]
    549  u8 locked;  [__pv_queued_spin_unlock(), local]
    ...
mcs_spinlock.h
     20  int locked; /* 1 if lock acquired */  [member]
     61  * on this node->locked until the previous lock holder sets the node->locked
     70  node->locked = 0;  [mcs_spin_lock()]
     82  * Lock acquired, don't need to set node->locked to 1. Threads  [mcs_spin_lock()]
     83  * only spin on its own node->locked value for lock acquisition.  [mcs_spin_lock()]
     85  * and does not proceed to spin on its own node->locked, this  [mcs_spin_lock()]
     87  * audit lock status, then set node->locked value here.  [mcs_spin_lock()]
     94  arch_mcs_spin_lock_contended(&node->locked);  [mcs_spin_lock()]
    118  arch_mcs_spin_unlock_contended(&next->locked);  [mcs_spin_unlock()]
    ...
qspinlock.c
     47  * unlock the next pending (next->locked), we compress both these: {tail,
     48  * next->locked} into a single u32 value.
    264  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  [set_locked()]
    331  * Wait for in-progress pending->locked hand-overs with a bounded  [queued_spin_lock_slowpath()]
    377  * store-release that clears the locked bit and create lock  [queued_spin_lock_slowpath()]
    383  smp_cond_load_acquire(&lock->locked, !VAL);  [queued_spin_lock_slowpath()]
    437  node->locked = 0;  [queued_spin_lock_slowpath()]
    477  arch_mcs_spin_lock_contended(&node->locked);  [queued_spin_lock_slowpath()]
    497  * store-release that clears the locked bit and create lock  [queued_spin_lock_slowpath()]
    504  * been designated yet, there is no way for the locked value ...  [queued_spin_lock_slowpath()]
    ...
osq_lock.c
     97  node->locked = 0;  [osq_lock()]
    143  if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||  [osq_lock()]
    167  * in which case we should observe @node->locked becoming  [osq_lock()]
    170  if (smp_load_acquire(&node->locked))  [osq_lock()]
    225  WRITE_ONCE(next->locked, 1);  [osq_unlock()]
    231  WRITE_ONCE(next->locked, 1);  [osq_unlock()]
/kernel/liteos_a/kernel/extended/plimit/ |
los_memlimit.c
    162  #define MEM_LIMIT_LOCK(state, locked) do { \
    164      locked = TRUE; \
    170  #define MEM_LIMIT_UNLOCK(state, locked) do { \
    171      if (!locked) { \
    179  BOOL locked = FALSE;  [OsMemLimitCheckAndMemAdd(), local]
    180  MEM_LIMIT_LOCK(intSave, locked);  [OsMemLimitCheckAndMemAdd()]
    184  MEM_LIMIT_UNLOCK(intSave, locked);  [OsMemLimitCheckAndMemAdd()]
    191  MEM_LIMIT_UNLOCK(intSave, locked);  [OsMemLimitCheckAndMemAdd()]
    201  MEM_LIMIT_UNLOCK(intSave, locked);  [OsMemLimitCheckAndMemAdd()]
    208  BOOL locked ...  [OsMemLimitMemFree(), local]
    ...
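The macro pair above implements a "lock only if not already held" pattern: `locked` records that the caller already held the lock, so the matching UNLOCK must not release it. A sketch of that shape, with SCHEDULER_HELD/LOCK/UNLOCK as hypothetical stand-ins for whatever primitive the kernel actually uses here:

#define MY_LIMIT_LOCK(state, locked) do {              \
    if (SCHEDULER_HELD()) {                            \
        (locked) = TRUE;   /* borrowed, not taken */   \
    } else {                                           \
        SCHEDULER_LOCK(state);                         \
    }                                                  \
} while (0)

#define MY_LIMIT_UNLOCK(state, locked) do {            \
    if (!(locked)) {       /* release only what we took */ \
        SCHEDULER_UNLOCK(state);                       \
    }                                                  \
} while (0)

This is why line 171 tests `!locked`: the flag being TRUE means the lock belonged to the caller all along.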
/kernel/linux/linux-5.10/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
locks.h
     58  bool locked;  [member]
     65  return __sync_bool_compare_and_swap(&lock->locked, false, true);  [lock_impl_trylock()]
     68  bool old_locked = lock->locked;  [lock_impl_trylock()]
     69  lock->locked = true;  [lock_impl_trylock()]
     96  BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));  [lock_impl_unlock()]
    102  bool old_locked = lock->locked;  [lock_impl_unlock()]
    103  lock->locked = false;  [lock_impl_unlock()]
    112  lock->locked = false;  [lock_impl_init()]
    115  #define LOCK_IMPL_INITIALIZER {.locked = false}
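This test-harness lock is a plain compare-and-swap on a single bool. A userspace sketch of the same shape, using the same GCC __sync builtin, compiles as ordinary C (names here are my own, not the harness API):

#include <stdbool.h>

struct lock_impl_sketch {
	bool locked;
};

static bool my_trylock(struct lock_impl_sketch *lock)
{
	/* Atomically flip false -> true; fails if already held. */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
}

static void my_lock(struct lock_impl_sketch *lock)
{
	while (!my_trylock(lock))
		;	/* spin; a real lock would pause or yield */
}

static void my_unlock(struct lock_impl_sketch *lock)
{
	/* Flipping true -> false must succeed if we hold the lock. */
	__sync_bool_compare_and_swap(&lock->locked, true, false);
}

The BUG_ON() at line 96 asserts exactly this last invariant: an unlock that finds the flag already clear is a double-unlock bug.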
/kernel/linux/linux-6.6/tools/testing/selftests/net/forwarding/ |
bridge_locked_port.sh
    102  bridge link set dev $swp1 locked on
    112  bridge link set dev $swp1 locked off
    133  bridge link set dev $swp1 locked on
    142  bridge link set dev $swp1 locked off
    161  bridge link set dev $swp1 locked on
    170  bridge link set dev $swp1 locked off
    187  bridge link set dev $swp1 learning on locked on
    190  check_fail $? "Ping worked on a locked port without an FDB entry"
    195  bridge link set dev $swp1 learning on locked on mab on
    200  bridge fdb get `mac_get $h1` br br0 vlan 1 | grep "dev $swp1" | grep -q "locked"
    ...
/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/dsa/ |
bridge_locked_port.sh
    102  bridge link set dev $swp1 locked on
    112  bridge link set dev $swp1 locked off
    133  bridge link set dev $swp1 locked on
    142  bridge link set dev $swp1 locked off
    161  bridge link set dev $swp1 locked on
    170  bridge link set dev $swp1 locked off
    187  bridge link set dev $swp1 learning on locked on
    190  check_fail $? "Ping worked on a locked port without an FDB entry"
    195  bridge link set dev $swp1 learning on locked on mab on
    200  bridge fdb get `mac_get $h1` br br0 vlan 1 | grep "dev $swp1" | grep -q "locked"
    ...
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/ |
ice_dcb_lib.h
     18  int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
     24  ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
     26  void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
     28  int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
     93  ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)  [argument]
    100  ice_pf_dcb_cfg(struct ice_pf __always_unused *pf, struct ice_dcbx_cfg __always_unused *new_cfg, bool __always_unused locked)  [argument]
    131  static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }  [argument]
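Lines 93-131 are the stub half of a config-gated header: when the DCB feature is compiled out, callers still link against inline no-op versions, and __always_unused silences unused-parameter warnings. A sketch of the pattern, with a hypothetical feature and function name:

#ifdef CONFIG_MY_FEATURE
int my_feature_init(struct my_pf *pf, bool locked);
#else
static inline int
my_feature_init(struct my_pf *pf, bool __always_unused locked)
{
	return 0;	/* feature absent: report success, do nothing */
}
#endif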
/kernel/linux/linux-6.6/mm/ |
gup.c
    914  * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
    917  faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int *flags, bool unshare, int *locked)  [argument]
    964  *locked = 0;  [faultin_page()]
    986  *locked = 0;  [faultin_page()]
   1139  * @locked: whether we're still with the mmap_lock held
   1174  * be released. If this happens *@locked will be set to 0 on return.
   1184  __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   1240  PTR_ERR(page) == -EMLINK, locked);  [__get_user_pages()]
   1452  * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
   1454  * set (*locked ...
   1464  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int flags)  [argument]
   1621  populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked)  [argument]
   1690  faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool write, int *locked)  [argument]
   1742  int locked = 0;  [__mm_populate(), local]
   1791  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int foll_flags)  [argument]
   2015  int locked = 0;  [get_dump_page(), local]
   2187  __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int gup_flags)  [argument]
   2222  is_valid_gup_args(struct page **pages, int *locked, unsigned int *gup_flags_p, unsigned int to_set)  [argument]
   2326  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   2344  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   2370  int locked = 1;  [get_user_pages(), local]
   2398  int locked = 0;  [get_user_pages_unlocked(), local]
   3172  int locked = 0;  [internal_get_user_pages_fast(), local]
   3336  pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   3373  int locked = 1;  [pin_user_pages(), local]
   3393  int locked = 0;  [pin_user_pages_unlocked(), local]
   ...
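Throughout gup.c, `int *locked` implements one protocol: the caller takes mmap_lock and passes *locked == 1; GUP may drop the lock on its own, in which case it sets *locked to 0, and the caller must only unlock what it still owns. A sketch of the caller side, using the get_user_pages_remote() signature shown at line 2326; the wrapper name is hypothetical and error handling is trimmed.

static long grab_one_page(struct mm_struct *mm, unsigned long addr,
			  struct page **page)
{
	int locked = 1;		/* we hold mmap_lock when we call in */
	long nr;

	mmap_read_lock(mm);
	nr = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, page, &locked);
	if (locked)		/* GUP may have dropped the lock for us */
		mmap_read_unlock(mm);
	return nr;
}

Unlocking unconditionally here would be a double-unlock whenever GUP had already released mmap_lock to wait on a page fault.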
/kernel/linux/linux-5.10/drivers/media/dvb-frontends/ |
lgs8gxx.c
    295  static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked)  [argument]
    308  *locked = ((t & 0x80) == 0x80) ? 1 : 0;  [lgs8gxx_is_locked()]
    310  *locked = ((t & 0xC0) == 0xC0) ? 1 : 0;  [lgs8gxx_is_locked()]
    315  static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked)  [argument]
    331  *locked = (ret == 0) ? 1 : 0;  [lgs8gxx_wait_ca_lock()]
    358  lgs8gxx_autolock_gi(struct lgs8gxx_state *priv, u8 gi, u8 cpn, u8 *locked)  [argument]
    384  err = lgs8gxx_wait_ca_lock(priv, locked);  [lgs8gxx_autolock_gi()]
    385  if (err || !(*locked))  [lgs8gxx_autolock_gi()]
    393  *locked = 0;  [lgs8gxx_autolock_gi()]
    403  u8 locked ...  [lgs8gxx_auto_detect(), local]
    716  u8 t, locked = 0;  [lgs8gxx_read_status(), local]
    ...
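The demodulator driver above separates "read the lock bit once" (lgs8gxx_is_locked) from "poll it until a deadline" (lgs8gxx_wait_ca_lock). A sketch of the polling half; my_state, read_reg(), REG_STATUS, the bit mask, and the timeout value are all hypothetical placeholders, not the driver's actual registers.

static int wait_for_lock(struct my_state *priv, u8 *locked)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(200);
	u8 t;
	int ret;

	*locked = 0;
	do {
		ret = read_reg(priv, REG_STATUS, &t);
		if (ret)
			return ret;		/* bus error: propagate */
		if ((t & 0x80) == 0x80) {	/* carrier-lock bit set */
			*locked = 1;
			return 0;
		}
		msleep(10);
	} while (time_before(jiffies, timeout));

	return 0;	/* no I/O error, but *locked stays 0: no signal */
}

Note the two-channel result, matching the driver: the return value reports I/O errors, while *locked reports whether the frontend actually achieved lock.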
/kernel/linux/linux-6.6/drivers/media/dvb-frontends/ |
lgs8gxx.c
    295  static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked)  [argument]
    308  *locked = ((t & 0x80) == 0x80) ? 1 : 0;  [lgs8gxx_is_locked()]
    310  *locked = ((t & 0xC0) == 0xC0) ? 1 : 0;  [lgs8gxx_is_locked()]
    315  static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked)  [argument]
    331  *locked = (ret == 0) ? 1 : 0;  [lgs8gxx_wait_ca_lock()]
    358  lgs8gxx_autolock_gi(struct lgs8gxx_state *priv, u8 gi, u8 cpn, u8 *locked)  [argument]
    384  err = lgs8gxx_wait_ca_lock(priv, locked);  [lgs8gxx_autolock_gi()]
    385  if (err || !(*locked))  [lgs8gxx_autolock_gi()]
    393  *locked = 0;  [lgs8gxx_autolock_gi()]
    403  u8 locked ...  [lgs8gxx_auto_detect(), local]
    716  u8 t, locked = 0;  [lgs8gxx_read_status(), local]
    ...
/kernel/linux/linux-5.10/arch/loongarch/include/asm/ |
qspinlock.h
     19  compiletime_assert_atomic_type(lock->locked);  [native_queued_spin_unlock()]
     21  WRITE_ONCE(lock->locked, 0);  [native_queued_spin_unlock()]
     42  compiletime_assert_atomic_type(lock->locked);  [queued_spin_unlock()]
     44  WRITE_ONCE(lock->locked, 0);  [queued_spin_unlock()]
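For comparison, the generic queued_spin_unlock() releases the lock with a store-release on the same byte; this port's header uses a plain WRITE_ONCE(), presumably relying on ordering provided elsewhere in its atomics. A sketch of the generic-style form, not a claim about what LoongArch requires:

static inline void my_queued_spin_unlock(struct qspinlock *lock)
{
	/* release store: critical-section accesses cannot leak past this */
	smp_store_release(&lock->locked, 0);
}

Either way, unlock is a single byte store of 0; that is only safe because the locked field occupies its own byte of the lock word (see the layout union earlier in this listing).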
/kernel/linux/linux-5.10/mm/ |
gup.c
    530  * If the page is already locked, we don't need to  [follow_page_pte()]
    862  * mmap_lock must be held on entry. If @locked != NULL and *@flags
    864  * is, *@locked will be set to 0 and -EBUSY returned.
    866  faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *locked)  [argument]
    879  if (locked)  [faultin_page()]
    901  if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))  [faultin_page()]
    902  *locked = 0;  [faultin_page()]
    978  * @locked: whether we're still with the mmap_lock held
   1014  * If @locked != NULL, *@locked will ...
   1027  __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   1253  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags)  [argument]
   1401  populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked)  [argument]
   1452  int locked = 0;  [__mm_populate(), local]
   1500  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int foll_flags)  [argument]
   1563  int locked = 1;  [get_dump_page(), local]
   1792  __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   1881  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   1895  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   1903  __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   1972  get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   2016  int locked = 1;  [get_user_pages_unlocked(), local]
   2949  pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)  [argument]
   3018  pin_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)  [argument]
   ...
mremap.c
    333  move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, bool *locked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)  [argument]
    460  *locked = true;  [move_vma()]
    523  unsigned long locked, lock_limit;  [vma_to_resize(), local]
    524  locked = mm->locked_vm << PAGE_SHIFT;  [vma_to_resize()]
    526  locked += new_len - old_len;  [vma_to_resize()]
    527  if (locked > lock_limit && !capable(CAP_IPC_LOCK))  [vma_to_resize()]
    545  mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len, bool *locked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap_early, struct list_head *uf_unmap)  [argument]
    626  ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,  [mremap_to()]
    667  bool locked = false;  [SYSCALL_DEFINE5(), local]
    719  &locked, flag ...  [SYSCALL_DEFINE5()]
    ...
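Lines 523-527 are the RLIMIT_MEMLOCK accounting for growing a locked mapping: convert the already-locked page count to bytes, add the growth, and compare against the rlimit unless the caller has CAP_IPC_LOCK. A sketch of just that arithmetic; the surrounding checks from the real vma_to_resize() (the VM_LOCKED test, may_expand_vm, and so on) are trimmed, and the helper name is mine.

static int check_mlock_growth(struct mm_struct *mm,
			      unsigned long old_len, unsigned long new_len)
{
	unsigned long locked, lock_limit;

	locked = mm->locked_vm << PAGE_SHIFT;	/* locked pages -> bytes */
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	locked += new_len - old_len;		/* bytes the resize adds */

	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		return -EAGAIN;
	return 0;
}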
/kernel/linux/linux-5.10/sound/core/seq/ |
seq_queue.h
     29  bool locked; /* timer is only accessible by owner if set */  [member]
     57  struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
     89  int snd_seq_queue_set_owner(int queueid, int client, int locked);
     90  int snd_seq_queue_set_locked(int queueid, int client, int locked);
seq_queue.c
     19  * - The queue is locked when struct snd_seq_queue pointer is returned via
     96  static struct snd_seq_queue *queue_new(int owner, int locked)  [argument]
    122  q->locked = locked;  [queue_new()]
    168  struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)  [argument]
    172  q = queue_new(client, locked);  [snd_seq_queue_alloc()]
    355  return (q->owner == client) || (!q->locked && !q->klocked);  [check_access()]
    405  int snd_seq_queue_set_owner(int queueid, int client, int locked)  [argument]
    419  q->locked = locked ...  [snd_seq_queue_set_owner()]
    760  bool locked;  [snd_seq_info_queues_read(), local]
    ...
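Line 355 is the whole access policy in one expression: a client may touch the queue if it owns it, or if the queue is neither client-locked (locked) nor kernel-locked (klocked). As a standalone predicate, with field names taken from the snippets:

static bool queue_accessible(const struct snd_seq_queue *q, int client)
{
	return (q->owner == client) || (!q->locked && !q->klocked);
}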
/kernel/linux/linux-6.6/sound/core/seq/ |
seq_queue.h
     29  bool locked; /* timer is only accessible by owner if set */  [member]
     57  struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
     86  int snd_seq_queue_set_owner(int queueid, int client, int locked);
     87  int snd_seq_queue_set_locked(int queueid, int client, int locked);
/kernel/linux/linux-5.10/drivers/thermal/intel/ |
intel_quark_dts_thermal.c
     95  /* If DTS not locked, all trip points are configurable */
     97  /* If DTS locked, all trip points are not configurable */
    103  bool locked;  [member]
    132  if (!aux_entry->locked) {  [soc_dts_enable()]
    139  pr_info("DTS is locked. Cannot enable DTS\n");  [soc_dts_enable()]
    160  if (!aux_entry->locked) {  [soc_dts_disable()]
    168  pr_info("DTS is locked. Cannot disable DTS\n");  [soc_dts_disable()]
    220  if (aux_entry->locked) {  [update_trip_temp()]
    328  if (!aux_entry->locked) {  [free_soc_dts()]
    357  /* Check if DTS register is locked */  [alloc_soc_dts()]
    ...
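Here `locked` is a cached hardware lock bit: the driver reads it once at probe time (line 357) and then refuses configuration writes while it is set. A sketch of that gating pattern; my_dts, write_reg(), REG_TRIP, and the error code are hypothetical, not the driver's actual register interface.

static int dts_set_threshold(struct my_dts *dts, int temp)
{
	if (dts->locked) {
		pr_info("DTS is locked; trip points are read-only\n");
		return -EPERM;	/* hardware forbids reconfiguration */
	}
	return write_reg(dts, REG_TRIP, temp);	/* allowed when unlocked */
}

Caching the bit at probe is sound because a hardware lock of this kind is set-once until reset; re-reading it on every call would buy nothing.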
/kernel/linux/linux-6.6/drivers/thermal/intel/ |
intel_quark_dts_thermal.c
     96  /* If DTS not locked, all trip points are configurable */
     98  /* If DTS locked, all trip points are not configurable */
    104  bool locked;  [member]
    134  if (!aux_entry->locked) {  [soc_dts_enable()]
    141  pr_info("DTS is locked. Cannot enable DTS\n");  [soc_dts_enable()]
    162  if (!aux_entry->locked) {  [soc_dts_disable()]
    170  pr_info("DTS is locked. Cannot disable DTS\n");  [soc_dts_disable()]
    211  if (aux_entry->locked) {  [update_trip_temp()]
    305  if (!aux_entry->locked) {  [free_soc_dts()]
    334  /* Check if DTS register is locked */  [alloc_soc_dts()]
    ...