Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: kernel/locking/mutex.c    Create Date: 2022-07-28 09:47:30
Last Modify: 2020-03-12 14:18:49    Copyright © Brick

Name: __mutex_lock_common: lock a mutex (possibly interruptibly), slowpath

Proto: static __always_inline int __sched __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)

Type: int

Parameter:

Type                        Parameter Name
struct mutex *              lock
long                        state
unsigned int                subclass
struct lockdep_map *        nest_lock
unsigned long               ip
struct ww_acquire_ctx *     ww_ctx
const bool                  use_ww_ctx
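
For orientation, here is a minimal caller-side sketch (hypothetical lock and function names) of the public API that funnels into this common slowpath on contention:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_lock);           /* hypothetical example mutex */

    static int my_operation(void)
    {
        /*
         * mutex_lock() takes an inline fastpath when uncontended; on
         * contention it ends up in __mutex_lock_common() with
         * state == TASK_UNINTERRUPTIBLE.
         */
        mutex_lock(&my_lock);
        /* ... critical section ... */
        mutex_unlock(&my_lock);
        return 0;
    }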
931  bool first = false
935  might_sleep()
941  ww = container_of(lock, struct ww_mutex, base) (cast the mutex member out to its containing ww_mutex)
942  If use_ww_ctx && ww_ctx Then
943  If unlikely(ww_ctx == READ_ONCE(ww->ctx)) Then Return -EALREADY (this acquire context already holds the lock)
951  If ww_ctx->acquired == 0 Then ww_ctx->wounded = 0 (reset the wounded flag after a kill; no one can race to wound us here, since they cannot see a valid owner pointer while we hold no locks)
955  preempt_disable() (even without preemption this must act as a barrier, so that things like get_user()/put_user(), which can fault and schedule, cannot migrate into the preempt-protected region)
956  mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip)
958  If __mutex_trylock(lock) (the real trylock, which works from any unlocked state) || mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL) Then
961  lock_acquired(&lock->dep_map, ip)
962  If use_ww_ctx && ww_ctx Then ww_mutex_set_context_fastpath(ww, ww_ctx) (after a fastpath acquire, where wait_lock is not held, set the ctx and wake any waiters so they can recheck)
964  preempt_enable()
965  Return 0
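
The branch above is the contended-path fastpath: a trylock plus optimistic spinning acquires the lock without ever touching wait_lock. Callers can use the same opportunistic idea via mutex_trylock(); a minimal sketch with a hypothetical lock:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(stats_lock);        /* hypothetical example mutex */

    static void try_update_stats(void)
    {
        /* mutex_trylock() returns 1 on success, 0 if the lock is held. */
        if (!mutex_trylock(&stats_lock))
            return;                         /* contended: skip, never sleep */
        /* ... update shared state ... */
        mutex_unlock(&stats_lock);
    }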
968  spin_lock(&lock->wait_lock)
972  If __mutex_trylock(lock) Then (after waiting to acquire the wait_lock, try again)
973  If use_ww_ctx && ww_ctx Then __ww_mutex_check_waiters(lock, ww_ctx) (we just acquired @lock under @ww_ctx; check whether later contexts waiting behind us on the wait-list need to die, or wound us)
976  Go to skip_wait
979  debug_mutex_lock_common(lock, &waiter)
981  lock_contended(&lock->dep_map, ip)
983  If Not use_ww_ctx Then
985  __mutex_add_waiter(lock, &waiter, &lock->wait_list) (append @waiter at the tail, FIFO, and set MUTEX_FLAG_WAITERS if it is the first waiter)
991  Else
996  ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx) (insert in stamp order, smallest stamp first, waking up waiters that must kill themselves)
997  If ret Then Go to err_early_kill
1000  waiter.ww_ctx = ww_ctx
1003  waiter.task = current
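
The stamp-ordered insertion above (line 996) is what implements the wound/die protocol for ww_mutexes. From the caller's side the protocol surfaces as -EDEADLK plus a back-off retry; a minimal sketch, assuming a hypothetical lock class and helper name:

    #include <linux/kernel.h>               /* swap() */
    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_ww_class);    /* hypothetical lock class */

    /* Acquire two ww_mutexes in either order, backing off on -EDEADLK. */
    static void lock_two(struct ww_mutex *a, struct ww_mutex *b)
    {
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &my_ww_class);
        ret = ww_mutex_lock(a, &ctx);       /* first lock cannot deadlock */
        WARN_ON(ret);
        while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
            /*
             * The wound/die arbitration in the slowpath told us to back
             * off: drop what we hold, sleep on the contended lock, then
             * retry the other one.
             */
            ww_mutex_unlock(a);
            ww_mutex_lock_slow(b, &ctx);    /* always succeeds */
            swap(a, b);                     /* @a is now the held lock */
        }
        ww_acquire_done(&ctx);
        /* ... work, then ww_mutex_unlock() both and ww_acquire_fini(&ctx) */
    }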
1005  set_current_state(state)
1006  Loop:
1013  If __mutex_trylock(lock) Then Go to acquired (once we hold wait_lock we are serialised against mutex_unlock() handing the lock off to us, so the wakeup is fully serialised)
1022  ret = -EINTR (signal_pending_state(state, current) saw a pending signal; the check runs under wait_lock so the cancellation is ordered against mutex_unlock() and no wake-ups go missing)
1023  Go to err
1026  If use_ww_ctx && ww_ctx Then
1028  If ret (from __ww_mutex_check_kill(lock, &waiter, ww_ctx)) Then Go to err
1032  spin_unlock(&lock->wait_lock)
1033  schedule_preempt_disabled() (called with preemption disabled; returns with preemption disabled, preempt_count must be 1)
1039  If (use_ww_ctx && ww_ctx) || Not first Then (a ww_mutex waiter must always recheck its position, since the wait-list is not FIFO ordered; recompute first = __mutex_waiter_is_first() and, if first, set MUTEX_FLAG_HANDOFF)
1045  set_current_state(state)
1051  If __mutex_trylock(lock) || (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)) Then Break (here we order against unlock: either we see its state change back to RUNNING and fall through the next schedule(), or we see the unlock and acquire)
1055  spin_lock(&lock->wait_lock) (end of the loop body)
1057  spin_lock(&lock->wait_lock) (re-taken after leaving the loop)
1058  acquired:
1059  __set_current_state(TASK_RUNNING) (set_current_state() includes a barrier so that the write of current->state is correctly serialised against the caller's subsequent test of whether to actually sleep)
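
The comment preserved above (line 1059) describes the canonical sleep-wait skeleton that the loop at lines 1006..1055 instantiates; a generic sketch of that pattern, with a hypothetical wake condition:

    #include <linux/sched.h>

    static bool cond_is_true(void);         /* hypothetical: supplied elsewhere */

    static void wait_for_condition(void)
    {
        set_current_state(TASK_UNINTERRUPTIBLE);    /* barrier vs. the test */
        while (!cond_is_true()) {
            schedule();                     /* sleep until woken */
            set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);  /* plain write, no barrier needed */
    }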
1061  If use_ww_ctx && ww_ctx Then
1066  If Not ww_ctx->is_wait_die && Not __mutex_waiter_is_first(lock, &waiter) Then __ww_mutex_check_waiters(lock, ww_ctx) (Wound-Wait: we stole the lock while not first on the list, so anyone behind us might want to wound us)
1071  mutex_remove_waiter(lock, &waiter, current)
1072  If likely(list_empty(&lock->wait_list)) Then __mutex_clear_flag(lock, MUTEX_FLAGS)
1075  debug_mutex_free_waiter(&waiter)
1077  skip_wait:
1079  lock_acquired(&lock->dep_map, ip)
1081  If use_ww_ctx && ww_ctx Then ww_mutex_lock_acquired(ww, ww_ctx) (associate the ww_mutex @ww with the context @ww_ctx under which it was acquired)
1084  spin_unlock(&lock->wait_lock)
1085  preempt_enable()
1086  Return 0
1088  err:
1089  __set_current_state(TASK_RUNNING)
1090  mutex_remove_waiter(lock, &waiter, current)
1091  err_early_kill:
1092  spin_unlock(&lock->wait_lock)
1093  debug_mutex_free_waiter(&waiter)
1094  mutex_release(&lock->dep_map, ip)
1095  preempt_enable()
1096  Return ret
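
The err path above is how -EINTR propagates out when state is TASK_INTERRUPTIBLE (or TASK_KILLABLE). Callers of the interruptible variants must be prepared for it; a minimal sketch with a hypothetical lock:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(io_lock);           /* hypothetical example mutex */

    static int my_ioctl_handler(void)
    {
        /* TASK_INTERRUPTIBLE: a pending signal aborts the wait. */
        if (mutex_lock_interruptible(&io_lock))
            return -ERESTARTSYS;            /* let the signal run, then retry */
        /* ... critical section ... */
        mutex_unlock(&io_lock);
        return 0;
    }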
Caller:

Name
__mutex_lock
__ww_mutex_lock
mutex_lock_io_nested
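
All three callers funnel into this one common routine; for reference, the non-ww wrapper in v5.5 simply fixes ww_ctx to NULL (simplified from kernel/locking/mutex.c):

    /* Simplified from kernel/locking/mutex.c (v5.5). */
    static int __sched
    __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
                 struct lockdep_map *nest_lock, unsigned long ip)
    {
        return __mutex_lock_common(lock, state, subclass, nest_lock, ip,
                                   NULL, false);
    }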