Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: kernel/sched/core.c  Create Date: 2022-07-28 09:36:28
Last Modify: 2022-05-22 13:40:38  Copyright © Brick

Name:__sched_setscheduler

Proto:static int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi)

Type:int

Parameter:

Type                          Name
struct task_struct *          p
const struct sched_attr *     attr
bool                          user
bool                          pi
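
The callers listed at the end of this report (sched_setattr(), and sched_setscheduler() via _sched_setscheduler()) all funnel into __sched_setscheduler(). For orientation, here is a minimal userspace sketch of one such path, issuing the sched_setattr(2) syscall directly; the struct layout is hand-copied to match the VER0 layout in include/uapi/linux/sched/types.h, and the name my_sched_attr is local to this example.

#define _GNU_SOURCE
#include <sched.h>          /* SCHED_FIFO */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hand-copied VER0 layout of struct sched_attr; named my_sched_attr to avoid
 * clashing with any libc definition. */
struct my_sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;   /* for SCHED_FIFO / SCHED_RR */
	uint64_t sched_runtime;    /* for SCHED_DEADLINE */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct my_sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_FIFO;
	attr.sched_priority = 10;

	/* pid 0 == calling thread; this reaches __sched_setscheduler(p, attr,
	 * user=true, pi=true) in the kernel. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1) {
		perror("sched_setattr");   /* typically EPERM without CAP_SYS_NICE */
		return 1;
	}
	puts("running SCHED_FIFO, priority 10");
	return 0;
}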
4769  newprio = If dl_policy(attr->sched_policy) Then MAX_DL_PRIO - 1 (SCHED_DEADLINE tasks have negative priorities, i.e. higher than any RT or NORMAL/BATCH task) Else MAX_RT_PRIO - 1 - attr->sched_priority
4771  oldpolicy = -1
4772  policy = attr->sched_policy
4776  queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK
4780  BUG_ON(pi && in_interrupt())
4781  recheck :
4783  If policy < 0 Then
4784  reset_on_fork = p->sched_reset_on_fork
4785  policy = oldpolicy = p->policy
4786  Else
4787  reset_on_fork = Not Not (attr->sched_flags & SCHED_FLAG_RESET_ON_FORK)
4789  If Not valid_policy(policy) Then Return -EINVAL
4793  If attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV) Then Return -EINVAL
4801  If (p->mm && attr->sched_priority > MAX_USER_RT_PRIO - 1) || (Not p->mm && attr->sched_priority > MAX_RT_PRIO - 1) Then Return -EINVAL
4804  If (dl_policy(policy) && Not __checkparam_dl(attr)) || (rt_policy(policy) != (attr->sched_priority != 0)) Then Return -EINVAL
4811  If user && Not capable(CAP_SYS_NICE) Then
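
Lines 4769 and 4793-4804 define and validate the kernel's internal priority: SCHED_DEADLINE maps below 0 (above every RT priority), and a SCHED_FIFO/SCHED_RR priority p maps to MAX_RT_PRIO - 1 - p (lower value == higher priority). A standalone sketch of that arithmetic, using the v5.5 constants MAX_RT_PRIO = MAX_USER_RT_PRIO = 100 (include/linux/sched/prio.h) and MAX_DL_PRIO = 0 (include/linux/sched/deadline.h); this is not kernel code, just the same computation.

#include <stdio.h>

#define MAX_USER_RT_PRIO 100                 /* include/linux/sched/prio.h */
#define MAX_RT_PRIO      MAX_USER_RT_PRIO
#define MAX_DL_PRIO      0                   /* include/linux/sched/deadline.h */

/* Internal prio: lower value means higher priority. */
static int newprio(int is_dl, unsigned int sched_priority)
{
	return is_dl ? MAX_DL_PRIO - 1 : MAX_RT_PRIO - 1 - (int)sched_priority;
}

int main(void)
{
	printf("SCHED_FIFO  prio  1 -> %d\n", newprio(0, 1));   /* 98 */
	printf("SCHED_FIFO  prio 99 -> %d\n", newprio(0, 99));  /* 0  */
	printf("SCHED_DEADLINE      -> %d\n", newprio(1, 0));   /* -1, above all RT */
	return 0;
}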
4812  If fair_policy(policy) Then
4818  If rt_policy(policy) Then
4823  If policy != p->policy && Not rlim_rtprio Then Return -EPERM
4838  If dl_policy(policy) Then Return -EPERM
4845  If task_has_idle_policy(p) && Not idle_policy(policy) Then
4851  If Not check_same_owner(p) (the target task's UID must match the current process's) Then Return -EPERM
4855  If p->sched_reset_on_fork && Not reset_on_fork Then Return -EPERM
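
The block at lines 4811-4855 is what lets unprivileged tasks lower, but not raise, their scheduling class and priority: without CAP_SYS_NICE an RT priority request is bounded by RLIMIT_RTPRIO, SCHED_DEADLINE is refused outright, and only the task owner may make the change. A small userspace sketch (hypothetical example, not kernel code) showing the RLIMIT_RTPRIO rule from line 4823 in action:

#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	struct sched_param sp = { .sched_priority = 10 };

	getrlimit(RLIMIT_RTPRIO, &rl);
	printf("RLIMIT_RTPRIO soft limit: %llu\n",
	       (unsigned long long)rl.rlim_cur);

	/* pid 0 == calling process */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
		/* EPERM expected when unprivileged and the request exceeds the rlimit */
		perror("sched_setscheduler");
	else
		puts("SCHED_FIFO priority 10 granted");
	return 0;
}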
4859  If user Then
4860  If attr->sched_flags & SCHED_FLAG_SUGOV Then Return -EINVAL
4863  retval = security_task_setscheduler(p)
4864  If retval Then Return retval
4869  If attr->sched_flags & SCHED_FLAG_UTIL_CLAMP Then
4870  retval = uclamp_validate(p, attr)
4871  If retval Then Return retval
4875  If pi Then cpuset_read_lock()
4885  rq = task_rq_lock(p, & rf) - lock p->pi_lock and lock the rq @p resides on
4886  update_rq_clock(rq)
4891  If p == rq->stop Then
4892  retval = -EINVAL
4893  Go to unlock
4900  If unlikely(policy == p->policy) Then
4901  If fair_policy(policy) && attr->sched_nice != task_nice(p) Then Go to change
4903  If rt_policy(policy) && attr->sched_priority != p->rt_priority Then Go to change
4905  If dl_policy(policy) && dl_param_changed(p, attr) Then Go to change
4907  If attr->sched_flags & SCHED_FLAG_UTIL_CLAMP Then Go to change
4910  p->sched_reset_on_fork = reset_on_fork
4911  retval = 0
4912  Go to unlock
4914  change :
4916  If user Then apply the RT-group and SCHED_DEADLINE bandwidth admission checks (CONFIG_RT_GROUP_SCHED / CONFIG_SMP blocks, elided by this report); on failure retval = -EPERM and Go to unlock
4949  If unlikely(oldpolicy != -1 && oldpolicy != p->policy) Then
4950  policy = oldpolicy = -1
4951  task_rq_unlock(rq, p, & rf)
4952  If pi Then cpuset_read_unlock()
4954  Go to recheck
4962  If (dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr) (accepting a new deadline task, or new parameters for an existing one, must stay within the bandwidth constraints) Then
4963  retval = -EBUSY
4964  Go to unlock
4967  p->sched_reset_on_fork = reset_on_fork
4968  oldprio = p->prio
4970  If pi Then
4978  new_effective_prio = rt_effective_prio(p, newprio)
4979  If new_effective_prio == oldprio Then queue_flags &= ~DEQUEUE_MOVE
4983  queued = task_on_rq_queued(p)
4984  running = task_current(rq, p)
4985  If queued Then dequeue_task(rq, p, queue_flags)
4987  If running Then put_prev_task(rq, p)
4990  prev_class = p->sched_class
4992  __setscheduler(rq, p, attr, pi) - actually do the priority change; must hold pi & rq lock
4993  __setscheduler_uclamp(p, attr)
4995  If queued Then
5000  If oldprio < p->prio Then queue_flags |= ENQUEUE_HEAD
5003  enqueue_task(rq, p, queue_flags)
5005  If running Then set_next_task(rq, p)
5008  check_class_changed(rq, p, prev_class, oldprio) - switched_from, switched_to and prio_changed must NOT drop rq->lock; any balancing is deferred to balance_callback()
5011  preempt_disable()
5012  task_rq_unlock(rq, p, & rf)
5014  If pi Then
5015  cpuset_read_unlock()
5016  rt_mutex_adjust_pi(p)
5020  balance_callback(rq)
5021  preempt_enable()
5023  Return 0
5025  unlock :
5026  task_rq_unlock(rq, p, & rf)
5027  If pi Then cpuset_read_unlock()
5029  Return retval
Caller
Name                     Description
_sched_setscheduler
sched_setattr
sched_setattr_nocheck
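
For context, the three callers above differ mainly in the user and pi arguments they pass down. A simplified sketch, paraphrased from kernel/sched/core.c v5.5 (the real _sched_setscheduler() also handles the legacy SCHED_RESET_ON_FORK policy bit, omitted here, so treat the bodies as approximate):

/* Paraphrased from kernel/sched/core.c (v5.5); simplified, not verbatim. */

/* sched_setscheduler() and sched_setscheduler_nocheck() go through this helper: */
static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice     = PRIO_TO_NICE(p->static_prio),
	};

	/* "check" selects whether capability/rlimit checks apply ("user" argument). */
	return __sched_setscheduler(p, &attr, check, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);    /* checked, with PI */
}

/* Kernel-internal variant: skips the permission checks (user = false). */
int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}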