Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: arch/x86/include/asm/current.h Create Date:2022-07-28 05:34:40
Last Modify:2020-03-12 14:18:49 Copyright©Brick
home page Tree
Annotation kernel can get tool activityDownload SCCTChinese

Name:get_current

Proto:static __always_inline struct task_struct *get_current(void)

Type:struct task_struct

Parameter:Nothing

15  Return this_cpu_read() makes gcc load the percpu variable every time it is* accessed while this_cpu_read_stable() allows the value to be cached.* this_cpu_read_stable() is more efficient and can be used if its value* is guaranteed to be valid across cpus(current_task)
Caller
NameDescribe
dump_stack_print_infodump_stack_print_info - print generic debug info for dump_stack()*@log_lvl: log level* Arch-specific dump_stack() implementations can use this function to* print out the same debug information as the generic dump_stack().
irqsafe1_hard_spin_12
irqsafe1_hard_spin_21
irqsafe1_hard_rlock_12
irqsafe1_hard_rlock_21
irqsafe1_hard_wlock_12
irqsafe1_hard_wlock_21
irqsafe1_soft_spin_12
irqsafe1_soft_spin_21
irqsafe1_soft_rlock_12
irqsafe1_soft_rlock_21
irqsafe1_soft_wlock_12
irqsafe1_soft_wlock_21
irqsafe2B_hard_spin_12
irqsafe2B_hard_spin_21
irqsafe2B_hard_rlock_12
irqsafe2B_hard_rlock_21
irqsafe2B_hard_wlock_12
irqsafe2B_hard_wlock_21
irqsafe2B_soft_spin_12
irqsafe2B_soft_spin_21
irqsafe2B_soft_rlock_12
irqsafe2B_soft_rlock_21
irqsafe2B_soft_wlock_12
irqsafe2B_soft_wlock_21
irqsafe3_hard_spin_123
irqsafe3_hard_spin_132
irqsafe3_hard_spin_213
irqsafe3_hard_spin_231
irqsafe3_hard_spin_312
irqsafe3_hard_spin_321
irqsafe3_hard_rlock_123
irqsafe3_hard_rlock_132
irqsafe3_hard_rlock_213
irqsafe3_hard_rlock_231
irqsafe3_hard_rlock_312
irqsafe3_hard_rlock_321
irqsafe3_hard_wlock_123
irqsafe3_hard_wlock_132
irqsafe3_hard_wlock_213
irqsafe3_hard_wlock_231
irqsafe3_hard_wlock_312
irqsafe3_hard_wlock_321
irqsafe3_soft_spin_123
irqsafe3_soft_spin_132
irqsafe3_soft_spin_213
irqsafe3_soft_spin_231
irqsafe3_soft_spin_312
irqsafe3_soft_spin_321
irqsafe3_soft_rlock_123
irqsafe3_soft_rlock_132
irqsafe3_soft_rlock_213
irqsafe3_soft_rlock_231
irqsafe3_soft_rlock_312
irqsafe3_soft_rlock_321
irqsafe3_soft_wlock_123
irqsafe3_soft_wlock_132
irqsafe3_soft_wlock_213
irqsafe3_soft_wlock_231
irqsafe3_soft_wlock_312
irqsafe3_soft_wlock_321
irqsafe4_hard_spin_123
irqsafe4_hard_spin_132
irqsafe4_hard_spin_213
irqsafe4_hard_spin_231
irqsafe4_hard_spin_312
irqsafe4_hard_spin_321
irqsafe4_hard_rlock_123
irqsafe4_hard_rlock_132
irqsafe4_hard_rlock_213
irqsafe4_hard_rlock_231
irqsafe4_hard_rlock_312
irqsafe4_hard_rlock_321
irqsafe4_hard_wlock_123
irqsafe4_hard_wlock_132
irqsafe4_hard_wlock_213
irqsafe4_hard_wlock_231
irqsafe4_hard_wlock_312
irqsafe4_hard_wlock_321
irqsafe4_soft_spin_123
irqsafe4_soft_spin_132
irqsafe4_soft_spin_213
irqsafe4_soft_spin_231
irqsafe4_soft_spin_312
irqsafe4_soft_spin_321
irqsafe4_soft_rlock_123
irqsafe4_soft_rlock_132
irqsafe4_soft_rlock_213
irqsafe4_soft_rlock_231
irqsafe4_soft_rlock_312
irqsafe4_soft_rlock_321
irqsafe4_soft_wlock_123
irqsafe4_soft_wlock_132
irqsafe4_soft_wlock_213
irqsafe4_soft_wlock_231
irqsafe4_soft_wlock_312
irqsafe4_soft_wlock_321
irq_inversion_hard_spin_123
irq_inversion_hard_spin_132
irq_inversion_hard_spin_213
irq_inversion_hard_spin_231
irq_inversion_hard_spin_312
irq_inversion_hard_spin_321
irq_inversion_hard_rlock_123
irq_inversion_hard_rlock_132
irq_inversion_hard_rlock_213
irq_inversion_hard_rlock_231
irq_inversion_hard_rlock_312
irq_inversion_hard_rlock_321
irq_inversion_hard_wlock_123
irq_inversion_hard_wlock_132
irq_inversion_hard_wlock_213
irq_inversion_hard_wlock_231
irq_inversion_hard_wlock_312
irq_inversion_hard_wlock_321
irq_inversion_soft_spin_123
irq_inversion_soft_spin_132
irq_inversion_soft_spin_213
irq_inversion_soft_spin_231
irq_inversion_soft_spin_312
irq_inversion_soft_spin_321
irq_inversion_soft_rlock_123
irq_inversion_soft_rlock_132
irq_inversion_soft_rlock_213
irq_inversion_soft_rlock_231
irq_inversion_soft_rlock_312
irq_inversion_soft_rlock_321
irq_inversion_soft_wlock_123
irq_inversion_soft_wlock_132
irq_inversion_soft_wlock_213
irq_inversion_soft_wlock_231
irq_inversion_soft_wlock_312
irq_inversion_soft_wlock_321
irq_read_recursion_hard_123
irq_read_recursion_hard_132
irq_read_recursion_hard_213
irq_read_recursion_hard_231
irq_read_recursion_hard_312
irq_read_recursion_hard_321
irq_read_recursion_soft_123
irq_read_recursion_soft_132
irq_read_recursion_soft_213
irq_read_recursion_soft_231
irq_read_recursion_soft_312
irq_read_recursion_soft_321
check_preemption_disabled
should_failThis code is stolen from failmalloc-1.0* http://www.nongnu.org/failmalloc/
validate_nla
__nla_validate_parse
suppress_report
ubsan_prologue
ubsan_epilogue
restore_sigcontext
setup_sigcontext
get_sigframe
__setup_frame
__setup_rt_frame
handle_signal
__die
read_ldt
write_ldt
get_align_maskAlign a virtual address to avoid aliasing in the I$ on AMD F15h.
find_start_endSYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,unsigned long, prot, unsigned long, flags,unsigned long, fd, unsigned long, off)long error;error = -EINVAL;if (off & ~PAGE_MASK)goto out;error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off
arch_get_unmapped_area
arch_get_unmapped_area_topdown
aout_dump_debugregsDump the debug register contents to the user.* We can't dump our per cpu values because it* may contain cpu wide breakpoint, something that* doesn't belong to the current task.* TODO: include non-ptrace user breakpoints (perf)
hw_breakpoint_restore
hw_breakpoint_handlerHandle debug exception notifications.* Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.* NOTIFY_DONE returned if one of the following conditions is true.* i) When the causative address is from user-space and the exception
copy_thread_tls
arch_setup_new_execCalled immediately after a successful exec.
arch_align_stack
kernel_fpu_begin
fpu__saveSave the FPU state (mark it for reload if necessary):* This only ever gets called for the current task.
fpu__copy
fpu__initializeActivate the current task's in-memory FPU context,* if it has not been used before:
fpu__prepare_readThis function must be called before we read a task's fpstate
fpu__prepare_writeThis function must be called before we write a task's fpstate.* Invalidate any cached FPU registers.* After this function call, after registers in the fpstate are* modified and the child task has woken up, the child task will
fpu__dropDrops current FPU state: deactivates the fpregs and* the fpstate. NOTE: it still leaves previous contents* in the fpregs in the eager-FPU case.* This function can be used in cases where we know that* a state-restore is coming: either an explicit one,
fpu__clearClear the FPU state back to init state.* Called by sys_execve(), by the signal handler code and by various* error paths.
fpregs_mark_activate
get_xsave_field_ptrThis wraps up the common operations that need to occur when retrieving* data from xsave state. It first ensures that the current task was* using the FPU and retrieves the data in to a buffer. It then calculates
ptrace_triggered
get_free_idxsys_alloc_thread_area: get a yet unused TLS descriptor index.
set_tls_desc
move_myself
pseudo_lock_dev_mmap
save_v86_state
do_sys_vm86
set_vflags_longIt is correct to call set_IF(regs) from the set_vflags_** functions. However someone forgot to call clear_IF(regs)* in the opposite case.* After the command sequence CLI PUSHF STI POPF you should* end up with interrupts disabled, but you ended up with
set_vflags_short
get_vflags
do_intThere are so many possible reasons for this function to return* VM86_INTx, so adding another doesn't bother me. We can expect* userspace programs to be able to handle it. (Getting a problem* in userspace is always better than an Oops anyway.) [KD]
handle_vm86_trap
handle_vm86_fault
riprel_pre_xolIf we're emulating a rip-relative instruction, save the contents* of the scratch register and store the target address in that register.
riprel_post_xol
default_post_xol_opWe have to fix things up as follows:* Typically, the new ip is relative to the copied instruction
arch_uprobe_pre_xolarch_uprobe_pre_xol - prepare to execute out of line.*@auprobe: the probepoint information.*@regs: reflects the saved user state of current task.
arch_uprobe_post_xolCalled after single-stepping. To avoid the SMP problems that can* occur when we temporarily put back the original opcode to* single-step, we single-stepped a copy of the instruction.* This function prepares to resume execution after the single-step.
arch_uprobe_abort_xolThis function gets called when XOL instruction either gets trapped or* the thread has a fatal signal. Reset the instruction pointer to its* probed address for the potential restart or for post mortem analysis.
arch_uretprobe_hijack_return_addr
__mmdropCalled when the last reference to the mm* is dropped: either by a lazy thread or by* mmput. Free the page directory and the mm.
mm_init
mm_access
copy_mm
copy_fs
copy_files
copy_io
copy_sighand
copy_signal
SYSCALL_DEFINE1
copy_processCreate a new process
check_unshare_flagsCheck constraints on flags passed to the unshare system call.
unshare_fsUnshare the filesystem structure if it is being shared
unshare_fdUnshare file descriptor table if it is being shared
ksys_unshareunshare allows a process to 'unshare' part of the process* context which was originally shared using clone. copy_** functions used by do_fork() cannot be used here directly* because they modify an inactive task_struct that is being* constructed
SYSCALL_DEFINE1
__warn
exit_mmTurn us into a lazy TLB process if we* aren't already..
do_exit
do_group_exitTake down every thread in the group. This is called by fatal signals* as well as by sys_exit_group (below).
wait_task_zombieHandle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold* read_lock(&tasklist_lock) on entry. If we return zero, we still hold* the lock and this task is uninteresting. If we return nonzero, we have
do_wait
__do_softirq
warn_sysctl_write
deprecated_sysctl_warning
ptrace_tracemeptrace_traceme -- helper for PTRACE_TRACEME* Performs checks and sets PT_PTRACED.* Should be used by all ptrace implementations for PTRACE_TRACEME.
ptrace_setoptions
calculate_sigpending
print_dropped_signal
task_join_group_stop
dequeue_signalDequeue a signal and return the element to the caller, which is* expected to free it.* All callers have to hold the siglock.
sigqueue_free
may_ptrace_stop
ptrace_stopThis must be called with current->sighand->siglock held.* This should be the path for all ptrace stops.* We always set current->last_siginfo while stopped here.* That makes it a way to test a stopped process for
ptrace_notify
do_signal_stopdo_signal_stop - handle group stop for SIGSTOP and other stop signals*@signr: signr causing group stop if initiating* If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr* and participate in it
do_jobctl_trapdo_jobctl_trap - take care of ptrace jobctl traps* When PT_SEIZED, it's used for both group stop and explicit* SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with* accompanying siginfo. If stopped, lower eight bits of exit_code contain
do_freezer_trapdo_freezer_trap - handle the freezer jobctl trap* Puts the task into frozen state, if only the task is not about to quit.* In this case it drops JOBCTL_TRAP_FREEZE.* CONTEXT:* Must be called with @current->sighand->siglock held,
ptrace_signal
get_signal
signal_deliveredsignal_delivered - *@ksig: kernel signal struct*@stepping: nonzero if debugger single-step or block-step in use* This function should be called when a signal has successfully been* delivered
sys_restart_syscall
__set_task_blocked
set_user_sigmaskThe api helps set app-provided sigmasks
set_compat_user_sigmask
SYSCALL_DEFINE4sys_rt_sigprocmask - change the list of currently blocked signals*@how: whether to add, remove, or set signals*@nset: stores pending signals*@oset: previous value of signal mask if non-null*@sigsetsize: size of sigset_t type
COMPAT_SYSCALL_DEFINE4
do_sigpending
kernel_sigactionFor kthreads only, must not be used if cloned with CLONE_SIGHAND
do_sigaltstack
SYSCALL_DEFINE3sys_sigprocmask - examine and change blocked signals*@how: whether to add, remove, or set signals*@nset: signals to add or remove (if non-null)*@oset: previous value of signal mask if non-null* Some platforms have their own version with special arguments;
sigsuspend
sys_getppid
do_sys_times
SYSCALL_DEFINE2This needs some heavy checking
set_special_pids
ksys_setsid
override_releaseWork around broken programs that cannot handle "Linux 3.0".* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40* And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be* 2.6.60.
SYSCALL_DEFINE2
SYSCALL_DEFINE2Only setdomainname; getdomainname can be implemented by calling* uname()
SYSCALL_DEFINE2Back compatibility for getrlimit. Needed for some apps.
COMPAT_SYSCALL_DEFINE2
SYSCALL_DEFINE1
prctl_set_mm
SYSCALL_DEFINE5
call_usermodehelper_exec_asyncThis is the task which runs the usermode application
process_one_workprocess_one_work - process single work*@worker: self*@work: work to process* Process @work
set_pf_worker
check_flush_dependencycheck_flush_dependency - check for flush dependency sanity*@target_wq: workqueue being flushed*@target_work: work item being flushed (NULL for workqueue flushes)* %current is trying to flush the whole @target_wq or @target_work on it
set_kthread_struct
kthread
create_kthread
kthreadd
kthread_associate_blkcgkthread_associate_blkcg - associate blkcg to current kthread*@css: the cgroup info* Current thread must be a kthread. The thread is running jobs on behalf of* other threads. In some cases, we expect the jobs attach cgroup info of
kthread_blkcgkthread_blkcg - get associated blkcg css of current kthread* Current thread must be a kthread.
unshare_nsproxy_namespacesCalled from unshare. Unshare all the namespaces part of nsproxy.* On success, returns the new nsproxy.
__put_cred__put_cred - Destroy a set of credentials*@cred: The record to release* Destroy a set of credentials on which no references remain.
override_credsoverride_creds - Override the current process's subjective credentials*@new: The credentials to be assigned* Install a set of temporary override subjective credentials on the current* process, returning the old set for later reversion.
revert_credsrevert_creds - Revert a temporary subjective credentials override*@old: The credentials to be restored* Revert a temporary set of override subjective credentials to an old set,* discarding the override set.
migrate_to_reboot_cpu
async_schedule_node_domainasync_schedule_node_domain - NUMA specific version of async_schedule_domain*@func: function to execute asynchronously*@data: data pointer to pass to the function*@node: NUMA node that we want to schedule this on or close to*@domain: the domain
sched_forkfork()/clone()-time setup:
finish_task_switchfinish_task_switch - clean up after a task-switch*@prev: the thread we just switched away from
schedule_tailschedule_tail - first thing a freshly forked thread must call.*@prev: the thread we just switched away from.
do_task_dead
schedule_idlesynchronize_rcu_tasks() makes sure that no task is stuck in preempted* state (have scheduled out non-voluntarily) by making sure that all* tasks have either left the run queue or have gone into user space
do_sched_yieldsys_sched_yield - yield the current processor to other threads.* This function yields the current CPU to other tasks. If there are no* other threads running on this CPU then this function will return.* Return: 0.
io_schedule_prepare
io_schedule_finish
play_idle_precise
is_kthread_should_stop
ipi_sync_rq_state
membarrier_private_expedited
psi_memstall_enterpsi_memstall_enter - mark the beginning of a memory stall section*@flags: flags to handle nested sections* Marks the calling task as being stalled due to a lack of memory,* such as waiting for a refault or performing reclaim.
psi_memstall_leavepsi_memstall_leave - mark the end of an memory stall section*@flags: flags to handle nested memdelay sections* Marks the calling task as no longer stalled due to lack of memory.
graph_lock
graph_unlock
lockdep_off
lockdep_on
lockdep_init_mapInitialize a lock instance's lock-class mapping info:
lock_set_class
lock_downgrade
lock_acquireWe are not always called with irqs disabled - do that here,* and also avoid lockdep recursion:
lock_release
lock_is_held_type
lock_pin_lock
lock_repin_lock
lock_unpin_lock
lockdep_reset
free_zapped_rcu
lockdep_free_key_range_regUsed in module
print_held_locks_bug
debug_check_no_locks_held
mark_wakeup_next_waiterRemove the top waiter from the current tasks pi waiter tree and* queue it up.* Called with lock->wait_lock held and interrupts disabled.
remove_waiterRemove a waiter from a lock and give up* Must be called with lock->wait_lock held and interrupts disabled. I must* have just failed to try_to_take_rt_mutex().
debug_rt_mutex_print_deadlock
spin_dump
rwlock_bug
freeze_processesfreeze_processes - Signal user space processes to enter the refrigerator.* The current thread will not be frozen. The same process that calls* freeze_processes must later call thaw_processes.* On success, returns 0
check_syslog_permissions
devkmsg_write
irq_thread_dtor
debug_lockdep_rcu_enabled
synchronize_rcu_trivialDefinitions for trivial CONFIG_PREEMPT=n-only torture testing.* This implementation does not necessarily work well with CPU hotplug.
klp_ftrace_handler
klp_copy_processCalled from copy_process() during fork
check_for_stack
__refrigeratorRefrigerator is place where frozen processes are stored :-).
set_freezableset_freezable - make %current freezable* Mark %current freezable and enter refrigerator if necessary.
schedule_timeoutschedule_timeout - sleep until timeout*@timeout: timeout value in jiffies* Make the current task sleep until @timeout jiffies have* elapsed
do_nanosleep
hrtimer_nanosleep
posix_timer_by_id
posix_timer_add
do_timer_createCreate a POSIX.1b interval timer.
__lock_timerCLOCKs: The POSIX standard calls for a couple of clocks and allows us* to implement others
SYSCALL_DEFINE1Delete a POSIX.1b interval timer.
SYSCALL_DEFINE4
lookup_taskFunctions for validating access to tasks.
check_rlimit
do_cpu_nanosleep
posix_cpu_nsleep
SYSCALL_DEFINE3
COMPAT_SYSCALL_DEFINE3
sys_ni_posix_timers
SYSCALL_DEFINE4
get_futex_keyget_futex_key() - Get parameters which are the keys for a futex*@uaddr: virtual address of the futex*@fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED*@key: address where result is stored
fault_in_user_writeablefault_in_user_writeable() - Fault in user address and verify RW access*@uaddr: pointer to faulting user space address* Slow path to fixup the fault we just took in the atomic write* access to @uaddr
refill_pi_state_cachePI code:
alloc_pi_state
put_pi_stateDrops a reference to the pi_state object and frees or caches it* when the last reference is gone.
futex_atomic_op_inuser
__queue_me
futex_wait
futex_wait_requeue_pifutex_wait_requeue_pi() - Wait on uaddr and take uaddr2*@uaddr: the futex we initially wait on (non-pi)*@flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc
SYSCALL_DEFINE2sys_set_robust_list() - Set the robust-futex list head of a task*@head: pointer to the list-head*@len: length of the list-head, as userspace expects
COMPAT_SYSCALL_DEFINE2
do_init_moduleThis is where the real work happens.* Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb* helper command 'lx-symbols'.
fill_acWrite an accounting entry for an exiting process* The acct_process() call is the workhorse of the process* accounting system. The struct acct is built here and then written* into the accounting file. This function should only be called from
do_acct_process
acct_collectacct_collect - collect accounting information into pacct_struct*@exitcode: task exit code*@group_dead: not 0, if this thread is the last one in the process.
crash_save_cpu
COMPAT_SYSCALL_DEFINE3
current_cgns_cgroup_from_rootlook up cgroup associated with current task's cgroup namespace on the* specified hierarchy
apply_cgroup_root_flags
cgroup_init_fs_contextInitialise the cgroup filesystem creation/reconfiguration context. Notably,* we select the namespace we're going to use.
cgroup_file_write
cgroup_procs_write_permission
css_create
proc_cgroup_showproc_cgroup_show()* - Print task's cgroup paths into seq_file, one line for each hierarchy* - Used for /proc//cgroup.
cgroup1_reconfigure
cgroup_enter_frozenEnter frozen/stopped state, if not yet there. Update cgroup's counters,* and revisit the state of the cgroup, if necessary.
cgroup_leave_frozenConditionally leave frozen/stopped state
cpuset_forkMake sure the new task conform to the current state of its parent,* which could have been changed by cpuset just after it inherits the* state from the parent and before it sits on the cgroup's task list.
cpuset_init_current_mems_allowed
cpuset_nodemask_valid_mems_allowedcpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed*@nodemask: the nodemask to be checked* Are any of the nodes in the nodemask allowed in current->mems_allowed?
__cpuset_node_allowedcpuset_node_allowed - Can we allocate on a memory node?*@node: is this an allowed node?*@gfp_mask: memory allocation flags* If we're in interrupt, yes, we can always allocate. If @node is set in* current's mems_allowed, yes
cpuset_spread_nodecpuset_mem_spread_node() - On which node to begin search for a file page* cpuset_slab_spread_node() - On which node to begin search for a slab page* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for* tasks in a cpuset with is_spread_page or
cpuset_mem_spread_node
cpuset_slab_spread_node
cpuset_print_current_mems_allowedcpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed* Description: Prints current's name, cpuset name, and cached copy of its* mems_allowed to the kernel log.
userns_install
zap_pid_ns_processes
audit_receive_msg
audit_get_tty
audit_log_task_info
audit_set_loginuidaudit_set_loginuid - set current task's loginuid*@loginuid: loginuid value* Returns 0.* Called (set) from fs/proc/base.c::proc_loginuid_write().
audit_log_execve_info
audit_log_exit
__audit_getname__audit_getname - add a name to the list*@name: name to add* Add a name to the list of audit names for this context.* Called from fs/namei.c:getname().
audit_log_task
kcov_task_init
kcov_ioctl_locked
kcov_common_handleSee the comment before kcov_remote_start() for usage details.
kgdb_flush_swbreak_addrSome architectures need cache flushes when we set/clear a* breakpoint:
gdb_cmd_queryHandle the 'q' query packets
gdb_serial_stubThis function performs all gdbserial command processing
seccomp_may_assign_mode
secure_computing_strict
prctl_get_seccomp
seccomp_set_mode_strictseccomp_set_mode_strict: internal function for setting strict seccomp* Once current->seccomp.mode is non-zero, it may not be changed.* Returns 0 on success or -EINVAL on failure.
get_uts
__delayacct_blkio_start
__delayacct_freepages_start
__delayacct_freepages_end
__delayacct_thrashing_start
__delayacct_thrashing_end
__rb_allocate_pages
probe_wakeup
move_to_next_cpu
ftrace_push_return_traceAdd a function return address to the trace stack on thread info.
function_graph_enter
ftrace_pop_return_traceRetrieve a function return address to the trace stack on thread info.
ftrace_return_to_handler the original return address.
filter_pred_commFilter predicate for COMM.
____bpf_probe_write_user
bpf_get_probe_write_proto
____bpf_send_signal
process_fetch_insnNote that we don't verify it, since the code does not come from user space
fetch_store_stringFetch a null-terminated string. Caller MUST set *(u32 *)dest with max* length and relative data location.
fetch_store_strlenReturn the length of string -- including null terminal byte
translate_user_vaddr
uprobe_dispatcher
uretprobe_dispatcher
dev_map_update_elem
dev_map_hash_update_elem
bpf_prog_offload_init
bpf_map_offload_map_alloc
stack_map_get_build_id_offset
perf_event_enable_on_execEnable all of a task's events that have been marked enable-on-exec.* This expects task == current.
perf_event_task_enable
perf_event_task_disable
perf_sample_regs_user
perf_virt_to_phys
perf_iterate_sbIterate all events that need to receive side-band events.* For new callers; ensure that account_pmu_sb_event() includes* your event, otherwise it might not get delivered.
perf_event_exec
perf_addr_filters_adjustAdjust all task's events' filters to the new vma
get_perf_callchain
__create_xol_area
get_xol_areaget_xol_area - Allocate process's xol_area if necessary.* This area will be used for storing instructions for execution out of line.* Returns the allocated area or NULL.
uprobe_get_trap_addr
get_utaskAllocate a uprobe_task object for the task if if necessary.* Called when the thread hits a breakpoint.* Returns:* - pointer to new uprobe_task on success* - NULL otherwise
uprobe_warn
dup_xol_work
uprobe_copy_processCalled in context of a new clone/fork from copy_process.
get_trampoline_vaddrCurrent area->vaddr notion assume the trampoline address is always* equal area->vaddr.* Returns -1 in case the xol_area is not allocated.
prepare_uretprobe
find_active_uprobe
handler_chain
handle_trampoline
handle_singlestepPerform required fix-ups and disable singlestep.* Allow pending signals to take effect.
uprobe_notify_resumeOn breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and* allows the thread to return from interrupt. After that handle_swbp()* sets utask->active_uprobe.* On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
uprobe_pre_sstep_notifieruprobe_pre_sstep_notifier gets called from interrupt context as part of* notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
uprobe_post_sstep_notifieruprobe_post_sstep_notifier gets called in interrupt context as part of notifier* mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
__context_tracking_entercontext_tracking_enter - Inform the context tracking that the CPU is going* enter user or guest space mode
rseq_get_rseq_cs
stackleak_erase
stackleak_track_stack
do_mount_root
handle_initrd
unaccount_page_cache_page
__add_to_page_cache_locked
dio_warn_stale_pagecacheWarn about a page cache invalidation failure during a direct I/O write.
__generic_file_write_iter__generic_file_write_iter - write data to a file*@iocb: IO state structure (file, offset, etc.)*@from: iov_iter with data to write* This function does all the work needed for actually writing data to a* file
dump_header
out_of_memoryout_of_memory - kill the "best" process when we run out of memory*@oc: pointer to struct oom_control* If we run out of memory, we have the choice between either* killing a random task (bad), letting the system crash (worse)
balance_dirty_pagesbalance_dirty_pages() must be called by processes which are generating dirty* data
balance_dirty_pages_ratelimitedbalance_dirty_pages_ratelimited - balance dirty memory state*@mapping: address_space which was dirtied* Processes which are dirtying memory should call in here once for each page* which was newly dirtied. The function will periodically check the system's
account_page_dirtiedHelper function for set_page_dirty family.* Caller must hold lock_page_memcg().* NOTE: This relies on being atomic wrt interrupts.
account_page_redirtyCall this whenever redirtying a page, to de-account the dirty counters* (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written* counters (NR_WRITTEN, WB_WRITTEN) in long term
may_write_to_inode
current_may_throttleIf a kernel thread (such as nfsd for loop-back mounts) services* a backing device by writing to the page cache it sets PF_LESS_THROTTLE.* In that case we should only throttle if the backing device it is* writing to is congested
shrink_node
throttle_direct_reclaimThrottle direct reclaimers if backing storage is backed by the network* and the PFMEMALLOC reserve for the preferred node is getting dangerously* depleted. kswapd will continue to make progress and wake the processes* when the low watermark is reached.
mem_cgroup_shrink_nodeOnly used by soft limit reclaim. Do not reuse for anything else.
__node_reclaimTry to free up some pages from this node through reclaim.
node_reclaim
shmem_get_unmapped_area
randomize_stack_top
vm_mmap_pgoff
vmacache_valid_mmThis task may be accessing a foreign mm via (for example)* get_user_pages()->find_vma(). The vmacache is task-local and this* task's vmacache pertains to a different mm (ie, its own). There is* nothing we can do here.
vmacache_update
vmacache_find
__mm_populate__mm_populate - populate and/or mlock pages within a range of address space.* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap* flags. VMAs must be already marked with the desired vm_flags, and* mmap_sem must not be held.
get_user_pagesThis is the same as get_user_pages_remote(), just with a* less-flexible calling convention where we assume that the task* and mm being operated on are the current task's and don't allow* passing of a locked parameter. We also obviously don't pass
get_user_pages_lockedWe can leverage the VM_FAULT_RETRY functionality in the page fault* paths better by using either get_user_pages_locked() or* get_user_pages_unlocked().* get_user_pages_locked() is suitable to replace the form:* down_read(&mm->mmap_sem);* do_something()
get_user_pages_unlockedget_user_pages_unlocked() is suitable to replace the form:* down_read(&mm->mmap_sem);* get_user_pages(tsk, mm,
__gup_longterm_unlocked
kobjsizeReturn the total memory allocated for this pointer, not* just what the caller asked for.* Doesn't have to be accurate, i.e. may have races.
__vmalloc_user_flags
SYSCALL_DEFINE1sys_brk() for the most part doesn't need the global kernel* lock, except when an application is doing something nasty* like trying to un-brk an area that has already been mapped* to a regular file. in this case, the unmapping will need
validate_mmap_requestdetermine whether a mapping should be permitted and, if so, what sort of* mapping we're capable of supporting
determine_vm_flagswe've determined that we can make the mapping, now translate what we* now know into VMA flags
do_mmap_privateset up a private mapping or an anonymous shared mapping
do_mmaphandle mapping creation for uClinux
do_munmaprelease a mapping* - under NOMMU conditions the chunk to be unmapped must be backed by a single* VMA, though it need not cover the whole VMA
vm_munmap
do_mremapexpand (or shrink) an existing mapping, potentially moving it at the same* time (controlled by the MREMAP_MAYMOVE flag and available VM space)* under NOMMU conditions, we only permit changing a mapping's size, and only* as long as it stays within the
SYSCALL_DEFINE5
sync_mm_rss
add_mm_rss_vec
print_bad_pteThis function is called to print an error when a bad pte* is found. For example, we might have a PFN-mapped pte in* a region that doesn't allow it.* The calling function must still handle the error.
print_vma_addrPrint the name of a VMA.
do_mincoreDo a chunk of "sys_mincore()". We've already checked* all the arguments, we hold the mmap semaphore: we should* just return the amount of info we're asked for.
SYSCALL_DEFINE3The mincore(2) system call
mlock_fixupmlock_fixup - handle mlock[all]/munlock[all] requests.* Filters out "special" vmas -- VM_LOCKED never gets set for these, and* munlock is a no-op. However, for some special vmas, we go ahead and* populate the ptes.
apply_vma_lock_flags
count_mm_mlocked_page_nrGo through vma areas and sum size of mlocked* vma pages, as return value.* Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT)* is also counted.* Return value: previously mlocked page counts
do_mlock
SYSCALL_DEFINE2
apply_mlockall_flagsTake the MCL_* flags passed into mlockall (or 0 if called from munlockall)* and translate into the appropriate modifications to mm->def_flags and/or the* flags for all current VMAs.* There are a couple of subtleties with this
SYSCALL_DEFINE1
sys_munlockall
SYSCALL_DEFINE1
do_mmap The caller must hold down_write(&current->mm->mmap_sem).
mmap_region
unmapped_area
unmapped_area_topdown
arch_get_unmapped_areaGet an address range which is currently unmapped.* For shmat() with addr=0.* Ugly calling convention alert:* Return value with the low bits set means error value,* ie* if (ret & ~PAGE_MASK)* error = ret;
arch_get_unmapped_area_topdownThis mmap-allocator allocates new areas top-down from below the* stack's low limit (the base):
get_unmapped_area
__vm_munmap
SYSCALL_DEFINE5Emulation of deprecated remap_file_pages() syscall.
do_brk_flags this is really a simplified "do_mmap". it only handles* anonymous maps. eventually we may be able to do some* brk-specific accounting here.
vm_brk_flags
may_expand_vmReturn true if the calling process may expand its vm space by the passed* number of pages
special_mapping_mremap
mprotect_fixup
do_mprotect_pkeypkey==-1 when doing a legacy mprotect()
vma_to_resize
mremap_to
SYSCALL_DEFINE5Expand (or shrink) an existing mapping, potentially moving it at the* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)* MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise* This option implies MREMAP_MAYMOVE.
SYSCALL_DEFINE3MS_SYNC syncs the entire file - including mappings
bad_page
warn_alloc_show_mem
warn_alloc
__alloc_pages_may_oom
__gfp_pfmemalloc_flagsDistinguish requests which really need access to full memory* reserves from oom victims which can live with a portion of it
should_reclaim_retryChecks whether it makes sense to retry the reclaim to make a forward progress* for the given allocation request.* We give up when we either have tried MAX_RECLAIM_RETRIES in a row* without success, or when we couldn't even meet the watermark if we
__alloc_pages_slowpath
madvise_willneedSchedule all required I/O operations. Do not wait for completion.
madvise_free_pte_range
madvise_dontneed_free
madvise_removeApplication wants to free up the pages and associated backing store.* This is effectively punching a hole into the middle of a file.
SYSCALL_DEFINE3The madvise(2) system call
SYSCALL_DEFINE1
mem_cgroup_throttle_swaprate
__frontswap_unuse_pages
hugetlb_no_page
do_set_mempolicySet the process memory policy
do_get_mempolicyRetrieve NUMA policy
new_pageAllocate a new page for page migration based on vma policy
do_mbind
mempolicy_slab_nodeDepending on the memory policy provide a node from which to allocate the* next slab entry.
init_nodemask_of_mempolicy init_nodemask_of_mempolicy* If the current task's mempolicy is "default" [NULL], return 'false'* to indicate default policy
__mpol_dupSlow path of a mempolicy duplicate
slob_free_pages
kmem_freepagesInterface to system's page release.
alternate_node_allocTry allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set.* If we are in_interrupt, then process context, including cpusets and* mempolicy, may not apply and should not be used for allocation policy.
__do_cache_alloc
__free_slab
set_track
kasan_enable_current
kasan_disable_current
print_error_description
print_address_stack_frame
report_enabled
__unmap_and_move
migrate_pagesmigrate_pages - migrate the pages specified in a list, to the free pages* supplied as the target for the page migration*@from: The list of pages to be migrated.*@get_new_page: The function used to allocate free pages to be used
__thp_get_unmapped_area
thp_get_unmapped_area
should_force_charge
get_mem_cgroup_from_currentIf current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
mem_cgroup_oom
mem_cgroup_oom_synchronizemem_cgroup_oom_synchronize - complete memcg OOM handling*@handle: actually kill/wait or just clean up the OOM state* This has to be called at the end of a page fault if the memcg OOM* handler was enabled
mem_cgroup_handle_over_highScheduled by try_charge() to be executed from the userland return path* and reclaims memory over the high limit.
try_charge
kill_procSend all the processes who have the page mapped a signal.* ``action optional'' if they are not immediately affected by the error* ``action required'' if error happened in current execution context
create_objectCreate the metadata (struct kmemleak_object) corresponding to an allocated* memory block and add it to the object_list and object_tree_root.
scan_should_stopMemory scanning is a long process and it needs to be interruptable. This* function checks whether such interrupt condition occurred.
get_vaddr_framesget_vaddr_frames() - map virtual addresses to pfns*@start: starting user address*@nr_frames: number of pages / pfns from start to map*@gup_flags: flags modifying lookup behaviour*@vec: structure which receives pages / pfns of the addresses mapped.
sysvipc_proc_open
ksys_msgget
ksys_msgctl
compat_ksys_msgctl
do_msgsnd
do_msgrcv
ksys_semget
check_qop check_qop: Test if a queued operation sleeps on the semaphore semnum
ksys_semctl
compat_ksys_semctl
get_undo_listIf the task doesn't already have a undo_list, then allocate one* here. We guarantee there is only one thread using this undo list,* and current is THE ONE* If this allocation and assignment succeeds, but later
do_semtimedop
newseg newseg - Create a new shared memory segment*@ns: namespace*@params: ptr to the structure that contains key, size and shmflg* Called with shm_ids.rwsem held as a writer.
ksys_shmget
ksys_shmctl
compat_ksys_shmctl
do_shmatFix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.* NOTE! Despite the name, this is NOT a direct system call entrypoint. The* "raddr" thing points to kernel space, and there has to be a wrapper around* this.
ksys_shmdtdetach and kill segment if marked destroyed.* The work is done in shm_close.
get_ipc
proc_ipc_dointvec_minmax_orphans
proc_ipc_sem_dointvec
mqueue_init_fs_context
wq_addAdds current to info->e_wait_q[sr] before element with smaller prio
do_mq_open
SYSCALL_DEFINE1
get_mq
punt_bios_to_rescuer
bio_alloc_bioset bio_alloc_bioset - allocate a bio for I/O*@gfp_mask: the GFP_* mask given to the slab allocator*@nr_iovecs: number of iovecs to pre-allocate*@bs: the bio_set to allocate from
bio_uncopy_user bio_uncopy_user - finish previously mapped bio*@bio: bio being terminated* Free pages allocated from bio_copy_user_iov() and write back data* to user space in case of a read.
generic_make_requestgeneric_make_request - hand a buffer to its device driver for I/O*@bio: The bio describing the location in memory and on the device.* generic_make_request() is used to make I/O requests of block* devices
submit_biosubmit_bio - submit a bio to the block device layer for I/O*@bio: The &struct bio which describes the I/O* submit_bio() is very similar in purpose to generic_make_request(), and* uses that function to do most of the work. Both are fairly rough
blk_check_plugged
blk_finish_plug blk_finish_plug - mark the end of a batch of submitted I/O*@plug: The &struct blk_plug passed to blk_start_plug()* Description:* Indicate that a batch of I/O submissions is complete. This function* must be paired with an initial call to blk_start_plug()
blk_poll blk_poll - poll for IO completions*@q: the queue*@cookie: cookie passed back at IO submission time*@spin: whether to spin for completions* Description:* Poll for completions on the passed in queue. Returns number of* completed entries found
blk_mq_sched_assign_ioc
scsi_cmd_ioctl
blkcg_maybe_throttle_current blkcg_maybe_throttle_current - throttle the current task if it has been marked* This is only called if we've been marked with set_notify_resume()
blkcg_schedule_throttle blkcg_schedule_throttle - this task needs to check for throttling*@q: the request queue IO was submitted on*@use_memdelay: do we charge this to memory delay for PSI* This is called by the IO controller when we know there's delay accumulated
bfq_bio_merge
bfq_get_queue
bfq_split_bfqqReturns NULL if a new bfqq should be allocated, or the old bfqq if this* was the last process referring to that bfqq.
key_set_index_keyFinalise an index key to include a part of the description actually in the* index key, to set the domain tag and to calculate the hash.
key_change_session_keyringReplace a process's session keyring on behalf of one of its children when* the target process is about to resume userspace execution.
request_key_auth_newCreate an authorisation token for /sbin/request-key or whoever to gain* access to the caller's security data.
cap_inh_is_cappedDetermine whether the inheritable capabilities are limited to the old* permitted set. Returns 1 if they are limited, 0 if they are not.
cap_safe_niceRationale: code calling task_setscheduler, task_setioprio, and* task_setnice, assumes that*
cap_task_prctl cap_task_prctl - Implement process control functions for this security module*@option: The process control function requested*@arg2, @arg3, @arg4, @arg5: The argument data for this function* Allow process control functions (sys_prctl()) to alter
cap_mmap_addr cap_mmap_addr - check if able to map given addr*@addr: address attempting to be mapped* If the process is attempting to map memory below dac_mmap_min_addr they need* CAP_SYS_RAWIO. The other parameters to this function are unused by the
ordered_lsm_init
mmap_prot
cred_init_security initialise the security for the init task
selinux_bprm_committing_credsPrepare a process for imminent new credential changes due to exec
selinux_bprm_committed_credsClean up the process immediately after the installation of new credentials* due to exec
selinux_nlmsg_perm
smack_sk_alloc_securitysmack_sk_alloc_security - Allocate a socket blob*@sk: the socket*@family: unused*@gfp_flags: memory allocation flags* Assign Smack pointers to current* Returns 0 on success, -ENOMEM is there's no memory
smack_socket_post_createsmack_socket_post_create - finish socket setup*@sock: the socket*@family: protocol family*@type: unused*@protocol: unused*@kern: unused* Sets the netlabel information on the socket* Returns 0 on success, and error code otherwise
smack_initsmack_init - initialize the smack system* Returns 0 on success, -ENOMEM is there's no memory
smack_privilegedsmack_privileged - are all privilege requirements met*@cap: The requested capability* Is the task privileged and allowed to be privileged* by the onlycap rule.* Returns true if the task is allowed to be privileged, false if it's not.
dump_common_audit_datadump_common_audit_data - helper to dump common audit data*@a : common audit data
tomoyo_manager tomoyo_manager - Check whether the current process is a policy manager.* Returns true if the current process is permitted to modify policy* via /sys/kernel/security/tomoyo/ interface.* Caller holds tomoyo_read_lock().
tomoyo_warn_oom tomoyo_warn_oom - Print out of memory warning message.*@function: Function's name.
tomoyo_domain tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.* Returns pointer to "struct tomoyo_domain_info" for current thread.
tomoyo_cred_prepare tomoyo_cred_prepare - Target for security_prepare_creds().*@new: Pointer to "struct cred".*@old: Pointer to "struct cred".*@gfp: Memory allocation flags.* Returns 0.
tomoyo_file_open tomoyo_file_open - Target for security_file_open().*@f: Pointer to "struct file".*@cred: Pointer to "struct cred".* Returns 0 on success, negative value otherwise.
tomoyo_get_exe tomoyo_get_exe - Get tomoyo_realpath() of current process.* Returns the tomoyo_realpath() of current process on success, NULL otherwise.* This function uses kzalloc(), so the caller must call kfree()* if this function didn't return NULL.
d_namespace_pathd_namespace_path - lookup a name associated with a given path*@path: path to lookup (NOT NULL)*@buf: buffer to store path to (NOT NULL)*@name: Returns - pointer for start of path name with in @buf (NOT NULL)*@flags: flags controlling path
aa_setprocattr_changehat aa_setprocattr_changehat - handle procattr interface to change_hat*@args: args received from writing to /proc/&lt;pid&gt;/attr/current (NOT NULL)*@size: size of the args*@flags: set of flags governing behavior* Returns: %0 or error code if change_hat fails
apparmor_file_open
apparmor_bprm_committing_credsapparmor_bprm_committing_creds - do task cleanup on committing new creds*@bprm: binprm for the exec (NOT NULL)
set_init_ctxset_init_ctx - set a task context and profile on the first task.* TODO: allow setting an alternate profile than unconfined
__aa_transition_rlimits__aa_transition_rlimits - apply new profile rlimits*@old_l: old label on task (NOT NULL)*@new_l: new label with rlimits to apply (NOT NULL)
report_accessdefers execution because cmdline access can sleep
lockdown_is_locked_downlockdown_is_locked_down - Find out if the kernel is locked down*@what: Tag to use in notice generated if lockdown is in effect
integrity_audit_msg
ksys_chdir
SYSCALL_DEFINE1
ksys_chroot
SYSCALL_DEFINE1Careful here! We test whether the file pointer is NULL before* releasing the fd. This ensures that one clone task can't release* an fd while another clone is opening it.
cp_old_statFor backward compatibility? Maybe this should be moved* into arch/i386 instead?
acct_arg_sizeThe nascent bprm->mm is not visible until exec_mmap() but it can* use a lot of memory, account these pages in current->mm temporary* for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we* change the counter back via acct_arg_size(0).
bprm_mm_initCreate a new mm_struct and populate it with a temporary stack* vm_area_struct. We don't have enough context at this point to set the stack* flags, permissions, and offset, so we use temporary values. We'll update* them later in setup_arg_pages().
setup_arg_pagesFinalizes the stack vm_area_struct. The flags and permissions are updated,* the stack is optionally relocated, and some extra space is added.
exec_mmap
flush_old_execCalling this is the point of no return. None of the failures will be* seen by userspace since either the process is already taking a fatal* signal (via de_thread() or coredump), or will have SEGV raised
setup_new_exec
finalize_execRuns immediately before start_thread() takes over.
prepare_bprm_credsPrepare credentials and lock ->cred_guard_mutex.* install_exec_creds() commits the new creds and drops the lock.* Or, if exec fails before, free_bprm() should release ->cred and* and unlock.
free_bprm
install_exec_creds install the new credentials for this executable
exec_binprm
__do_execve_filesys_execve() executes a new program.
set_binfmt
set_nameidata
restore_nameidata
set_root
nd_jump_linkHelper to directly jump to a known parsed path from ->get_link,* caller must have taken a reference to path beforehand.
may_follow_linkmay_follow_link - Check symlink following for unsafe situations*@nd: nameidata pathwalk data* In the case of the sysctl_protected_symlinks sysctl being enabled,* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
path_initmust be paired with terminate_walk()
select_estimate_accuracy
poll_select_finish
max_select_fd
core_sys_selectWe can actually return ERESTARTSYS instead of EINTR, but I'd* like to be certain this leads to no problems. So I return* EINTR just for safety.* Update: ERESTARTSYS breaks at least the xview clock binary, so
SYSCALL_DEFINE3
compat_core_sys_selectWe can actually return ERESTARTSYS instead of EINTR, but I'd* like to be certain this leads to no problems. So I return* EINTR just for safety.* Update: ERESTARTSYS breaks at least the xview clock binary, so
inode_lru_isolateIsolate the inode from the LRU in preparation for freeing it
alloc_fd
get_unused_fd_flags
put_unused_fd
fd_install
__close_fd_get_filevariant of __close_fd that gets a ref on the file for later fput
__fget
__fget_lightLightweight file lookup - no refcnt increment if fd table isn't shared.* You can use this instead of fget if you satisfy all of the following* conditions:* 1) You must call fput_light before exiting the syscall and returning control* to userspace (i
set_close_on_execWe only lock f_pos if we have threads or if the file might be* shared with another process. In both cases we'll have an elevated* file count (done either by fdget() or by fork()).
get_close_on_exec
replace_fd
ksys_dup3
SYSCALL_DEFINE2
__is_local_mountpoint__is_local_mountpoint - Test to see if dentry is a mountpoint in the* current mount namespace
check_mnt
do_umount
may_mountIs the caller allowed to modify his namespace?
mnt_ns_loop
attach_recursive_mnt@source_mnt : mount tree to be attached*@nd : place the mount tree @source_mnt is attached*@parent_nd : if non-null, detach the source_mnt from its parent and* store the parent mount and mountpoint dentry
open_detached_copy
SYSCALL_DEFINE3Create a kernel mount representation for a new, prepared superblock* (specified by fs_fd) and attach to an open_tree-like file descriptor.
SYSCALL_DEFINE2pivot_root Semantics:* Moves the root file system of the current process to the directory put_old,* makes new_root as the new root file system of the current process, and sets* root/cwd of all processes which had them on the current root to new_root
init_mount_tree
current_chrooted
mount_too_revealing
mntns_install
wb_workfnHandle writeback of dirty data for the device backed by this bdi. Also* reschedules periodically and does kupdated style flushing.
block_dump___mark_inode_dirty
splice_direct_to_actorsplice_direct_to_actor - splices data directly between two non-pipes*@in: file to splice from*@sd: actor information on where to splice to*@actor: handles the data splicing* Description:* This is a special case helper to splice directly between two
d_pathd_path - return the path of a dentry*@path: path to report*@buf: buffer to return value in*@buflen: buffer length* Convert a dentry into an ASCII path name
SYSCALL_DEFINE2NOTE! The user-level library version returns a* character pointer
unshare_fs_struct
current_umask
alloc_fs_contextalloc_fs_context - Create a filesystem context.*@fs_type: The filesystem type.*@reference: The dentry from which this one derives (or NULL)*@sb_flags: Filesystem/superblock flags (SB_*)*@sb_flags_mask: Applicable members of @sb_flags
SYSCALL_DEFINE2Open a filesystem by name so that it can be configured for mounting.* We are allowed to specify a container in which the filesystem will be* opened, thereby indicating which namespaces will be used (notably, which
SYSCALL_DEFINE3Pick a superblock into a context for reconfiguration.
SYSCALL_DEFINE2There are no bdflush tunables left. But distributions are* still running obsolete flush daemons, so we terminate them here.* Use of bdflush() is deprecated and will be removed in a future kernel.
mpage_alloc
fcntl_dirnotifyWhen a process calls fcntl to attach a dnotify watch to a directory it ends* up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be* attached to the fsnotify_mark.
inotify_new_group
SYSCALL_DEFINE2anotify syscalls
signalfd_poll
signalfd_dequeue
do_signalfd4
handle_userfaultThe locking rules involved in returning VM_FAULT_RETRY depending on* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and* FAULT_FLAG_KILLABLE are not straightforward
userfaultfd_event_wait_completion
SYSCALL_DEFINE1
aio_free_ring
aio_setup_ring
ioctx_alloc ioctx_alloc* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
lookup_ioctx
SYSCALL_DEFINE2sys_io_setup:* Create an aio_context capable of receiving at least nr_events
COMPAT_SYSCALL_DEFINE2
SYSCALL_DEFINE1sys_io_destroy:* Destroy the aio_context specified. May cancel any outstanding * AIOs and block on completion. Will fail with -ENOSYS if not* implemented. May fail with -EINVAL if the context pointed to* is invalid.
io_grab_files
io_sq_offload_start
io_sqe_buffer_register
__io_worker_unuseNote: drops the wqe->lock if returning true! The caller must re-acquire* the lock in that case. Some callers need to restart handling if this* happens, so we can't just re-acquire the lock on behalf of the caller.
io_worker_exit
io_worker_start
io_worker_handle_work
set_encryption_policy
flock_make_lockFill in a file_lock structure with an appropriate FLOCK lock.
flock64_to_posix_lock
lease_initInitialize a lease, use the default lock manager operations
fcntl_setlkApply the lock described by l to an open file descriptor.* This implements both the F_SETLK and F_SETLKW commands of fcntl().
fcntl_setlk64Apply the lock described by l to an open file descriptor.* This implements both the F_SETLK and F_SETLKW commands of fcntl().
locks_remove_posixThis function is called when the file is being removed* from the task's fd array. POSIX locks belonging to this task* are deleted at this time.
create_aout_tables create_aout_tables() parses the env- and arg-strings in new user* memory and creates the pointer tables from them, and puts their* addresses on the "stack", returning the new stack pointer value.
load_aout_binaryThese are the functions used to load a.out style executables and shared* libraries. There is no binary dependent code anywhere else.
set_brk
create_elf_tables
elf_map
load_elf_binary
load_elf_fdpic_binaryload an fdpic binary into various bits of memory
create_elf_fdpic_tablespresent useful information to the program by shovelling it onto the new* process's stack
flat_core_dump
create_flat_tables create_flat_tables() parses the env- and arg-strings in new user* memory and creates the pointer tables from them, and puts their* addresses on the "stack", recording the new stack pointer value.
calc_reloc
load_flat_file
load_flat_binaryThese are the functions used to load flat style executables and shared* libraries. There is no binary dependent code anywhere else.
cn_print_exe_file
format_corename format_corename will inspect the pattern parameter, and output a* name into corename, which must have space for at least* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
coredump_finish
umh_pipe_setup umh_pipe_setup* helper function to customize the process used* to collect the core in userspace. Specifically* it sets up a pipe and installs it as fd 0 (stdin)* for the process. Returns 0 on success, or* PTR_ERR on failure.
do_coredump
drop_caches_sysctl_handler
get_vfsmount_from_fd
iomap_do_writepageWrite out a dirty page.* For delalloc space on the page we need to allocate space and flush it.* For unwritten space on the page we need to start the conversion to* regular allocated space.
parse_mount_optionsparse_mount_options():* Set @opts to mount options specified in @data. If an option is not* specified in @data, set it to its default value.* Note: @data may be NULL (in which case all options are set to default).
ramfs_mmu_get_unmapped_area