Function Logic Report
Source Code: arch/x86/include/asm/current.h
Create Date: 2022-07-27 06:38:37
Last Modify: 2020-03-12 14:18:49 | Copyright © Brick
Function Name: get_current
Prototype: static __always_inline struct task_struct *get_current(void)
Return Type: struct task_struct *
Parameters: none
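
A minimal sketch of the definition behind this prototype, as it reads in arch/x86/include/asm/current.h on kernels of this vintage: the task pointer lives in the per-CPU variable current_task, and the familiar `current` macro is simply sugar for get_current().

```c
/* Sketch of arch/x86/include/asm/current.h (names per mainline) */
#include <linux/compiler.h>
#include <asm/percpu.h>

#ifndef __ASSEMBLY__
struct task_struct;

/* Written by the context-switch code; one slot per CPU. */
DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	/*
	 * this_cpu_read_stable() lets the compiler cache the load:
	 * even if the task migrates CPUs, the value it reads -- the
	 * pointer to its own task_struct -- is unchanged.
	 */
	return this_cpu_read_stable(current_task);
}

#define current get_current()
#endif /* __ASSEMBLY__ */
```

The table below lists the functions that reach get_current(), almost always through the `current` macro.
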
Name | Description |
---|---|
dump_stack_print_info | dump_stack_print_info - print generic debug info for dump_stack()*@log_lvl: log level* Arch-specific dump_stack() implementations can use this function to* print out the same debug information as the generic dump_stack(). |
irqsafe1_hard_spin_12 | |
irqsafe1_hard_spin_21 | |
irqsafe1_hard_rlock_12 | |
irqsafe1_hard_rlock_21 | |
irqsafe1_hard_wlock_12 | |
irqsafe1_hard_wlock_21 | |
irqsafe1_soft_spin_12 | |
irqsafe1_soft_spin_21 | |
irqsafe1_soft_rlock_12 | |
irqsafe1_soft_rlock_21 | |
irqsafe1_soft_wlock_12 | |
irqsafe1_soft_wlock_21 | |
irqsafe2B_hard_spin_12 | |
irqsafe2B_hard_spin_21 | |
irqsafe2B_hard_rlock_12 | |
irqsafe2B_hard_rlock_21 | |
irqsafe2B_hard_wlock_12 | |
irqsafe2B_hard_wlock_21 | |
irqsafe2B_soft_spin_12 | |
irqsafe2B_soft_spin_21 | |
irqsafe2B_soft_rlock_12 | |
irqsafe2B_soft_rlock_21 | |
irqsafe2B_soft_wlock_12 | |
irqsafe2B_soft_wlock_21 | |
irqsafe3_hard_spin_123 | |
irqsafe3_hard_spin_132 | |
irqsafe3_hard_spin_213 | |
irqsafe3_hard_spin_231 | |
irqsafe3_hard_spin_312 | |
irqsafe3_hard_spin_321 | |
irqsafe3_hard_rlock_123 | |
irqsafe3_hard_rlock_132 | |
irqsafe3_hard_rlock_213 | |
irqsafe3_hard_rlock_231 | |
irqsafe3_hard_rlock_312 | |
irqsafe3_hard_rlock_321 | |
irqsafe3_hard_wlock_123 | |
irqsafe3_hard_wlock_132 | |
irqsafe3_hard_wlock_213 | |
irqsafe3_hard_wlock_231 | |
irqsafe3_hard_wlock_312 | |
irqsafe3_hard_wlock_321 | |
irqsafe3_soft_spin_123 | |
irqsafe3_soft_spin_132 | |
irqsafe3_soft_spin_213 | |
irqsafe3_soft_spin_231 | |
irqsafe3_soft_spin_312 | |
irqsafe3_soft_spin_321 | |
irqsafe3_soft_rlock_123 | |
irqsafe3_soft_rlock_132 | |
irqsafe3_soft_rlock_213 | |
irqsafe3_soft_rlock_231 | |
irqsafe3_soft_rlock_312 | |
irqsafe3_soft_rlock_321 | |
irqsafe3_soft_wlock_123 | |
irqsafe3_soft_wlock_132 | |
irqsafe3_soft_wlock_213 | |
irqsafe3_soft_wlock_231 | |
irqsafe3_soft_wlock_312 | |
irqsafe3_soft_wlock_321 | |
irqsafe4_hard_spin_123 | |
irqsafe4_hard_spin_132 | |
irqsafe4_hard_spin_213 | |
irqsafe4_hard_spin_231 | |
irqsafe4_hard_spin_312 | |
irqsafe4_hard_spin_321 | |
irqsafe4_hard_rlock_123 | |
irqsafe4_hard_rlock_132 | |
irqsafe4_hard_rlock_213 | |
irqsafe4_hard_rlock_231 | |
irqsafe4_hard_rlock_312 | |
irqsafe4_hard_rlock_321 | |
irqsafe4_hard_wlock_123 | |
irqsafe4_hard_wlock_132 | |
irqsafe4_hard_wlock_213 | |
irqsafe4_hard_wlock_231 | |
irqsafe4_hard_wlock_312 | |
irqsafe4_hard_wlock_321 | |
irqsafe4_soft_spin_123 | |
irqsafe4_soft_spin_132 | |
irqsafe4_soft_spin_213 | |
irqsafe4_soft_spin_231 | |
irqsafe4_soft_spin_312 | |
irqsafe4_soft_spin_321 | |
irqsafe4_soft_rlock_123 | |
irqsafe4_soft_rlock_132 | |
irqsafe4_soft_rlock_213 | |
irqsafe4_soft_rlock_231 | |
irqsafe4_soft_rlock_312 | |
irqsafe4_soft_rlock_321 | |
irqsafe4_soft_wlock_123 | |
irqsafe4_soft_wlock_132 | |
irqsafe4_soft_wlock_213 | |
irqsafe4_soft_wlock_231 | |
irqsafe4_soft_wlock_312 | |
irqsafe4_soft_wlock_321 | |
irq_inversion_hard_spin_123 | |
irq_inversion_hard_spin_132 | |
irq_inversion_hard_spin_213 | |
irq_inversion_hard_spin_231 | |
irq_inversion_hard_spin_312 | |
irq_inversion_hard_spin_321 | |
irq_inversion_hard_rlock_123 | |
irq_inversion_hard_rlock_132 | |
irq_inversion_hard_rlock_213 | |
irq_inversion_hard_rlock_231 | |
irq_inversion_hard_rlock_312 | |
irq_inversion_hard_rlock_321 | |
irq_inversion_hard_wlock_123 | |
irq_inversion_hard_wlock_132 | |
irq_inversion_hard_wlock_213 | |
irq_inversion_hard_wlock_231 | |
irq_inversion_hard_wlock_312 | |
irq_inversion_hard_wlock_321 | |
irq_inversion_soft_spin_123 | |
irq_inversion_soft_spin_132 | |
irq_inversion_soft_spin_213 | |
irq_inversion_soft_spin_231 | |
irq_inversion_soft_spin_312 | |
irq_inversion_soft_spin_321 | |
irq_inversion_soft_rlock_123 | |
irq_inversion_soft_rlock_132 | |
irq_inversion_soft_rlock_213 | |
irq_inversion_soft_rlock_231 | |
irq_inversion_soft_rlock_312 | |
irq_inversion_soft_rlock_321 | |
irq_inversion_soft_wlock_123 | |
irq_inversion_soft_wlock_132 | |
irq_inversion_soft_wlock_213 | |
irq_inversion_soft_wlock_231 | |
irq_inversion_soft_wlock_312 | |
irq_inversion_soft_wlock_321 | |
irq_read_recursion_hard_123 | |
irq_read_recursion_hard_132 | |
irq_read_recursion_hard_213 | |
irq_read_recursion_hard_231 | |
irq_read_recursion_hard_312 | |
irq_read_recursion_hard_321 | |
irq_read_recursion_soft_123 | |
irq_read_recursion_soft_132 | |
irq_read_recursion_soft_213 | |
irq_read_recursion_soft_231 | |
irq_read_recursion_soft_312 | |
irq_read_recursion_soft_321 | |
check_preemption_disabled | |
should_fail | This code is stolen from failmalloc-1.0* http://www.nongnu.org/failmalloc/ |
validate_nla | |
__nla_validate_parse | |
suppress_report | |
ubsan_prologue | |
ubsan_epilogue | |
restore_sigcontext | |
setup_sigcontext | |
get_sigframe | |
__setup_frame | |
__setup_rt_frame | |
handle_signal | Invoke the custom (user-defined) signal handler |
__die | |
read_ldt | |
write_ldt | |
get_align_mask | Align a virtual address to avoid aliasing in the I$ on AMD F15h. |
find_start_end | |
arch_get_unmapped_area | |
arch_get_unmapped_area_topdown | |
aout_dump_debugregs | Dump the debug register contents to the user.* We can't dump our per cpu values because it* may contain cpu wide breakpoint, something that* doesn't belong to the current task.* TODO: include non-ptrace user breakpoints (perf) |
hw_breakpoint_restore | |
hw_breakpoint_handler | Handle debug exception notifications.* Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.* NOTIFY_DONE returned if one of the following conditions is true.* i) When the causative address is from user-space and the exception |
copy_thread_tls | |
arch_setup_new_exec | Called immediately after a successful exec. |
arch_align_stack | |
kernel_fpu_begin | |
fpu__save | Save the FPU state (mark it for reload if necessary):* This only ever gets called for the current task. |
fpu__copy | |
fpu__initialize | Activate the current task's in-memory FPU context,* if it has not been used before: |
fpu__prepare_read | This function must be called before we read a task's fpstate |
fpu__prepare_write | This function must be called before we write a task's fpstate.* Invalidate any cached FPU registers.* After this function call, after registers in the fpstate are* modified and the child task has woken up, the child task will |
fpu__drop | Drops current FPU state: deactivates the fpregs and* the fpstate. NOTE: it still leaves previous contents* in the fpregs in the eager-FPU case.* This function can be used in cases where we know that* a state-restore is coming: either an explicit one, |
fpu__clear | Clear the FPU state back to init state.* Called by sys_execve(), by the signal handler code and by various* error paths. |
fpregs_mark_activate | |
get_xsave_field_ptr | This wraps up the common operations that need to occur when retrieving* data from xsave state. It first ensures that the current task was* using the FPU and retrieves the data in to a buffer. It then calculates |
ptrace_triggered | |
get_free_idx | sys_alloc_thread_area: get a yet unused TLS descriptor index. |
set_tls_desc | |
move_myself | |
pseudo_lock_dev_mmap | |
save_v86_state | |
do_sys_vm86 | |
set_vflags_long | It is correct to call set_IF(regs) from the set_vflags_** functions. However someone forgot to call clear_IF(regs)* in the opposite case.* After the command sequence CLI PUSHF STI POPF you should* end up with interrupts disabled, but you ended up with |
set_vflags_short | |
get_vflags | |
do_int | There are so many possible reasons for this function to return* VM86_INTx, so adding another doesn't bother me. We can expect* userspace programs to be able to handle it. (Getting a problem* in userspace is always better than an Oops anyway.) [KD] |
handle_vm86_trap | |
handle_vm86_fault | |
riprel_pre_xol | If we're emulating a rip-relative instruction, save the contents* of the scratch register and store the target address in that register. |
riprel_post_xol | |
default_post_xol_op | We have to fix things up as follows:* Typically, the new ip is relative to the copied instruction |
arch_uprobe_pre_xol | arch_uprobe_pre_xol - prepare to execute out of line.*@auprobe: the probepoint information.*@regs: reflects the saved user state of current task. |
arch_uprobe_post_xol | Called after single-stepping. To avoid the SMP problems that can* occur when we temporarily put back the original opcode to* single-step, we single-stepped a copy of the instruction.* This function prepares to resume execution after the single-step. |
arch_uprobe_abort_xol | This function gets called when XOL instruction either gets trapped or* the thread has a fatal signal. Reset the instruction pointer to its* probed address for the potential restart or for post mortem analysis. |
arch_uretprobe_hijack_return_addr | |
__mmdrop | Called when the last reference to the mm* is dropped: either by a lazy thread or by* mmput. Free the page directory and the mm. |
mm_init | Initialize a new mm_struct (memory descriptor) |
mm_access | |
copy_mm | Copy memory-management info (mm) for fork |
copy_fs | Copy filesystem info |
copy_files | Copy open-file info |
copy_io | Copy the I/O context |
copy_sighand | Copy signal handlers |
copy_signal | Copy signal state |
SYSCALL_DEFINE1 | |
copy_process | Create a new process |
check_unshare_flags | Check constraints on flags passed to the unshare system call. |
unshare_fs | Unshare the filesystem structure if it is being shared |
unshare_fd | Unshare file descriptor table if it is being shared |
ksys_unshare | unshare allows a process to 'unshare' part of the process* context which was originally shared using clone. copy_** functions used by do_fork() cannot be used here directly* because they modify an inactive task_struct that is being* constructed |
SYSCALL_DEFINE1 | |
__warn | |
exit_mm | Turn us into a lazy TLB process if we* aren't already.. |
do_exit | |
do_group_exit | Take down every thread in the group. This is called by fatal signals* as well as by sys_exit_group (below). |
wait_task_zombie | Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold* read_lock(&tasklist_lock) on entry. If we return zero, we still hold* the lock and this task is uninteresting. If we return nonzero, we have |
do_wait | |
__do_softirq | |
warn_sysctl_write | |
deprecated_sysctl_warning | |
ptrace_traceme | ptrace_traceme -- helper for PTRACE_TRACEME* Performs checks and sets PT_PTRACED.* Should be used by all ptrace implementations for PTRACE_TRACEME. |
ptrace_setoptions | |
calculate_sigpending | |
print_dropped_signal | |
task_join_group_stop | |
dequeue_signal | Dequeue a signal and return the element to the caller, which is* expected to free it.* All callers have to hold the siglock. |
sigqueue_free | |
may_ptrace_stop | |
ptrace_stop | This must be called with current->sighand->siglock held.* This should be the path for all ptrace stops.* We always set current->last_siginfo while stopped here.* That makes it a way to test a stopped process for |
ptrace_notify | |
do_signal_stop | do_signal_stop - handle group stop for SIGSTOP and other stop signals*@signr: signr causing group stop if initiating* If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr* and participate in it |
do_jobctl_trap | do_jobctl_trap - take care of ptrace jobctl traps* When PT_SEIZED, it's used for both group stop and explicit* SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with* accompanying siginfo. If stopped, lower eight bits of exit_code contain |
do_freezer_trap | do_freezer_trap - handle the freezer jobctl trap* Puts the task into frozen state, if only the task is not about to quit.* In this case it drops JOBCTL_TRAP_FREEZE.* CONTEXT:* Must be called with @current->sighand->siglock held, |
ptrace_signal | |
get_signal | |
signal_delivered | signal_delivered - *@ksig: kernel signal struct*@stepping: nonzero if debugger single-step or block-step in use* This function should be called when a signal has successfully been* delivered |
sys_restart_syscall | |
__set_task_blocked | |
set_user_sigmask | The api helps set app-provided sigmasks |
set_compat_user_sigmask | |
SYSCALL_DEFINE4 | sys_rt_sigprocmask - change the list of currently blocked signals*@how: whether to add, remove, or set signals*@nset: stores pending signals*@oset: previous value of signal mask if non-null*@sigsetsize: size of sigset_t type |
COMPAT_SYSCALL_DEFINE4 | |
do_sigpending | |
kernel_sigaction | Kernel-internal signal handling |
do_sigaltstack | |
SYSCALL_DEFINE3 | sys_sigprocmask - examine and change blocked signals*@how: whether to add, remove, or set signals*@nset: signals to add or remove (if non-null)*@oset: previous value of signal mask if non-null* Some platforms have their own version with special arguments; |
sigsuspend | |
sys_getppid | |
do_sys_times | |
SYSCALL_DEFINE2 | This needs some heavy checking |
set_special_pids | |
ksys_setsid | |
override_release | Work around broken programs that cannot handle "Linux 3.0".* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40* And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be* 2.6.60. |
SYSCALL_DEFINE2 | |
SYSCALL_DEFINE2 | Only setdomainname; getdomainname can be implemented by calling* uname() |
SYSCALL_DEFINE2 | Back compatibility for getrlimit. Needed for some apps. |
COMPAT_SYSCALL_DEFINE2 | |
SYSCALL_DEFINE1 | |
prctl_set_mm | |
SYSCALL_DEFINE5 | |
call_usermodehelper_exec_async | This is the task which runs the usermode application |
process_one_work | process_one_work - process single work*@worker: self*@work: work to process* Process @work |
set_pf_worker | |
check_flush_dependency | check_flush_dependency - check for flush dependency sanity*@target_wq: workqueue being flushed*@target_work: work item being flushed (NULL for workqueue flushes)* %current is trying to flush the whole @target_wq or @target_work on it |
set_kthread_struct | |
kthread | |
create_kthread | |
kthreadd | |
kthread_associate_blkcg | kthread_associate_blkcg - associate blkcg to current kthread*@css: the cgroup info* Current thread must be a kthread. The thread is running jobs on behalf of* other threads. In some cases, we expect the jobs attach cgroup info of |
kthread_blkcg | kthread_blkcg - get associated blkcg css of current kthread* Current thread must be a kthread. |
unshare_nsproxy_namespaces | Called from unshare. Unshare all the namespaces part of nsproxy.* On success, returns the new nsproxy. |
__put_cred | __put_cred - Destroy a set of credentials*@cred: The record to release* Destroy a set of credentials on which no references remain. |
override_creds | override_creds - Override the current process's subjective credentials*@new: The credentials to be assigned* Install a set of temporary override subjective credentials on the current* process, returning the old set for later reversion. |
revert_creds | revert_creds - Revert a temporary subjective credentials override*@old: The credentials to be restored* Revert a temporary set of override subjective credentials to an old set,* discarding the override set. |
migrate_to_reboot_cpu | |
async_schedule_node_domain | async_schedule_node_domain - NUMA specific version of async_schedule_domain*@func: function to execute asynchronously*@data: data pointer to pass to the function*@node: NUMA node that we want to schedule this on or close to*@domain: the domain |
sched_fork | Scheduler setup for a newly forked task |
finish_task_switch | finish_task_switch - clean up after a task-switch*@prev: the thread we just switched away from |
schedule_tail | schedule_tail - first thing a freshly forked thread must call.*@prev: the thread we just switched away from. |
do_task_dead | |
schedule_idle | synchronize_rcu_tasks() makes sure that no task is stuck in preempted* state (have scheduled out non-voluntarily) by making sure that all* tasks have either left the run queue or have gone into user space |
do_sched_yield | sys_sched_yield - yield the current processor to other threads.* This function yields the current CPU to other tasks. If there are no* other threads running on this CPU then this function will return.* Return: 0. |
io_schedule_prepare | |
io_schedule_finish | |
play_idle_precise | |
is_kthread_should_stop | |
ipi_sync_rq_state | |
membarrier_private_expedited | |
psi_memstall_enter | psi_memstall_enter - mark the beginning of a memory stall section*@flags: flags to handle nested sections* Marks the calling task as being stalled due to a lack of memory,* such as waiting for a refault or performing reclaim. |
psi_memstall_leave | psi_memstall_leave - mark the end of a memory stall section*@flags: flags to handle nested memdelay sections* Marks the calling task as no longer stalled due to lack of memory. |
graph_lock | |
graph_unlock | |
lockdep_off | |
lockdep_on | |
lockdep_init_map | Initialize a lock instance's lock-class mapping info: |
lock_set_class | |
lock_downgrade | |
lock_acquire | We are not always called with irqs disabled - do that here,* and also avoid lockdep recursion: |
lock_release | |
lock_is_held_type | |
lock_pin_lock | |
lock_repin_lock | |
lock_unpin_lock | |
lockdep_reset | |
free_zapped_rcu | |
lockdep_free_key_range_reg | Used in module |
print_held_locks_bug | |
debug_check_no_locks_held | |
mark_wakeup_next_waiter | Remove the top waiter from the current tasks pi waiter tree and* queue it up.* Called with lock->wait_lock held and interrupts disabled. |
remove_waiter | Remove a waiter from a lock and give up* Must be called with lock->wait_lock held and interrupts disabled. I must* have just failed to try_to_take_rt_mutex(). |
debug_rt_mutex_print_deadlock | |
spin_dump | |
rwlock_bug | |
freeze_processes | freeze_processes - Signal user space processes to enter the refrigerator.* The current thread will not be frozen. The same process that calls* freeze_processes must later call thaw_processes.* On success, returns 0 |
check_syslog_permissions | |
devkmsg_write | |
irq_thread_dtor | |
debug_lockdep_rcu_enabled | |
synchronize_rcu_trivial | Definitions for trivial CONFIG_PREEMPT=n-only torture testing.* This implementation does not necessarily work well with CPU hotplug. |
klp_ftrace_handler | |
klp_copy_process | Called from copy_process() during fork |
check_for_stack | |
__refrigerator | Refrigerator is place where frozen processes are stored :-). |
set_freezable | set_freezable - make %current freezable* Mark %current freezable and enter refrigerator if necessary. |
schedule_timeout | schedule_timeout - sleep until timeout*@timeout: timeout value in jiffies* Make the current task sleep until @timeout jiffies have* elapsed |
do_nanosleep | |
hrtimer_nanosleep | |
posix_timer_by_id | |
posix_timer_add | |
do_timer_create | Create a POSIX.1b interval timer. |
__lock_timer | CLOCKs: The POSIX standard calls for a couple of clocks and allows us* to implement others |
SYSCALL_DEFINE1 | Delete a POSIX.1b interval timer. |
SYSCALL_DEFINE4 | |
lookup_task | Functions for validating access to tasks. |
check_rlimit | |
do_cpu_nanosleep | |
posix_cpu_nsleep | |
SYSCALL_DEFINE3 | |
COMPAT_SYSCALL_DEFINE3 | |
sys_ni_posix_timers | |
SYSCALL_DEFINE4 | |
get_futex_key | get_futex_key() - Get parameters which are the keys for a futex*@uaddr: virtual address of the futex*@fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED*@key: address where result is stored |
fault_in_user_writeable | fault_in_user_writeable() - Fault in user address and verify RW access*@uaddr: pointer to faulting user space address* Slow path to fixup the fault we just took in the atomic write* access to @uaddr |
refill_pi_state_cache | PI code: |
alloc_pi_state | |
put_pi_state | Drops a reference to the pi_state object and frees or caches it* when the last reference is gone. |
futex_atomic_op_inuser | |
__queue_me | |
futex_wait | |
futex_wait_requeue_pi | futex_wait_requeue_pi() - Wait on uaddr and take uaddr2*@uaddr: the futex we initially wait on (non-pi)*@flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc |
SYSCALL_DEFINE2 | sys_set_robust_list() - Set the robust-futex list head of a task*@head: pointer to the list-head*@len: length of the list-head, as userspace expects |
COMPAT_SYSCALL_DEFINE2 | |
do_init_module | This is where the real work happens.* Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb* helper command 'lx-symbols'. |
fill_ac | Write an accounting entry for an exiting process* The acct_process() call is the workhorse of the process* accounting system. The struct acct is built here and then written* into the accounting file. This function should only be called from |
do_acct_process | |
acct_collect | acct_collect - collect accounting information into pacct_struct*@exitcode: task exit code*@group_dead: not 0, if this thread is the last one in the process. |
crash_save_cpu | |
COMPAT_SYSCALL_DEFINE3 | |
current_cgns_cgroup_from_root | look up cgroup associated with current task's cgroup namespace on the* specified hierarchy |
apply_cgroup_root_flags | |
cgroup_init_fs_context | Initialise the cgroup filesystem creation/reconfiguration context. Notably,* we select the namespace we're going to use. |
cgroup_file_write | |
cgroup_procs_write_permission | |
css_create | |
proc_cgroup_show | proc_cgroup_show()* - Print task's cgroup paths into seq_file, one line for each hierarchy* - Used for /proc/ |
cgroup1_reconfigure | |
cgroup_enter_frozen | Enter frozen/stopped state, if not yet there. Update cgroup's counters,* and revisit the state of the cgroup, if necessary. |
cgroup_leave_frozen | Conditionally leave frozen/stopped state |
cpuset_fork | Make sure the new task conform to the current state of its parent,* which could have been changed by cpuset just after it inherits the* state from the parent and before it sits on the cgroup's task list. |
cpuset_init_current_mems_allowed | |
cpuset_nodemask_valid_mems_allowed | cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed*@nodemask: the nodemask to be checked* Are any of the nodes in the nodemask allowed in current->mems_allowed? |
__cpuset_node_allowed | cpuset_node_allowed - Can we allocate on a memory node?*@node: is this an allowed node?*@gfp_mask: memory allocation flags* If we're in interrupt, yes, we can always allocate. If @node is set in* current's mems_allowed, yes |
cpuset_spread_node | cpuset_mem_spread_node() - On which node to begin search for a file page* cpuset_slab_spread_node() - On which node to begin search for a slab page* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for* tasks in a cpuset with is_spread_page or |
cpuset_mem_spread_node | |
cpuset_slab_spread_node | |
cpuset_print_current_mems_allowed | cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed* Description: Prints current's name, cpuset name, and cached copy of its* mems_allowed to the kernel log. |
userns_install | |
zap_pid_ns_processes | |
audit_receive_msg | |
audit_get_tty | |
audit_log_task_info | |
audit_set_loginuid | audit_set_loginuid - set current task's loginuid*@loginuid: loginuid value* Returns 0.* Called (set) from fs/proc/base.c::proc_loginuid_write(). |
audit_log_execve_info | |
audit_log_exit | |
__audit_getname | __audit_getname - add a name to the list*@name: name to add* Add a name to the list of audit names for this context.* Called from fs/namei.c:getname(). |
audit_log_task | |
kcov_task_init | |
kcov_ioctl_locked | |
kcov_common_handle | See the comment before kcov_remote_start() for usage details. |
kgdb_flush_swbreak_addr | Some architectures need cache flushes when we set/clear a* breakpoint: |
gdb_cmd_query | Handle the 'q' query packets |
gdb_serial_stub | This function performs all gdbserial command processing |
seccomp_may_assign_mode | |
secure_computing_strict | |
prctl_get_seccomp | |
seccomp_set_mode_strict | seccomp_set_mode_strict: internal function for setting strict seccomp* Once current->seccomp.mode is non-zero, it may not be changed.* Returns 0 on success or -EINVAL on failure. |
get_uts | |
__delayacct_blkio_start | |
__delayacct_freepages_start | |
__delayacct_freepages_end | |
__delayacct_thrashing_start | |
__delayacct_thrashing_end | |
__rb_allocate_pages | |
probe_wakeup | |
move_to_next_cpu | |
ftrace_push_return_trace | Add a function return address to the trace stack on thread info. |
function_graph_enter | |
ftrace_pop_return_trace | Retrieve a function return address to the trace stack on thread info. |
ftrace_return_to_handler | Send the trace to the ring-buffer. Returns the original return address. |
filter_pred_comm | Filter predicate for COMM. |
____bpf_probe_write_user | |
bpf_get_probe_write_proto | |
____bpf_send_signal | |
process_fetch_insn | Note that we don't verify it, since the code does not come from user space |
fetch_store_string | Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max* length and relative data location. |
fetch_store_strlen | Return the length of string -- including null terminal byte |
translate_user_vaddr | |
uprobe_dispatcher | |
uretprobe_dispatcher | |
dev_map_update_elem | |
dev_map_hash_update_elem | |
bpf_prog_offload_init | |
bpf_map_offload_map_alloc | |
stack_map_get_build_id_offset | |
perf_event_enable_on_exec | Enable all of a task's events that have been marked enable-on-exec.* This expects task == current. |
perf_event_task_enable | |
perf_event_task_disable | |
perf_sample_regs_user | |
perf_virt_to_phys | |
perf_iterate_sb | Iterate all events that need to receive side-band events.* For new callers; ensure that account_pmu_sb_event() includes* your event, otherwise it might not get delivered. |
perf_event_exec | |
perf_addr_filters_adjust | Adjust all task's events' filters to the new vma |
get_perf_callchain | |
__create_xol_area | |
get_xol_area | get_xol_area - Allocate process's xol_area if necessary.* This area will be used for storing instructions for execution out of line.* Returns the allocated area or NULL. |
uprobe_get_trap_addr | |
get_utask | Allocate a uprobe_task object for the task if necessary.* Called when the thread hits a breakpoint.* Returns:* - pointer to new uprobe_task on success* - NULL otherwise |
uprobe_warn | |
dup_xol_work | |
uprobe_copy_process | Copy uprobe context on fork |
get_trampoline_vaddr | Current area->vaddr notion assume the trampoline address is always* equal area->vaddr.* Returns -1 in case the xol_area is not allocated. |
prepare_uretprobe | |
find_active_uprobe | |
handler_chain | |
handle_trampoline | |
handle_singlestep | Perform required fix-ups and disable singlestep.* Allow pending signals to take effect. |
uprobe_notify_resume | On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and* allows the thread to return from interrupt. After that handle_swbp()* sets utask->active_uprobe.* On singlestep exception, singlestep notifier sets the TIF_UPROBE flag |
uprobe_pre_sstep_notifier | uprobe_pre_sstep_notifier gets called from interrupt context as part of* notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit. |
uprobe_post_sstep_notifier | uprobe_post_sstep_notifier gets called in interrupt context as part of notifier* mechanism. Set TIF_UPROBE flag and indicate completion of singlestep. |
__context_tracking_enter | context_tracking_enter - Inform the context tracking that the CPU is going to* enter user or guest space mode |
rseq_get_rseq_cs | |
stackleak_erase | |
stackleak_track_stack | |
do_mount_root | |
handle_initrd | |
unaccount_page_cache_page | |
__add_to_page_cache_locked | |
dio_warn_stale_pagecache | Warn about a page cache invalidation failure during a direct I/O write. |
__generic_file_write_iter | __generic_file_write_iter - write data to a file*@iocb: IO state structure (file, offset, etc.)*@from: iov_iter with data to write* This function does all the work needed for actually writing data to a* file |
dump_header | |
out_of_memory | out_of_memory - kill the "best" process when we run out of memory*@oc: pointer to struct oom_control* If we run out of memory, we have the choice between either* killing a random task (bad), letting the system crash (worse) |
balance_dirty_pages | balance_dirty_pages() must be called by processes which are generating dirty* data |
balance_dirty_pages_ratelimited | balance_dirty_pages_ratelimited - balance dirty memory state*@mapping: address_space which was dirtied* Processes which are dirtying memory should call in here once for each page* which was newly dirtied. The function will periodically check the system's |
account_page_dirtied | Helper function for set_page_dirty family.* Caller must hold lock_page_memcg().* NOTE: This relies on being atomic wrt interrupts. |
account_page_redirty | Call this whenever redirtying a page, to de-account the dirty counters* (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written* counters (NR_WRITTEN, WB_WRITTEN) in long term |
may_write_to_inode | |
current_may_throttle | If a kernel thread (such as nfsd for loop-back mounts) services* a backing device by writing to the page cache it sets PF_LESS_THROTTLE.* In that case we should only throttle if the backing device it is* writing to is congested |
shrink_node | |
throttle_direct_reclaim | Throttle direct reclaimers if backing storage is backed by the network* and the PFMEMALLOC reserve for the preferred node is getting dangerously* depleted. kswapd will continue to make progress and wake the processes* when the low watermark is reached. |
mem_cgroup_shrink_node | Only used by soft limit reclaim. Do not reuse for anything else. |
__node_reclaim | Try to free up some pages from this node through reclaim. |
node_reclaim | |
shmem_get_unmapped_area | |
randomize_stack_top | |
vm_mmap_pgoff | |
vmacache_valid_mm | This task may be accessing a foreign mm via (for example)* get_user_pages()->find_vma(). The vmacache is task-local and this* task's vmacache pertains to a different mm (ie, its own). There is* nothing we can do here. |
vmacache_update | |
vmacache_find | |
__mm_populate | __mm_populate - populate and/or mlock pages within a range of address space.* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap* flags. VMAs must be already marked with the desired vm_flags, and* mmap_sem must not be held. |
get_user_pages | This is the same as get_user_pages_remote(), just with a* less-flexible calling convention where we assume that the task* and mm being operated on are the current task's and don't allow* passing of a locked parameter. We also obviously don't pass |
get_user_pages_locked | We can leverage the VM_FAULT_RETRY functionality in the page fault* paths better by using either get_user_pages_locked() or* get_user_pages_unlocked().* get_user_pages_locked() is suitable to replace the form:* down_read(&mm->mmap_sem);* do_something() |
get_user_pages_unlocked | get_user_pages_unlocked() is suitable to replace the form:* down_read(&mm->mmap_sem);* get_user_pages(tsk, mm, |
__gup_longterm_unlocked | |
kobjsize | Return the total memory allocated for this pointer, not* just what the caller asked for.* Doesn't have to be accurate, i.e. may have races. |
__vmalloc_user_flags | |
SYSCALL_DEFINE1 | sys_brk() for the most part doesn't need the global kernel* lock, except when an application is doing something nasty* like trying to un-brk an area that has already been mapped* to a regular file. in this case, the unmapping will need |
validate_mmap_request | determine whether a mapping should be permitted and, if so, what sort of* mapping we're capable of supporting |
determine_vm_flags | we've determined that we can make the mapping, now translate what we* now know into VMA flags |
do_mmap_private | set up a private mapping or an anonymous shared mapping |
do_mmap | handle mapping creation for uClinux |
do_munmap | release a mapping* - under NOMMU conditions the chunk to be unmapped must be backed by a single* VMA, though it need not cover the whole VMA |
vm_munmap | |
do_mremap | expand (or shrink) an existing mapping, potentially moving it at the same* time (controlled by the MREMAP_MAYMOVE flag and available VM space)* under NOMMU conditions, we only permit changing a mapping's size, and only* as long as it stays within the |
SYSCALL_DEFINE5 | |
sync_mm_rss | |
add_mm_rss_vec | |
print_bad_pte | This function is called to print an error when a bad pte* is found. For example, we might have a PFN-mapped pte in* a region that doesn't allow it.* The calling function must still handle the error. |
print_vma_addr | Print the name of a VMA. |
do_mincore | Do a chunk of "sys_mincore()". We've already checked* all the arguments, we hold the mmap semaphore: we should* just return the amount of info we're asked for. |
SYSCALL_DEFINE3 | The mincore(2) system call |
mlock_fixup | mlock_fixup - handle mlock[all]/munlock[all] requests.* Filters out "special" vmas -- VM_LOCKED never gets set for these, and* munlock is a no-op. However, for some special vmas, we go ahead and* populate the ptes. |
apply_vma_lock_flags | |
count_mm_mlocked_page_nr | Go through vma areas and sum size of mlocked* vma pages, as return value.* Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT)* is also counted.* Return value: previously mlocked page counts |
do_mlock | |
SYSCALL_DEFINE2 | |
apply_mlockall_flags | Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)* and translate into the appropriate modifications to mm->def_flags and/or the* flags for all current VMAs.* There are a couple of subtleties with this |
SYSCALL_DEFINE1 | |
sys_munlockall | |
SYSCALL_DEFINE1 | |
do_mmap | The caller must hold down_write(&current->mm->mmap_sem). |
mmap_region | |
unmapped_area | |
unmapped_area_topdown | |
arch_get_unmapped_area | Get an address range which is currently unmapped.* For shmat() with addr=0.* Ugly calling convention alert:* Return value with the low bits set means error value,* ie* if (ret & ~PAGE_MASK)* error = ret; |
arch_get_unmapped_area_topdown | This mmap-allocator allocates new areas top-down from below the* stack's low limit (the base): |
get_unmapped_area | |
__vm_munmap | |
SYSCALL_DEFINE5 | Emulation of deprecated remap_file_pages() syscall. |
do_brk_flags | this is really a simplified "do_mmap". it only handles* anonymous maps. eventually we may be able to do some* brk-specific accounting here. |
vm_brk_flags | |
may_expand_vm | Return true if the calling process may expand its vm space by the passed* number of pages |
special_mapping_mremap | |
mprotect_fixup | |
do_mprotect_pkey | pkey==-1 when doing a legacy mprotect() |
vma_to_resize | |
mremap_to | |
SYSCALL_DEFINE5 | Expand (or shrink) an existing mapping, potentially moving it at the* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)* MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise* This option implies MREMAP_MAYMOVE. |
SYSCALL_DEFINE3 | MS_SYNC syncs the entire file - including mappings |
bad_page | |
warn_alloc_show_mem | |
warn_alloc | |
__alloc_pages_may_oom | |
__gfp_pfmemalloc_flags | Distinguish requests which really need access to full memory* reserves from oom victims which can live with a portion of it |
should_reclaim_retry | Checks whether it makes sense to retry the reclaim to make a forward progress* for the given allocation request.* We give up when we either have tried MAX_RECLAIM_RETRIES in a row* without success, or when we couldn't even meet the watermark if we |
__alloc_pages_slowpath | |
madvise_willneed | Schedule all required I/O operations. Do not wait for completion. |
madvise_free_pte_range | |
madvise_dontneed_free | |
madvise_remove | Application wants to free up the pages and associated backing store.* This is effectively punching a hole into the middle of a file. |
SYSCALL_DEFINE3 | The madvise(2) system call |
SYSCALL_DEFINE1 | |
mem_cgroup_throttle_swaprate | |
__frontswap_unuse_pages | |
hugetlb_no_page | |
do_set_mempolicy | Set the process memory policy |
do_get_mempolicy | Retrieve NUMA policy |
new_page | Allocate a new page for page migration based on vma policy |
do_mbind | |
mempolicy_slab_node | Depending on the memory policy provide a node from which to allocate the* next slab entry. |
init_nodemask_of_mempolicy | init_nodemask_of_mempolicy* If the current task's mempolicy is "default" [NULL], return 'false'* to indicate default policy |
__mpol_dup | Slow path of a mempolicy duplicate |
slob_free_pages | |
kmem_freepages | Interface to system's page release. |
alternate_node_alloc | Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.* If we are in_interrupt, then process context, including cpusets and* mempolicy, may not apply and should not be used for allocation policy. |
__do_cache_alloc | |
__free_slab | |
set_track | |
kasan_enable_current | |
kasan_disable_current | |
print_error_description | |
print_address_stack_frame | |
report_enabled | |
__unmap_and_move | |
migrate_pages | migrate_pages - migrate the pages specified in a list, to the free pages* supplied as the target for the page migration*@from: The list of pages to be migrated.*@get_new_page: The function used to allocate free pages to be used |
__thp_get_unmapped_area | |
thp_get_unmapped_area | |
should_force_charge | |
get_mem_cgroup_from_current | If current->active_memcg is non-NULL, do not fallback to current->mm->memcg. |
mem_cgroup_oom | |
mem_cgroup_oom_synchronize | mem_cgroup_oom_synchronize - complete memcg OOM handling*@handle: actually kill/wait or just clean up the OOM state* This has to be called at the end of a page fault if the memcg OOM* handler was enabled |
mem_cgroup_handle_over_high | Scheduled by try_charge() to be executed from the userland return path* and reclaims memory over the high limit. |
try_charge | |
kill_proc | Send all the processes who have the page mapped a signal.* ``action optional'' if they are not immediately affected by the error* ``action required'' if error happened in current execution context |
create_object | Create the metadata (struct kmemleak_object) corresponding to an allocated* memory block and add it to the object_list and object_tree_root. |
scan_should_stop | Memory scanning is a long process and it needs to be interruptable. This* function checks whether such interrupt condition occurred. |
get_vaddr_frames | get_vaddr_frames() - map virtual addresses to pfns*@start: starting user address*@nr_frames: number of pages / pfns from start to map*@gup_flags: flags modifying lookup behaviour*@vec: structure which receives pages / pfns of the addresses mapped. |
sysvipc_proc_open | |
ksys_msgget | |
ksys_msgctl | |
compat_ksys_msgctl | |
do_msgsnd | |
do_msgrcv | |
ksys_semget | |
check_qop | check_qop: Test if a queued operation sleeps on the semaphore semnum |
ksys_semctl | |
compat_ksys_semctl | |
get_undo_list | If the task doesn't already have an undo_list, then allocate one* here. We guarantee there is only one thread using this undo list,* and current is THE ONE* If this allocation and assignment succeeds, but later |
do_semtimedop | |
newseg | newseg - Create a new shared memory segment*@ns: namespace*@params: ptr to the structure that contains key, size and shmflg* Called with shm_ids.rwsem held as a writer. |
ksys_shmget | |
ksys_shmctl | |
compat_ksys_shmctl | |
do_shmat | Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.* NOTE! Despite the name, this is NOT a direct system call entrypoint. The* "raddr" thing points to kernel space, and there has to be a wrapper around* this. |
ksys_shmdt | detach and kill segment if marked destroyed.* The work is done in shm_close. |
get_ipc | |
proc_ipc_dointvec_minmax_orphans | |
proc_ipc_sem_dointvec | |
mqueue_init_fs_context | |
wq_add | Adds current to info->e_wait_q[sr] before element with smaller prio |
do_mq_open | |
SYSCALL_DEFINE1 | |
get_mq | |
punt_bios_to_rescuer | |
bio_alloc_bioset | bio_alloc_bioset - allocate a bio for I/O*@gfp_mask: the GFP_* mask given to the slab allocator*@nr_iovecs: number of iovecs to pre-allocate*@bs: the bio_set to allocate from |
bio_uncopy_user | bio_uncopy_user - finish previously mapped bio*@bio: bio being terminated* Free pages allocated from bio_copy_user_iov() and write back data* to user space in case of a read. |
generic_make_request | generic_make_request - hand a buffer to its device driver for I/O*@bio: The bio describing the location in memory and on the device.* generic_make_request() is used to make I/O requests of block* devices |
submit_bio | submit_bio - submit a bio to the block device layer for I/O*@bio: The &struct bio which describes the I/O* submit_bio() is very similar in purpose to generic_make_request(), and* uses that function to do most of the work. Both are fairly rough |
blk_check_plugged | |
blk_finish_plug | blk_finish_plug - mark the end of a batch of submitted I/O*@plug: The &struct blk_plug passed to blk_start_plug()* Description:* Indicate that a batch of I/O submissions is complete. This function* must be paired with an initial call to blk_start_plug() |
blk_poll | blk_poll - poll for IO completions*@q: the queue*@cookie: cookie passed back at IO submission time*@spin: whether to spin for completions* Description:* Poll for completions on the passed in queue. Returns number of* completed entries found |
blk_mq_sched_assign_ioc | |
scsi_cmd_ioctl | |
blkcg_maybe_throttle_current | blkcg_maybe_throttle_current - throttle the current task if it has been marked* This is only called if we've been marked with set_notify_resume() |
blkcg_schedule_throttle | blkcg_schedule_throttle - this task needs to check for throttling*@q: the request queue IO was submitted on*@use_memdelay: do we charge this to memory delay for PSI* This is called by the IO controller when we know there's delay accumulated |
bfq_bio_merge | |
bfq_get_queue | |
bfq_split_bfqq | Returns NULL if a new bfqq should be allocated, or the old bfqq if this* was the last process referring to that bfqq. |
key_set_index_key | Finalise an index key to include a part of the description actually in the* index key, to set the domain tag and to calculate the hash. |
key_change_session_keyring | Replace a process's session keyring on behalf of one of its children when* the target process is about to resume userspace execution. |
request_key_auth_new | Create an authorisation token for /sbin/request-key or whoever to gain* access to the caller's security data. |
cap_inh_is_capped | Determine whether the inheritable capabilities are limited to the old* permitted set. Returns 1 if they are limited, 0 if they are not. |
cap_safe_nice | Rationale: code calling task_setscheduler, task_setioprio, and* task_setnice, assumes that* |
cap_task_prctl | cap_task_prctl - Implement process control functions for this security module*@option: The process control function requested*@arg2, @arg3, @arg4, @arg5: The argument data for this function* Allow process control functions (sys_prctl()) to alter |
cap_mmap_addr | cap_mmap_addr - check if able to map given addr*@addr: address attempting to be mapped* If the process is attempting to map memory below dac_mmap_min_addr they need* CAP_SYS_RAWIO. The other parameters to this function are unused by the |
ordered_lsm_init | |
mmap_prot | |
cred_init_security | Initialise the security for the init task |
selinux_bprm_committing_creds | Prepare a process for imminent new credential changes due to exec |
selinux_bprm_committed_creds | Clean up the process immediately after the installation of new credentials* due to exec |
selinux_nlmsg_perm | |
smack_sk_alloc_security | smack_sk_alloc_security - Allocate a socket blob*@sk: the socket*@family: unused*@gfp_flags: memory allocation flags* Assign Smack pointers to current* Returns 0 on success, -ENOMEM if there's no memory |
smack_socket_post_create | smack_socket_post_create - finish socket setup*@sock: the socket*@family: protocol family*@type: unused*@protocol: unused*@kern: unused* Sets the netlabel information on the socket* Returns 0 on success, and error code otherwise |
smack_init | smack_init - initialize the smack system* Returns 0 on success, -ENOMEM if there's no memory |
smack_privileged | smack_privileged - are all privilege requirements met*@cap: The requested capability* Is the task privileged and allowed to be privileged* by the onlycap rule.* Returns true if the task is allowed to be privileged, false if it's not. |
dump_common_audit_data | dump_common_audit_data - helper to dump common audit data*@a : common audit data |
tomoyo_manager | tomoyo_manager - Check whether the current process is a policy manager.* Returns true if the current process is permitted to modify policy* via /sys/kernel/security/tomoyo/ interface.* Caller holds tomoyo_read_lock(). |
tomoyo_warn_oom | tomoyo_warn_oom - Print out of memory warning message.*@function: Function's name. |
tomoyo_domain | tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.* Returns pointer to "struct tomoyo_domain_info" for current thread. |
tomoyo_cred_prepare | tomoyo_cred_prepare - Target for security_prepare_creds().*@new: Pointer to "struct cred".*@old: Pointer to "struct cred".*@gfp: Memory allocation flags.* Returns 0. |
tomoyo_file_open | tomoyo_file_open - Target for security_file_open().*@f: Pointer to "struct file".*@cred: Pointer to "struct cred".* Returns 0 on success, negative value otherwise. |
tomoyo_get_exe | tomoyo_get_exe - Get tomoyo_realpath() of current process.* Returns the tomoyo_realpath() of current process on success, NULL otherwise.* This function uses kzalloc(), so the caller must call kfree()* if this function didn't return NULL. |
d_namespace_path | d_namespace_path - lookup a name associated with a given path*@path: path to lookup (NOT NULL)*@buf: buffer to store path to (NOT NULL)*@name: Returns - pointer for start of path name with in @buf (NOT NULL)*@flags: flags controlling path |
aa_setprocattr_changehat | aa_setprocattr_changehat - handle procattr interface to change_hat*@args: args received from writing to /proc/ |
apparmor_file_open | |
apparmor_bprm_committing_creds | apparmor_bprm_committing_creds - do task cleanup on committing new creds*@bprm: binprm for the exec (NOT NULL) |
set_init_ctx | set_init_ctx - set a task context and profile on the first task.* TODO: allow setting an alternate profile than unconfined |
__aa_transition_rlimits | __aa_transition_rlimits - apply new profile rlimits*@old_l: old label on task (NOT NULL)*@new_l: new label with rlimits to apply (NOT NULL) |
report_access | defers execution because cmdline access can sleep |
lockdown_is_locked_down | lockdown_is_locked_down - Find out if the kernel is locked down*@what: Tag to use in notice generated if lockdown is in effect |
integrity_audit_msg | |
ksys_chdir | |
SYSCALL_DEFINE1 | |
ksys_chroot | |
SYSCALL_DEFINE1 | Careful here! We test whether the file pointer is NULL before* releasing the fd. This ensures that one clone task can't release* an fd while another clone is opening it. |
cp_old_stat | For backward compatibility? Maybe this should be moved* into arch/i386 instead? |
acct_arg_size | The nascent bprm->mm is not visible until exec_mmap() but it can* use a lot of memory, account these pages in current->mm temporary* for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we* change the counter back via acct_arg_size(0). |
bprm_mm_init | Create a new mm_struct and populate it with a temporary stack* vm_area_struct. We don't have enough context at this point to set the stack* flags, permissions, and offset, so we use temporary values. We'll update* them later in setup_arg_pages(). |
setup_arg_pages | Finalizes the stack vm_area_struct. The flags and permissions are updated,* the stack is optionally relocated, and some extra space is added. |
exec_mmap | |
flush_old_exec | Calling this is the point of no return. None of the failures will be* seen by userspace since either the process is already taking a fatal* signal (via de_thread() or coredump), or will have SEGV raised |
setup_new_exec | |
finalize_exec | Runs immediately before start_thread() takes over. |
prepare_bprm_creds | Prepare credentials and lock ->cred_guard_mutex.* install_exec_creds() commits the new creds and drops the lock.* Or, if exec fails before, free_bprm() should release ->cred* and unlock. |
free_bprm | |
install_exec_creds | Install the new credentials for this executable |
exec_binprm | |
__do_execve_file | sys_execve() executes a new program. |
set_binfmt | |
set_nameidata | |
restore_nameidata | |
set_root | |
nd_jump_link | Helper to directly jump to a known parsed path from ->get_link,* caller must have taken a reference to path beforehand. |
may_follow_link | may_follow_link - Check symlink following for unsafe situations*@nd: nameidata pathwalk data* In the case of the sysctl_protected_symlinks sysctl being enabled,* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is |
path_init | must be paired with terminate_walk() |
select_estimate_accuracy | |
poll_select_finish | |
max_select_fd | |
core_sys_select | We can actually return ERESTARTSYS instead of EINTR, but I'd* like to be certain this leads to no problems. So I return* EINTR just for safety.* Update: ERESTARTSYS breaks at least the xview clock binary, so |
SYSCALL_DEFINE3 | |
compat_core_sys_select | We can actually return ERESTARTSYS instead of EINTR, but I'd* like to be certain this leads to no problems. So I return* EINTR just for safety.* Update: ERESTARTSYS breaks at least the xview clock binary, so |
inode_lru_isolate | Isolate the inode from the LRU in preparation for freeing it |
alloc_fd | |
get_unused_fd_flags | |
put_unused_fd | |
fd_install | |
__close_fd_get_file | variant of __close_fd that gets a ref on the file for later fput |
__fget | |
__fget_light | Lightweight file lookup - no refcnt increment if fd table isn't shared.* You can use this instead of fget if you satisfy all of the following* conditions:* 1) You must call fput_light before exiting the syscall and returning control* to userspace (i |
set_close_on_exec | We only lock f_pos if we have threads or if the file might be* shared with another process. In both cases we'll have an elevated* file count (done either by fdget() or by fork()). |
get_close_on_exec | |
replace_fd | |
ksys_dup3 | |
SYSCALL_DEFINE2 | |
__is_local_mountpoint | __is_local_mountpoint - Test to see if dentry is a mountpoint in the* current mount namespace |
check_mnt | |
do_umount | |
may_mount | Is the caller allowed to modify his namespace? |
mnt_ns_loop | |
attach_recursive_mnt | @source_mnt : mount tree to be attached*@nd : place the mount tree @source_mnt is attached*@parent_nd : if non-null, detach the source_mnt from its parent and* store the parent mount and mountpoint dentry |
open_detached_copy | |
SYSCALL_DEFINE3 | Create a kernel mount representation for a new, prepared superblock* (specified by fs_fd) and attach to an open_tree-like file descriptor. |
SYSCALL_DEFINE2 | pivot_root Semantics:* Moves the root file system of the current process to the directory put_old,* makes new_root as the new root file system of the current process, and sets* root/cwd of all processes which had them on the current root to new_root |
init_mount_tree | |
current_chrooted | |
mount_too_revealing | |
mntns_install | |
wb_workfn | Handle writeback of dirty data for the device backed by this bdi. Also* reschedules periodically and does kupdated style flushing. |
block_dump___mark_inode_dirty | |
splice_direct_to_actor | splice_direct_to_actor - splices data directly between two non-pipes*@in: file to splice from*@sd: actor information on where to splice to*@actor: handles the data splicing* Description:* This is a special case helper to splice directly between two |
d_path | d_path - return the path of a dentry*@path: path to report*@buf: buffer to return value in*@buflen: buffer length* Convert a dentry into an ASCII path name |
SYSCALL_DEFINE2 | NOTE! The user-level library version returns a* character pointer |
unshare_fs_struct | |
current_umask | |
alloc_fs_context | alloc_fs_context - Create a filesystem context.*@fs_type: The filesystem type.*@reference: The dentry from which this one derives (or NULL)*@sb_flags: Filesystem/superblock flags (SB_*)*@sb_flags_mask: Applicable members of @sb_flags |
SYSCALL_DEFINE2 | Open a filesystem by name so that it can be configured for mounting.* We are allowed to specify a container in which the filesystem will be* opened, thereby indicating which namespaces will be used (notably, which |
SYSCALL_DEFINE3 | Pick a superblock into a context for reconfiguration. |
SYSCALL_DEFINE2 | There are no bdflush tunables left. But distributions are* still running obsolete flush daemons, so we terminate them here.* Use of bdflush() is deprecated and will be removed in a future kernel. |
mpage_alloc | |
fcntl_dirnotify | When a process calls fcntl to attach a dnotify watch to a directory it ends* up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be* attached to the fsnotify_mark. |
inotify_new_group | |
SYSCALL_DEFINE2 | fanotify syscalls |
signalfd_poll | |
signalfd_dequeue | |
do_signalfd4 | |
handle_userfault | The locking rules involved in returning VM_FAULT_RETRY depending on* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and* FAULT_FLAG_KILLABLE are not straightforward |
userfaultfd_event_wait_completion | |
SYSCALL_DEFINE1 | |
aio_free_ring | |
aio_setup_ring | |
ioctx_alloc | ioctx_alloc* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
lookup_ioctx | |
SYSCALL_DEFINE2 | sys_io_setup:* Create an aio_context capable of receiving at least nr_events |
COMPAT_SYSCALL_DEFINE2 | |
SYSCALL_DEFINE1 | sys_io_destroy:* Destroy the aio_context specified. May cancel any outstanding * AIOs and block on completion. Will fail with -ENOSYS if not* implemented. May fail with -EINVAL if the context pointed to* is invalid. |
io_grab_files | |
io_sq_offload_start | |
io_sqe_buffer_register | |
__io_worker_unuse | Note: drops the wqe->lock if returning true! The caller must re-acquire* the lock in that case. Some callers need to restart handling if this* happens, so we can't just re-acquire the lock on behalf of the caller. |
io_worker_exit | |
io_worker_start | |
io_worker_handle_work | |
set_encryption_policy | |
flock_make_lock | Fill in a file_lock structure with an appropriate FLOCK lock. |
flock64_to_posix_lock | |
lease_init | Initialize a lease, use the default lock manager operations |
fcntl_setlk | Apply the lock described by l to an open file descriptor.* This implements both the F_SETLK and F_SETLKW commands of fcntl(). |
fcntl_setlk64 | Apply the lock described by l to an open file descriptor.* This implements both the F_SETLK and F_SETLKW commands of fcntl(). |
locks_remove_posix | This function is called when the file is being removed* from the task's fd array. POSIX locks belonging to this task* are deleted at this time. |
create_aout_tables | create_aout_tables() parses the env- and arg-strings in new user* memory and creates the pointer tables from them, and puts their* addresses on the "stack", returning the new stack pointer value. |
load_aout_binary | These are the functions used to load a.out style executables and shared* libraries. There is no binary dependent code anywhere else. |
set_brk | |
create_elf_tables | |
elf_map | |
load_elf_binary | |
load_elf_fdpic_binary | load an fdpic binary into various bits of memory |
create_elf_fdpic_tables | present useful information to the program by shovelling it onto the new* process's stack |
flat_core_dump | |
create_flat_tables | create_flat_tables() parses the env- and arg-strings in new user* memory and creates the pointer tables from them, and puts their* addresses on the "stack", recording the new stack pointer value. |
calc_reloc | |
load_flat_file | |
load_flat_binary | These are the functions used to load flat style executables and shared* libraries. There is no binary dependent code anywhere else. |
cn_print_exe_file | |
format_corename | format_corename will inspect the pattern parameter, and output a* name into corename, which must have space for at least* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. |
coredump_finish | |
umh_pipe_setup | umh_pipe_setup* helper function to customize the process used* to collect the core in userspace. Specifically* it sets up a pipe and installs it as fd 0 (stdin)* for the process. Returns 0 on success, or* PTR_ERR on failure. |
do_coredump | |
drop_caches_sysctl_handler | |
get_vfsmount_from_fd | |
iomap_do_writepage | Write out a dirty page.* For delalloc space on the page we need to allocate space and flush it.* For unwritten space on the page we need to start the conversion to* regular allocated space. |
parse_mount_options | parse_mount_options():* Set @opts to mount options specified in @data. If an option is not* specified in @data, set it to its default value.* Note: @data may be NULL (in which case all options are set to default). |
ramfs_mmu_get_unmapped_area | |
ratelimit_state_exit | |
kernel_signal_stop | |
set_restore_sigmask | Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. |
clear_restore_sigmask | |
test_restore_sigmask | |
test_and_clear_restore_sigmask | |
restore_saved_sigmask | |
sigmask_to_save | |
on_sig_stack | True if we are on the alternate signal stack. |
sas_ss_flags | |
sigsp | |
current_gfp_context | Applies per-task gfp context to the given allocation flags.* PF_MEMALLOC_NOIO implies GFP_NOIO* PF_MEMALLOC_NOFS implies GFP_NOFS* PF_MEMALLOC_NOCMA implies no allocation from CMA region. |
memalloc_noio_save | memalloc_noio_save - Marks implicit GFP_NOIO allocation scope |
memalloc_noio_restore | memalloc_noio_restore - Ends the implicit GFP_NOIO scope.*@flags: Flags to restore.* Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.* Always make sure that the given flags is the return value from the |
memalloc_nofs_save | memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope |
memalloc_nofs_restore | memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.*@flags: Flags to restore.* Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.* Always make sure that the given flags is the return value from the |
memalloc_noreclaim_save | |
memalloc_noreclaim_restore | |
memalloc_nocma_save | |
memalloc_nocma_restore | |
memalloc_use_memcg | memalloc_use_memcg - Starts the remote memcg charging scope.*@memcg: memcg to charge.* This function marks the beginning of the remote memcg charging scope. All the* __GFP_ACCOUNT allocations till the end of the scope will be charged to the* given memcg. |
memalloc_unuse_memcg | memalloc_unuse_memcg - Ends the remote memcg charging scope.* This function marks the end of the remote memcg charging scope started by* memalloc_use_memcg(). |
get_current_ioprio | If the calling process has set an I/O priority, use that. Otherwise, return* the default I/O priority. |
arch_compat_alloc_user_space | |
guest_enter_irqoff | |
guest_exit_irqoff | |
utsname | |
ptrace_event | ptrace_event - possibly stop for a ptrace event notification*@event: %PTRACE_EVENT_* value to report*@message: value for %PTRACE_GETEVENTMSG to return* Check whether @event is enabled and, if so, report @event and @message* to the ptrace parent. |
ptrace_event_pid | ptrace_event_pid - possibly stop for a ptrace event notification*@event: %PTRACE_EVENT_* value to report*@pid: process identifier for %PTRACE_GETEVENTMSG to return* Check whether @event is enabled and, if so, report @event and @pid* to the ptrace parent |
ptrace_init_task | Initialize ptrace state for a new (forked) task |
cgroup_init_kthreadd | |
cgroup_kthread_ready | |
nmi_uaccess_okay | Blindly accessing user memory from NMI context can be dangerous* if we're in the middle of switching the current user task or* switching the loaded mm. It can also be dangerous if we* interrupted some kernel code that was temporarily using a* different mm. |
vma_is_foreign | We only want to enforce protection keys on the current process* because we effectively have no access to PKRU for other* processes or any way to tell *which * PKRU in a threaded* process we could use |
mem_cgroup_enter_user_fault | |
mem_cgroup_exit_user_fault | |
current_is_kswapd | |
ksys_close | In contrast to sys_close(), this stub does not check whether the syscall* should or should not be restarted, but returns the raw error codes from* __close_fd(). |
ksys_personality | |
current_restore_flags | |
set_fs | |
pagefault_disabled_inc | |
pagefault_disabled_dec | |
pagefault_disabled | Is the pagefault handler disabled? If so, user access methods will not sleep. |
write_pkru | |
sk_page_frag | Return a suitable page_frag |
__fpregs_load_activate | Internal helper, do not use directly. Use switch_fpu_return() instead. |
switch_fpu_prepare | FPU state switching for scheduling.* This is a two-stage process:* - switch_fpu_prepare() saves the old state.* This is done within the context of the old process.* - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state |
switch_fpu_finish | Load PKRU from the FPU context if available. Delay loading of the* complete FPU state until the return to userland. |
ptrace_report_syscall | ptrace report for syscall entry and exit looks identical. |
tracehook_notify_resume | tracehook_notify_resume - report when about to return to user mode*@regs: user-mode registers of @current task* This is called when %TIF_NOTIFY_RESUME has been set. Now we are* about to return to user mode, and the user state in @regs can be |
set_current_oom_origin | |
clear_current_oom_origin | |
current_wq_worker | current_wq_worker - return struct worker if %current is a workqueue worker |
io_wq_current_is_worker | |
trace_test_and_set_recursion | |
trace_clear_recursion | |
blk_mq_plug | blk_mq_plug() - Get caller context plug*@q: request queue*@bio : the bio being submitted by the caller context* Plugging, by design, may delay the insertion of BIOs into the elevator in* order to increase BIO merging opportunities |
create_io_context | create_io_context - try to create task->io_context*@gfp_mask: allocation mask*@node: allocation node* If %current->io_context is %NULL, allocate a new io_context and install* it. Returns the current %current->io_context which may be %NULL if |
tomoyo_sys_getppid | tomoyo_sys_getppid - Copy of getppid().* Returns parent process's PID.* Alpha does not have getppid() defined. To be able to build this module on* Alpha, I have to copy getppid() from kernel/timer.c. |
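
All of the callers in the table reach get_current() through the `current` macro rather than by name. A hypothetical sketch of that pattern (example_report_current and its log text are illustrative, not kernel code):

```c
#include <linux/printk.h>
#include <linux/sched.h>

/* Hypothetical example of how the functions above use get_current(). */
static void example_report_current(void)
{
	/*
	 * `current` expands to get_current() and yields the task_struct
	 * of whatever task is executing on this CPU right now.
	 */
	pr_info("running as %s (pid %d)\n",
		current->comm, task_pid_nr(current));
}
```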