Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code:kernel\bpf\syscall.c Create Date:2022-07-28 12:52:38
Last Modify:2022-05-19 18:06:12 Copyright © Brick
home page Tree
Annotation kernel can get tool activityDownload SCCTChinese

Name:map_update_elem

Proto:static int map_update_elem(union bpf_attr *attr)

Type:int

Parameter:

TypeParameterName
union bpf_attr *attr
949  __user * ukey = u64_to_user_ptr(key)
950  __user * uvalue = u64_to_user_ptr(value)
951  ufd = attr->map_fd (field of the anonymous struct used by the BPF_MAP_*_ELEM commands)
958  If CHECK_ATTR(BPF_MAP_UPDATE_ELEM) — helper macro to check that unused fields of 'union bpf_attr' are zero — Then Return -EINVAL
961  f = fdget(ufd)
962  map = __bpf_map_get(f) — if an error is returned, fd is released; on success caller should complete fd access with matching fdput()
963  If IS_ERR(map) Then Return PTR_ERR(map)
965  If Not (map_get_sys_perms(map, f) & FMODE_CAN_WRITE — has write method(s)) Then
966  err = -EPERM
967  Go to err_put
970  If flags & BPF_F_LOCK (spin_lock-ed map_lookup/map_update) && Not map_value_has_spin_lock(map) Then
972  err = -EINVAL
973  Go to err_put
976  key = __bpf_copy_key(ukey, key_size)
977  If IS_ERR(key) Then
978  err = PTR_ERR(key)
979  Go to err_put
982  If map_type == BPF_MAP_TYPE_PERCPU_HASH || map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || map_type == BPF_MAP_TYPE_PERCPU_ARRAY || map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE Then value_size = round_up(value_size, 8) * num_possible_cpus() — round_up(@x, @y) rounds @x up to the next multiple of @y (@y must be a power of 2); for arbitrary rounding use roundup()
987  Else value_size = map->value_size
990  err = -ENOMEM
991  value = kmalloc(value_size, GFP_USER | __GFP_NOWARN) — allocate memory for the value
992  If Not value Then Go to free_key
995  err = -EFAULT
996  If copy_from_user(value, uvalue, value_size) != 0 Then Go to free_value
1000  If bpf_map_is_dev_bound(map) Then
1001  err = bpf_map_offload_update_elem(map, key, value, flags)
1002  Go to out
1003  Else if map_type == BPF_MAP_TYPE_CPUMAP || map_type == BPF_MAP_TYPE_SOCKHASH || map_type == BPF_MAP_TYPE_SOCKMAP Then
1006  err = map->ops->map_update_elem(map, key, value, flags)
1007  Go to out
1008  Else if IS_FD_PROG_ARRAY(map) Then
1009  err = bpf_fd_array_map_update_elem(map, f.file, key, value, flags) — only called from syscall
1011  Go to out
1017  preempt_disable() — even if we don't have any preemption, we need preempt disable/enable to be barriers, so that we don't have things like get_user/put_user that can cause faults and scheduling migrate into our preempt-protected region
1018  __this_cpu_inc(bpf_prog_active)
1019  If map_type == BPF_MAP_TYPE_PERCPU_HASH || map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH Then
1021  err = bpf_percpu_hash_update(map, key, value, flags)
1022  Else if map_type == BPF_MAP_TYPE_PERCPU_ARRAY Then
1023  err = bpf_percpu_array_update(map, key, value, flags)
1024  Else if map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE Then
1025  err = bpf_percpu_cgroup_storage_update(map, key, value, flags)
1027  Else if IS_FD_ARRAY(map) Then
1028  rcu_read_lock() — marks the beginning of an RCU read-side critical section; when synchronize_rcu() is invoked on one CPU while other CPUs are within RCU read-side critical sections, synchronize_rcu() is guaranteed to block until after all the others complete
1029  err = bpf_fd_array_map_update_elem(map, f.file, key, value, flags) — only called from syscall
1031  rcu_read_unlock() — marks the end of an RCU read-side critical section; in most situations rcu_read_unlock() is immune from deadlock, though in kernels built with CONFIG_RCU_BOOST it carries extra constraints
1032  Else if map_type == BPF_MAP_TYPE_HASH_OF_MAPS Then
1033  rcu_read_lock() — marks the beginning of an RCU read-side critical section; when synchronize_rcu() is invoked on one CPU while other CPUs are within RCU read-side critical sections, synchronize_rcu() is guaranteed to block until after all the others complete
1034  err = bpf_fd_htab_map_update_elem(map, f.file, key, value, flags) — only called from syscall
1036  rcu_read_unlock() — marks the end of an RCU read-side critical section; in most situations rcu_read_unlock() is immune from deadlock, though in kernels built with CONFIG_RCU_BOOST it carries extra constraints
1037  Else if map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY Then
1039  err = bpf_fd_reuseport_array_update_elem(map, key, value, flags) — called from syscall only; the array takes the fd's refcnt on "nsk", while "osk" and "reuse" are protected by reuseport_lock
1041  Else if map_type == BPF_MAP_TYPE_QUEUE || map_type == BPF_MAP_TYPE_STACK Then
1043  err = map->ops->map_push_elem(map, value, flags)
1044  Else
1045  rcu_read_lock() — marks the beginning of an RCU read-side critical section; when synchronize_rcu() is invoked on one CPU while other CPUs are within RCU read-side critical sections, synchronize_rcu() is guaranteed to block until after all the others complete
1046  err = map->ops->map_update_elem(map, key, value, flags)
1047  rcu_read_unlock() — marks the end of an RCU read-side critical section; in most situations rcu_read_unlock() is immune from deadlock, though in kernels built with CONFIG_RCU_BOOST it carries extra constraints
1049  __this_cpu_dec(bpf_prog_active)
1050  preempt_enable()
1051  maybe_wait_bpf_programs(map)
1052  out :
1053  free_value :
1054  kfree(value)
1055  free_key :
1056  kfree(key)
1057  err_put :
1058  fdput(f)
1059  Return err