Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: mm/swapfile.c    Create Date: 2022-07-28 15:17:18
Last Modify:2020-03-17 22:19:49 Copyright©Brick
home page Tree
Annotation kernel can get tool activity | Download SCCT | Chinese

Name:get_swap_pages

Proto:int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)

Type:int

Parameter:

Type | Parameter Name
int | n_goal
swp_entry_t[] | swp_entries
int | entry_size
994  size = swap_entry_size(entry_size)  — swap_entry_size() is defined as a constant to let the compiler optimize out some code if !CONFIG_THP_SWAP
997  n_ret = 0
1001  WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER)
1003  avail_pgs = atomic_long_read( & nr_swap_pages) / size
1004  If avail_pgs <= 0 Then Go to noswap
1007  If n_goal > SWAP_BATCH Then n_goal = SWAP_BATCH
1010  If n_goal > avail_pgs Then n_goal = avail_pgs
1013  atomic_long_sub(n_goal * size, & nr_swap_pages)
1015  spin_lock( & swap_avail_lock)
1017  start_over :
1018  node = numa_node_id()  — returns the number of the current NUMA node
1019  plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node])  — iterate safely (removal-tolerant) over the per-node list of available (active, not full) swap_info_structs, protected by swap_avail_lock and ordered by priority. Used by get_swap_page() instead of swap_active_head, because swap_active_head includes all swap_info_structs.
1021  plist_requeue(&si->avail_lists[node], &swap_avail_heads[node])  — requeue si at the end of its same-priority entries
1022  spin_unlock( & swap_avail_lock)
1023  spin_lock(&si->lock)  — si->lock protects map-scan related fields (swap_map, lowest_bit, highest_bit, inuse_pages, cluster_next, cluster_nr, free/discard cluster list); other fields change only at swapon/swapoff and are protected by swap_lock
1030  WARN(!si->highest_bit, "swap_info %d in list but !highest_bit\n", si->type)  — highest_bit is the index of the last free slot in swap_map; si->type is the swap area's index
1033  WARN(!(si->flags & SWP_WRITEOK), "swap_info %d in list but !SWP_WRITEOK\n", si->type)  — si->flags holds SWP_USED, SWP_WRITEOK, etc.
1038  Go to nextsi
1040  If size == SWAPFILE_CLUSTER Then n_ret = swap_alloc_cluster(si, swp_entries)  — (only when si->flags has SWP_BLKDEV)
1043  Else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, swp_entries)  — SWAP_HAS_CACHE flags the page as cached, in the first swap_map
1046  spin_unlock(&si->lock)
1047  If n_ret || size == SWAPFILE_CLUSTER Then Go to check_out
1049  pr_debug("scan_swap_map of si %d failed to find offset\n", si->type)
1052  spin_lock( & swap_avail_lock)
1053  nextsi :
1065  If plist_node_empty(&next->avail_lists[node]) Then Go to start_over  — plist_node_empty returns true when the plist_node is not on a list; the swap_avail list may have been modified while the locks were dropped, so if next is no longer on the list, start over
1069  spin_unlock( & swap_avail_lock)
1071  check_out :
1072  If n_ret < n_goal Then atomic_long_add((long)(n_goal - n_ret) * size, & nr_swap_pages)
1075  noswap :
1076  Return n_ret
Caller
NameDescribe
refill_swap_slots_cache | Called with swap slot cache's alloc lock held
get_swap_page