Function report

Linux Kernel

v5.5.9


Source Code: mm/huge_memory.c  Create Date: 2022-07-28 16:02:38
Last Modify: 2020-03-12 14:18:49

Name:__split_huge_pmd_locked

Proto:static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze)

Type:void

Parameter:

Type                       Parameter Name
struct vm_area_struct *    vma
pmd_t *                    pmd
unsigned long              haddr
bool                       freeze
2143  mm = vma->vm_mm, the address space we belong to
2147  bool young, write, soft_dirty, pmd_migration = false
2151  VM_BUG_ON(haddr & ~HPAGE_PMD_MASK)
2152  VM_BUG_ON_VMA(vma->vm_start > haddr, vma) : haddr must not lie below the VMA's start address
2153  VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma) : the whole huge-page range must fit before the VMA's end address
2154  VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd))
2157  count_vm_event(THP_SPLIT_PMD) : account the split in the vm event counters
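Taken together, the prologue reconstructed from the annotated lines above looks roughly like this inside __split_huge_pmd_locked(); the local declarations other than mm (line 2143) and the bool line (line 2147) are inferred from their later use, so treat this as a sketch rather than a verbatim copy of v5.5.9:

    struct mm_struct *mm = vma->vm_mm;         /* the address space we belong to */
    struct page *page;
    pgtable_t pgtable;
    pmd_t old_pmd, _pmd;
    bool young, write, soft_dirty, pmd_migration = false;
    unsigned long addr;
    int i;

    VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);        /* haddr must be PMD-aligned */
    VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
    VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
    VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) &&
              !pmd_devmap(*pmd));              /* only huge, devmap or migration pmds */

    count_vm_event(THP_SPLIT_PMD);             /* account the split in the vm event counters */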
2159  If Not vma_is_anonymous(vma) Then
2160  _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd)
2165  If arch_needs_pgtable_deposit() Then zap_deposited_table(mm, pmd)
2167  If vma_is_dax(vma) Then Return
2169  page = pmd_page(_pmd)
2170  If Not PageDirty(page) && pmd_dirty(_pmd) Then set_page_dirty(page) : carry the pmd dirty bit over to the page
2172  If Not PageReferenced(page) && pmd_young(_pmd) Then SetPageReferenced(page)
2174  page_remove_rmap(page, true) : take down the compound pte mapping from the page (the caller holds the pte lock)
2175  put_page(page) : drop the reference, freeing the page (and any associated swap cache) if this was the last user
2176  add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR) : the page is known not to be PageAnon, so the file RSS counter is decremented
2177  Return
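Assembled from lines 2159-2177 above, the non-anonymous (file-backed) branch reads approximately as follows; the inline comments are paraphrases rather than the kernel's own:

    if (!vma_is_anonymous(vma)) {
        _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
        /* The huge page is simply unmapped here rather than remapped as ptes. */
        if (arch_needs_pgtable_deposit())
            zap_deposited_table(mm, pmd);
        if (vma_is_dax(vma))
            return;                        /* DAX: no struct page state to fix up */
        page = pmd_page(_pmd);
        if (!PageDirty(page) && pmd_dirty(_pmd))
            set_page_dirty(page);          /* carry the hardware dirty bit to the page */
        if (!PageReferenced(page) && pmd_young(_pmd))
            SetPageReferenced(page);       /* carry the accessed bit as well */
        page_remove_rmap(page, true);      /* drop the compound rmap */
        put_page(page);
        add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
        return;
    }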
2178  Else if is_huge_zero_pmd(*pmd) Then
2188  Return __split_huge_zero_page_pmd(vma, haddr, pmd)
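The huge zero page case (line 2178 onward) simply delegates: every pte in the range ends up pointing at the small zero page, still write-protected, so no secondary-MMU invalidation is attempted here. A minimal sketch:

    } else if (is_huge_zero_pmd(*pmd)) {
        /*
         * Going from a write-protected huge zero page to write-protected
         * small zero pages, handled entirely by __split_huge_zero_page_pmd().
         */
        return __split_huge_zero_page_pmd(vma, haddr, pmd);
    }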
2211  old_pmd = pmdp_invalidate(vma, haddr, pmd)
2213  pmd_migration = is_pmd_migration_entry(old_pmd)
2214  If unlikely(pmd_migration) Then
2217  entry = pmd_to_swp_entry(old_pmd)
2218  page = pfn_to_page(swp_offset(entry)) : the pfn is stored in the arch-independent swap offset field
2219  write = is_write_migration_entry(entry)
2220  young = false
2221  soft_dirty = pmd_swp_soft_dirty(old_pmd)
2222  Else
2223  page = pmd_page(old_pmd)
2224  If pmd_dirty(old_pmd) Then SetPageDirty(page)
2226  write = pmd_write(old_pmd)
2227  young = pmd_young(old_pmd)
2228  soft_dirty = pmd_soft_dirty(old_pmd)
2230  VM_BUG_ON_PAGE(!page_count(page), page)
2231  page_ref_add(page, HPAGE_PMD_NR - 1)
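For an anonymous THP the pmd is first made non-present with pmdp_invalidate(), so no CPU can hold a huge and a small TLB entry for the same address at the same time; the target page and its write/young/soft-dirty state are then read back either from the migration entry or from the old pmd. A sketch of lines 2211-2231:

    old_pmd = pmdp_invalidate(vma, haddr, pmd);  /* mark not-present, flush the TLB */

    pmd_migration = is_pmd_migration_entry(old_pmd);
    if (unlikely(pmd_migration)) {
        swp_entry_t entry;

        entry = pmd_to_swp_entry(old_pmd);
        page = pfn_to_page(swp_offset(entry));   /* the pfn lives in the swap offset field */
        write = is_write_migration_entry(entry);
        young = false;
        soft_dirty = pmd_swp_soft_dirty(old_pmd);
    } else {
        page = pmd_page(old_pmd);
        if (pmd_dirty(old_pmd))
            SetPageDirty(page);
        write = pmd_write(old_pmd);
        young = pmd_young(old_pmd);
        soft_dirty = pmd_soft_dirty(old_pmd);
    }
    VM_BUG_ON_PAGE(!page_count(page), page);
    page_ref_add(page, HPAGE_PMD_NR - 1);        /* one extra reference per future pte */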
2237  pgtable = pgtable_trans_huge_withdraw(mm, pmd) : withdraw the deposited page table (only after the pmd entry has been invalidated)
2238  pmd_populate(mm, & _pmd, pgtable)
2240  Loop i = 0 .. HPAGE_PMD_NR - 1, with addr = haddr + i * PAGE_SIZE
2247  If freeze || pmd_migration Then
2253  Else
2256  If Not write Then entry = pte_wrprotect(entry)
2258  If Not young Then entry = pte_mkold(entry)
2263  pte = pte_offset_map( & _pmd, addr)
2264  BUG_ON(!pte_none(*pte))
2265  set_pte_at(mm, addr, pte, entry)
2266  atomic_inc(&page[i]._mapcount) : the subpage is now referenced by one more page table entry
2267  pte_unmap(pte)
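Lines 2237-2267 withdraw the page table deposited at collapse time, pre-populate it through the temporary _pmd, and install one pte per subpage: migration entries when freezing or when the pmd itself was a migration entry, ordinary present ptes otherwise. The construction of the present pte (mk_pte(), maybe_mkwrite() and the soft-dirty handling) is not in the extract above and is filled in from v5.5-era sources as I recall them, so take this as a sketch:

    pgtable = pgtable_trans_huge_withdraw(mm, pmd);  /* only after pmdp_invalidate() */
    pmd_populate(mm, &_pmd, pgtable);

    for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
        pte_t entry, *pte;

        if (freeze || pmd_migration) {
            swp_entry_t swp_entry;

            swp_entry = make_migration_entry(page + i, write);
            entry = swp_entry_to_pte(swp_entry);
            if (soft_dirty)
                entry = pte_swp_mksoft_dirty(entry);
        } else {
            entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
            entry = maybe_mkwrite(entry, vma);
            if (!write)
                entry = pte_wrprotect(entry);
            if (!young)
                entry = pte_mkold(entry);
            if (soft_dirty)
                entry = pte_mksoft_dirty(entry);
        }
        pte = pte_offset_map(&_pmd, addr);
        BUG_ON(!pte_none(*pte));           /* the withdrawn table must still be empty */
        set_pte_at(mm, addr, pte, entry);
        atomic_inc(&page[i]._mapcount);    /* the subpage gains one page-table mapping */
        pte_unmap(pte);
    }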
2274  If compound_mapcount(page) > 1 && Not TestSetPageDoubleMap(page) Then
2275  Loop i = 0 .. HPAGE_PMD_NR - 1 : atomic_inc(&page[i]._mapcount)
2279  If atomic_add_negative(-1, compound_mapcount_ptr(page)) Then
2281  __dec_node_page_state(page, NR_ANON_THPS)
2282  If TestClearPageDoubleMap(page) Then
2289  smp_wmb() : make the ptes visible before the pmd is populated
2290  pmd_populate(mm, pmd, pgtable)
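Lines 2274-2290 adjust the mapcounts and only then expose the new page table: PageDoubleMap is set (with one extra _mapcount per subpage) before the compound mapcount is dropped, so page_mapped() never sees a false negative, and the real pmd is repopulated only after an smp_wmb() makes the ptes visible. The atomic_dec loop under TestClearPageDoubleMap() is not in the extract and is reconstructed here:

    /*
     * Set PG_double_map before dropping compound_mapcount to avoid
     * a false-negative page_mapped().
     */
    if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
        for (i = 0; i < HPAGE_PMD_NR; i++)
            atomic_inc(&page[i]._mapcount);
    }

    if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
        /* The last compound mapping is gone. */
        __dec_node_page_state(page, NR_ANON_THPS);
        if (TestClearPageDoubleMap(page)) {
            /* The per-subpage mapcount references are no longer needed. */
            for (i = 0; i < HPAGE_PMD_NR; i++)
                atomic_dec(&page[i]._mapcount);
        }
    }

    smp_wmb();                       /* make the ptes visible before the pmd */
    pmd_populate(mm, pmd, pgtable);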
2292  If freeze Then
2293  Loop i = 0 .. HPAGE_PMD_NR - 1 : page_remove_rmap(page + i, false) and put_page(page + i)
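When freeze is true (the split_huge_page() path) every new pte is a migration entry, so the subpage rmaps and the extra references taken by page_ref_add() above are dropped here. The loop body is not in the extract; this is how it reads in v5.5-era sources as far as I recall:

    if (freeze) {
        for (i = 0; i < HPAGE_PMD_NR; i++) {
            page_remove_rmap(page + i, false);   /* small-page rmap, not compound */
            put_page(page + i);
        }
    }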
Caller
Name                    Describe
__split_huge_pmd