Function report

Linux Kernel v5.5.9


Source Code: mm/gup.c
Create Date: 2022-07-28 14:34:31
Last Modify: 2020-03-12 14:18:49

Name: follow_pmd_mask

Proto: static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx)

Type: struct page *

Parameter:

Type                            Name
struct vm_area_struct *         vma
unsigned long                   address
pud_t *                         pudp
unsigned int                    flags
struct follow_page_context *    ctx
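The ctx argument carries per-walk results back to the caller: a dev_pagemap reference that the caller must drop, and a page mask describing how large a unit the returned page covers. For reference, in v5.5 the struct is defined near the top of mm/gup.c along these lines (verify against the source):

    struct follow_page_context {
        struct dev_pagemap *pgmap;      /* set by the devmap path, put by the caller */
        unsigned int page_mask;         /* (subpages in the returned unit) - 1 */
    };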
316  mm = vma->vm_mm (the address space we belong to)
318  pmd = pmd_offset(pudp, address)
323  pmdval = READ_ONCE(*pmd)
324  If pmd_none(pmdval) Then Return no_page_table(vma, flags)
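Lines 316-324 are the prologue: cache the mm, locate the pmd entry for the address, and snapshot it once. A C sketch of this sequence, reconstructed from the annotation above:

    struct mm_struct *mm = vma->vm_mm;
    pmd_t *pmd, pmdval;
    struct page *page;
    spinlock_t *ptl;

    pmd = pmd_offset(pudp, address);    /* pmd entry covering 'address' */
    /*
     * READ_ONCE() stabilizes the entry in pmdval so the checks below
     * all test one consistent snapshot, not a value changing underneath.
     */
    pmdval = READ_ONCE(*pmd);
    if (pmd_none(pmdval))
        return no_page_table(vma, flags);   /* nothing mapped at this level */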
326  If pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB Then
327  page = follow_huge_pmd(mm, address, pmd, flags)
328  If page Then Return page
330  Return no_page_table(vma, flags)
332  If is_hugepd(__hugepd(pmd_val(pmdval))) Then
333  page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT)
336  If page Then Return page
338  Return no_page_table(vma, flags)
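Lines 326-338 dispose of the two architecture-level huge-page formats before any THP handling: a hugetlbfs PMD (pmd_huge() with VM_HUGETLB set on the VMA), and the hugepd layout used by some architectures such as powerpc. A sketch of the two branches:

    if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
        page = follow_huge_pmd(mm, address, pmd, flags);
        if (page)
            return page;
        return no_page_table(vma, flags);
    }
    if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
        page = follow_huge_pd(vma, address,
                              __hugepd(pmd_val(pmdval)), flags,
                              PMD_SHIFT);
        if (page)
            return page;
        return no_page_table(vma, flags);
    }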
340  retry:
341  If Not pmd_present(pmdval) Then
342  If likely(!(flags & FOLL_MIGRATION)) Then Return no_page_table(vma, flags)
344  VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval))
346  If is_pmd_migration_entry(pmdval) Then pmd_migration_entry_wait(mm, pmd)
348  pmdval = READ_ONCE(*pmd)
353  If pmd_none(pmdval) Then Return no_page_table(vma, flags)
355  Go to retry
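Lines 340-355: a pmd that is not present here is normally a THP migration entry. Without FOLL_MIGRATION (wait for page to replace migration entry) the walk gives up; with it, the function waits for migration to complete, re-reads the entry, and retries. The pmd_none() re-check is needed because MADV_DONTNEED can clear the entry while mmap_sem is held only for read. Sketch:

    retry:
        if (!pmd_present(pmdval)) {
            if (likely(!(flags & FOLL_MIGRATION)))
                return no_page_table(vma, flags);
            VM_BUG_ON(thp_migration_supported() &&
                      !is_pmd_migration_entry(pmdval));
            if (is_pmd_migration_entry(pmdval))
                pmd_migration_entry_wait(mm, pmd);  /* sleep until migration ends */
            pmdval = READ_ONCE(*pmd);
            /* MADV_DONTNEED may have turned the pmd into none meanwhile */
            if (pmd_none(pmdval))
                return no_page_table(vma, flags);
            goto retry;
        }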
357  If pmd_devmap(pmdval) Then
358  ptl = pmd_lock(mm, pmd)
359  page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap)
360  spin_unlock(ptl)
361  If page Then Return page
364  If likely(!pmd_trans_huge(pmdval)) Then Return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap)
367  If (flags & FOLL_NUMA) && pmd_protnone(pmdval) Then Return no_page_table(vma, flags)
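Lines 357-367 cover three quick outcomes: a device-DAX pmd is resolved under the pmd lock by follow_devmap_pmd(), which also stores the dev_pagemap in ctx->pgmap; a pmd that is not trans-huge at all sends the walk down to the PTE level; and with FOLL_NUMA (force NUMA hinting page fault), a protnone pmd is reported as absent so the caller takes the hinting fault. Sketch:

    if (pmd_devmap(pmdval)) {
        ptl = pmd_lock(mm, pmd);
        page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
        spin_unlock(ptl);
        if (page)
            return page;
    }
    if (likely(!pmd_trans_huge(pmdval)))
        return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

    if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
        return no_page_table(vma, flags);   /* force the NUMA hinting fault */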
370  retry_locked:
371  ptl = pmd_lock(mm, pmd)
372  If unlikely(pmd_none(*pmd)) Then
373  spin_unlock(ptl)
374  Return no_page_table(vma, flags)
376  If unlikely(!pmd_present(*pmd)) Then
377  spin_unlock(ptl)
378  If likely(!(flags & FOLL_MIGRATION)) Then Return no_page_table(vma, flags)
380  pmd_migration_entry_wait(mm, pmd)
381  Go to retry_locked
383  If unlikely(!pmd_trans_huge(*pmd)) Then
384  spin_unlock(ptl)
385  Return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap)
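All tests so far used the stale snapshot pmdval; before operating on a trans-huge pmd, lines 370-385 take the pmd lock and revalidate *pmd, because the entry may have changed concurrently. Each stale case is re-handled under the lock: the entry is gone, the entry became a migration entry (wait, then retry the whole locked sequence), or the pmd was split to page tables in the meantime (fall back to the PTE walk). Sketch:

    retry_locked:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(pmd_none(*pmd))) {
            spin_unlock(ptl);
            return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
            spin_unlock(ptl);
            if (likely(!(flags & FOLL_MIGRATION)))
                return no_page_table(vma, flags);
            pmd_migration_entry_wait(mm, pmd);
            goto retry_locked;              /* revalidate from scratch */
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
            spin_unlock(ptl);
            return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }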
387  If flags & (FOLL_SPLIT | FOLL_SPLIT_PMD) Then
389  page = pmd_page(*pmd)
390  If is_huge_zero_page(page) Then
391  spin_unlock(ptl)
392  ret = 0
398  spin_unlock(ptl)
399  Return ERR_PTR(-ENOMEM)
401  spin_unlock(ptl)
405  put_page(page)
406  If pmd_none(*pmd) Then Return no_page_table(vma, flags)
408  Else
409  spin_unlock(ptl)
411  ret = If pte_alloc(mm, pmd) Then -ENOMEM Else 0
414  Return If ret Then ERR_PTR(ret) Else follow_page_pte(vma, address, pmd, flags, &ctx->pgmap)
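The report elides several source lines in this branch; the sketch below fills them in from the upstream v5.5 mm/gup.c and should be checked against that source. Three sub-cases: the huge zero page is split in place with no real page to pin; FOLL_SPLIT pins and splits the compound page itself; FOLL_SPLIT_PMD splits only the page-table entry, leaving the compound page intact:

    if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
        int ret;

        page = pmd_page(*pmd);
        if (is_huge_zero_page(page)) {
            spin_unlock(ptl);
            ret = 0;
            split_huge_pmd(vma, pmd, address);      /* elided in the report */
            if (pmd_trans_unstable(pmd))            /* elided in the report */
                ret = -EBUSY;
        } else if (flags & FOLL_SPLIT) {
            if (unlikely(!try_get_page(page))) {    /* elided in the report */
                spin_unlock(ptl);
                return ERR_PTR(-ENOMEM);
            }
            spin_unlock(ptl);
            lock_page(page);
            ret = split_huge_page(page);            /* elided in the report */
            unlock_page(page);
            put_page(page);
            if (pmd_none(*pmd))
                return no_page_table(vma, flags);
        } else {    /* flags & FOLL_SPLIT_PMD */
            spin_unlock(ptl);
            split_huge_pmd(vma, pmd, address);
            ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
        }

        return ret ? ERR_PTR(ret) :
            follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
    }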
417  page = follow_trans_huge_pmd(vma, address, pmd, flags)
418  spin_unlock(ptl)
419  ctx->page_mask = HPAGE_PMD_NR - 1
420  Return page
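Otherwise the huge page itself is returned: lines 417-420 resolve it under the lock via follow_trans_huge_pmd(), and ctx->page_mask tells the caller the result spans HPAGE_PMD_NR subpages (512, i.e. a 2 MiB unit, on x86-64 with 4 KiB base pages). Sketch:

    page = follow_trans_huge_pmd(vma, address, pmd, flags);
    spin_unlock(ptl);
    ctx->page_mask = HPAGE_PMD_NR - 1;  /* e.g. 511 on x86-64 */
    return page;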
Caller
Name               Describe
follow_pud_mask    PUD-level step of the walk; descends into this function
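For context, follow_pud_mask() hands control down to this function once the PUD-level huge and devmap cases are out of the way. A sketch of the call site, reconstructed from the v5.5 source and worth verifying there:

    /* tail of follow_pud_mask(), sketch */
    if (unlikely(pud_bad(*pud)))
        return no_page_table(vma, flags);

    return follow_pmd_mask(vma, address, pud, flags, ctx);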