Function report

Linux Kernel

v5.5.9

Source Code: mm/hugetlb.c

Name:hugetlb_no_page

Proto:static vm_fault_t hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, unsigned long idx, unsigned long address, pte_t *ptep, unsigned int flags)

Type:vm_fault_t

Parameter:

Type                        Parameter Name
struct mm_struct *          mm
struct vm_area_struct *     vma
struct address_space *      mapping
unsigned long               idx
unsigned long               address
pte_t *                     ptep
unsigned int                flags
3793  h = hstate_vma(vma)
3794  ret = VM_FAULT_SIGBUS
3795  anon_rmap = 0
3800  haddr = address & huge_page_mask(h)
3801  bool new_page = false
3808  If is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED) Then
3809  pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid)
3811  Return ret
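Condensed from mm/hugetlb.c (v5.5), the guard at lines 3808-3811 reads as follows; note that ret still holds VM_FAULT_SIGBUS at this point:

    /* A failed COW by the original mapper leaves the child with no
     * reservation; refuse the fault and let the caller deliver SIGBUS. */
    if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
        pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
                            current->pid);
        return ret;    /* ret is still VM_FAULT_SIGBUS here */
    }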
3818  retry :
3819  page = find_lock_page(mapping, idx)
3820  If Not page Then
3821  size = i_size_read(mapping->host) >> huge_page_shift(h)
3822  If idx >= size Then Go to out
3828  If userfaultfd_missing(vma) Then
3852  Go to out
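The report elides the body of the userfaultfd branch (lines 3829-3851). Condensed from the v5.5 source, it hands the missing-page fault to the userspace monitor, dropping the hugetlb fault mutex around handle_userfault():

    if (userfaultfd_missing(vma)) {
        u32 hash;
        struct vm_fault vmf = {
            .vma = vma,
            .address = haddr,
            .flags = flags,
            /* other fields deliberately left uninitialized */
        };

        /* hugetlb_fault_mutex must be dropped before handling the
         * userfault and reacquired afterwards. */
        hash = hugetlb_fault_mutex_hash(mapping, idx);
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        ret = handle_userfault(&vmf, VM_UFFD_MISSING);
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        goto out;
    }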
3855  page = alloc_huge_page(vma, haddr, 0)
3856  If IS_ERR(page) Then
3869  ptl = huge_pte_lock(h, mm, ptep)
3870  If Not huge_pte_none(huge_ptep_get(ptep)) Then
3871  ret = 0
3872  spin_unlock(ptl)
3873  Go to out
3875  spin_unlock(ptl)
3876  ret = vmf_error(PTR_ERR(page))
3877  Go to out
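Lines 3855-3877 handle allocation failure carefully: page migration clears and rewrites ptes without taking the fault mutex, so a racing fault here may be spurious rather than a true out-of-memory condition. The pte is therefore rechecked under the page-table lock before an error is returned. Condensed from the v5.5 source:

    page = alloc_huge_page(vma, haddr, 0);
    if (IS_ERR(page)) {
        /* If a racing task already installed the pte, the fault is
         * spurious; report success instead of an allocation error. */
        ptl = huge_pte_lock(h, mm, ptep);
        if (!huge_pte_none(huge_ptep_get(ptep))) {
            ret = 0;
            spin_unlock(ptl);
            goto out;
        }
        spin_unlock(ptl);
        ret = vmf_error(PTR_ERR(page));    /* e.g. -ENOMEM -> VM_FAULT_OOM */
        goto out;
    }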
3879  clear_huge_page(page, address, pages_per_huge_page(h))
3880  __SetPageUptodate(page)
3881  new_page = true
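The fresh huge page is zeroed before it becomes reachable; __SetPageUptodate() is what publishes the zeroed contents, since it issues the write barrier ordering the stores in clear_huge_page() before the uptodate bit:

    clear_huge_page(page, address, pages_per_huge_page(h));
    __SetPageUptodate(page);    /* smp_wmb(): zeroing visible before PG_uptodate */
    new_page = true;            /* remember to mark the page active later */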
3883  If vma->vm_flags & VM_MAYSHARE Then
3885  If err Then (err is the return of huge_add_to_page_cache(page, mapping, idx))
3891  Else (anonymous mapping: lock_page(page), then anon_vma_prepare(vma))
3894  ret = VM_FAULT_OOM (when anon_vma_prepare(vma) fails)
3895  Go to backout_unlocked
3897  anon_rmap = 1
3899  Else (the page was already in the page cache)
3908  Go to backout_unlocked (taken when PageHWPoison(page), with ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(hstate_index(h)))
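Lines 3883-3908 collapse three branches. For a shared mapping the new page is inserted into the page cache (retrying the lookup on -EEXIST); for a private one it is locked and an anon_vma is prepared; and a page that was already found in the cache is rejected if hardware-poisoned. Condensed from the v5.5 source:

    if (vma->vm_flags & VM_MAYSHARE) {
        int err = huge_add_to_page_cache(page, mapping, idx);
        if (err) {
            put_page(page);
            if (err == -EEXIST)    /* lost the race: page now in cache */
                goto retry;
            goto out;
        }
    } else {
        lock_page(page);
        if (unlikely(anon_vma_prepare(vma))) {
            ret = VM_FAULT_OOM;
            goto backout_unlocked;
        }
        anon_rmap = 1;
    }

    /* Outer else (line 3899): page found in the cache by find_lock_page().
     * A memory error between mmap() and fault leaves no hwpoison swap
     * entry, so the PG_hwpoison bit must be checked here. */
    if (unlikely(PageHWPoison(page))) {
        ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(hstate_index(h));
        goto backout_unlocked;
    }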
3918  If flags & FAULT_FLAG_WRITE && Not (vma->vm_flags & VM_SHARED) Then
3919  If vma_needs_reservation(h, vma, haddr) < 0 Then
3920  ret = VM_FAULT_OOM
3921  Go to backout_unlocked
3924  vma_end_reservation(h, vma, haddr)
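Lines 3918-3924: a write fault on a private mapping is certain to COW below, so the pending reservation is examined now, while no spinlock is held, because recording it may need to allocate. Condensed:

    if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
        if (vma_needs_reservation(h, vma, haddr) < 0) {
            ret = VM_FAULT_OOM;
            goto backout_unlocked;
        }
        /* Just decrements the count, does not deallocate. */
        vma_end_reservation(h, vma, haddr);
    }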
3927  ptl = huge_pte_lock(h, mm, ptep)
3928  size = i_size_read(mapping->host) >> huge_page_shift(h)
3929  If idx >= size Then Go to backout
3932  ret = 0
3933  If Not huge_pte_none(huge_ptep_get(ptep)) Then Go to backout
3936  If anon_rmap Then
3937  ClearPagePrivate(page)
3938  hugepage_add_new_anon_rmap(page, vma, haddr)
3939  Else page_dup_rmap(page, true)
3941  new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED)))
3943  set_huge_pte_at(mm, haddr, ptep, new_pte)
3945  hugetlb_count_add(pages_per_huge_page(h), mm)
3946  If flags & FAULT_FLAG_WRITE && Not (vma->vm_flags & VM_SHARED) Then
3948  ret = hugetlb_cow(mm, vma, address, ptep, page, ptl)
3951  spin_unlock(ptl)
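Lines 3927-3951 do the actual installation under the page-table lock: size and pte are revalidated (truncation or a concurrent fault may have won the race), the rmap is wired up, the pte is written, and a private write fault runs the COW immediately to avoid a second fault. Condensed from the v5.5 source:

    ptl = huge_pte_lock(h, mm, ptep);
    size = i_size_read(mapping->host) >> huge_page_shift(h);
    if (idx >= size)
        goto backout;    /* raced with truncation */

    ret = 0;
    if (!huge_pte_none(huge_ptep_get(ptep)))
        goto backout;    /* someone else faulted it in first */

    if (anon_rmap) {
        ClearPagePrivate(page);
        hugepage_add_new_anon_rmap(page, vma, haddr);
    } else
        page_dup_rmap(page, true);
    new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                            && (vma->vm_flags & VM_SHARED)));
    set_huge_pte_at(mm, haddr, ptep, new_pte);

    hugetlb_count_add(pages_per_huge_page(h), mm);
    if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
        /* Optimization: do the COW now, without a second fault. */
        ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);

    spin_unlock(ptl);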
3958  If new_page Then set_page_huge_active(page) (never called for a tail page)
3961  unlock_page(page)
3962  out :
3963  Return ret
3965  backout :
3966  spin_unlock(ptl)
3967  backout_unlocked :
3968  unlock_page(page)
3969  restore_reserve_on_error(h, vma, haddr, page)
3970  put_page(page)
3971  Go to out
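The two backout labels differ only in whether the page-table lock is still held; both then unlock the page, undo the reservation taken for it, and drop the reference:

    backout:
        spin_unlock(ptl);
    backout_unlocked:
        unlock_page(page);
        restore_reserve_on_error(h, vma, haddr, page);
        put_page(page);
        goto out;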
Caller
Name    Description
hugetlb_fault
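For reference, the call site in hugetlb_fault() (condensed from the v5.5 source): hugetlb_no_page() is entered only when the huge pte is still none, i.e. the page was never faulted in:

    entry = huge_ptep_get(ptep);
    if (huge_pte_none(entry)) {
        ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
        goto out_mutex;
    }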