Function report |
Source Code: mm/memory.c |
Create Date: 2022-07-28 14:42:37 |
Last Modify: 2020-03-12 14:18:49 | Copyright © Brick |
Name: We enter with non-exclusive mmap_sem (to exclude vma changes, but allow concurrent faults), and pte mapped but not yet locked. We return with pte unmapped and unlocked. We return with the mmap_sem locked or unlocked in the same cases as does filemap_fault().
Proto: vm_fault_t do_swap_page(struct vm_fault *vmf)
Type: vm_fault_t
Parameter:
Type | Parameter |
---|---|
struct vm_fault * | vmf |
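For orientation, the walkthrough below keeps expanding vm_fault fields into their field comments. A minimal sketch of the relevant part of struct vm_fault as it looks in kernels of this vintage (around v5.5); the exact field set varies by release, so treat this as an approximation rather than the authoritative definition in include/linux/mm.h:

```c
/* Sketch only: fields of struct vm_fault that do_swap_page() touches. */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching 'address' */
	pte_t orig_pte;			/* Value of PTE at the time of fault */
	struct page *page;		/* ->fault handlers should return a page here */
	pte_t *pte;			/* Pointer to pte entry matching 'address',
					 * NULL if the page table hasn't been allocated */
	spinlock_t *ptl;		/* Page table lock: protects the pte page
					 * table if 'pte' is not NULL, otherwise pmd */
	/* ... plus gfp_mask, pgoff, and other fields not used below */
};
```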
2904 | vma = vmf->vma (the target VMA) |
2910 | exclusive = 0 |
2911 | ret = 0 |
2913 | If Not pte_unmap_same(vma->vm_mm, pmd, pte, orig_pte), i.e. the entry (which was read non-atomically) changed under us, Then Go to out |
2916 | entry = pte_to_swp_entry(orig_pte): convert the arch-dependent pte representation into an arch-independent swp_entry_t (see the first sketch after this listing) |
2918 | If is_migration_entry(entry) Then |
2921 | Else if is_device_private_entry(entry) Then |
2922 | vmf->page = device_private_entry_to_page(entry) |
2923 | ret = vmf->page->pgmap->ops->migrate_to_ram(vmf) |
2924 | Else if is_hwpoison_entry(entry) Then |
2925 | ret = VM_FAULT_HWPOISON |
2926 | Else |
2928 | ret = VM_FAULT_SIGBUS |
2930 | Go to out |
2935 | page = lookup_swap_cache(entry, vma, vmf->address) |
2938 | If Not page Then |
2939 | si = swp_swap_info(entry) |
2941 | If si->flags & SWP_SYNCHRONOUS_IO && __swap_count(entry) == 1 Then (bypass the swap cache; see the second sketch after this listing) |
2946 | If page Then |
2947 | __SetPageLocked(page) |
2948 | __SetPageSwapBacked(page) |
2949 | set_page_private(page, entry.val) |
2951 | swap_readpage(page, true) |
2953 | Else |
2954 | page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf) |
2959 | If Not page Then |
2969 | Go to unlock |
2973 | ret = VM_FAULT_MAJOR |
2974 | count_vm_event(PGMAJFAULT): bump the major-fault counters |
2976 | Else if PageHWPoison(page) Then |
2989 | If Not locked Then |
2990 | ret |= VM_FAULT_RETRY |
2991 | Go to out_release |
3000 | If unlikely(!PageSwapCache(page) || page_private(page) != entry.val) && swapcache Then Go to out_page |
3004 | page = ksm_might_need_to_copy(page, vma, vmf->address) |
3005 | If unlikely(!page) Then |
3011 | If mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, false) Then |
3013 | ret = VM_FAULT_OOM |
3014 | Go to out_page |
3026 | ret = VM_FAULT_SIGBUS |
3027 | Go to out_nomap |
3043 | If vmf->flags & FAULT_FLAG_WRITE && reuse_swap_page(page, NULL) Then (see the third sketch after this listing) |
3046 | ret |= VM_FAULT_WRITE |
3049 | flush_icache_page(vma, page) |
3050 | If pte_swp_soft_dirty(vmf->orig_pte) Then pte = pte_mksoft_dirty(pte) |
3052 | set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte) |
3057 | If unlikely(page != swapcache && swapcache) Then |
3058 | page_add_new_anon_rmap(page, vma, Faulting virtual address , false) |
3061 | Else |
3063 | mem_cgroup_commit_charge(page, memcg, true, false) |
3064 | activate_page(page) |
3068 | If mem_cgroup_swap_full(page) || vma->vm_flags & VM_LOCKED || PageMlocked(page) Then try_to_free_swap(page) |
3085 | If vmf->flags & FAULT_FLAG_WRITE Then |
3086 | ret |= do_wp_page(vmf) |
3087 | If ret & VM_FAULT_ERROR Then ret &= VM_FAULT_ERROR |
3089 | Go to out |
3094 | unlock : |
3095 | pte_unmap_unlock(vmf->pte, vmf->ptl) |
3096 | out : |
3097 | Return ret |
3098 | out_nomap : |
3099 | mem_cgroup_cancel_charge(page, memcg, false) |
3100 | pte_unmap_unlock(vmf->pte, vmf->ptl) |
3101 | out_page : |
3103 | out_release : |
3109 | Return ret |
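The swap-entry decode at rows 2913-2930 is the standard swp_entry_t round trip plus the non-swap special cases. A reconstruction of how that block reads in kernels of this vintage (around v5.5); treat it as a hedged sketch, not necessarily the exact revision annotated here:

```c
	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
		goto out;

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
			/* wait for the migration to finish, then refault */
			migration_entry_wait(vma->vm_mm, vmf->pmd,
					     vmf->address);
		} else if (is_device_private_entry(entry)) {
			vmf->page = device_private_entry_to_page(entry);
			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
```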
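Rows 2935-2954 decide between readahead through the swap cache and a direct synchronous read. The SWP_SYNCHRONOUS_IO branch only fires when the entry has a single user, so the freshly allocated page can skip the swap cache entirely (typical for zram-style devices). A sketch of that block, again reconstructed against kernels around v5.5:

```c
	page = lookup_swap_cache(entry, vma, vmf->address);
	swapcache = page;

	if (!page) {
		struct swap_info_struct *si = swp_swap_info(entry);

		if (si->flags & SWP_SYNCHRONOUS_IO &&
		    __swap_count(entry) == 1) {
			/* skip swapcache: sole user of a synchronous device */
			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
					      vmf->address);
			if (page) {
				__SetPageLocked(page);
				__SetPageSwapBacked(page);
				set_page_private(page, entry.val);
				lru_cache_add_anon(page);
				swap_readpage(page, true);
			}
		} else {
			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
						vmf);
			swapcache = page;
		}
	}
```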
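Rows 3043-3068 rebuild the pte and hook the page back into the reverse map, LRU, and memcg accounting. The write bit is granted up front only when reuse_swap_page() says the page is exclusively ours; otherwise FAULT_FLAG_WRITE stays set and rows 3085-3087 fall through to do_wp_page(). A sketch of that sequence as it looks around v5.5:

```c
	pte = mk_pte(page, vma->vm_page_prot);
	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		vmf->flags &= ~FAULT_FLAG_WRITE;
		ret |= VM_FAULT_WRITE;
		exclusive = RMAP_EXCLUSIVE;
	}
	flush_icache_page(vma, page);
	if (pte_swp_soft_dirty(vmf->orig_pte))
		pte = pte_mksoft_dirty(pte);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
	vmf->orig_pte = pte;

	/* ksm may have handed back a brand-new copy of the page */
	if (unlikely(page != swapcache && swapcache)) {
		page_add_new_anon_rmap(page, vma, vmf->address, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	} else {
		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
		mem_cgroup_commit_charge(page, memcg, true, false);
		activate_page(page);
	}

	swap_free(entry);
	if (mem_cgroup_swap_full(page) ||
	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
```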
Name | Describe |
---|---|
handle_pte_fault | These routines also need to handle stuff like marking pages dirty and/or accessed for architectures that don't do it in hardware (most RISC architectures) |
__collapse_huge_page_swapin | Bring missing pages in from swap, to complete THP collapse. Only done if khugepaged_scan_pmd believes it is worthwhile. Called and returns without pte mapped or spinlocks held, but with mmap_sem held to protect against vma changes. |
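For context on the first caller: handle_pte_fault routes any non-present, non-empty pte to do_swap_page. A minimal sketch of that dispatch (reconstructed from kernels of this vintage, not the full function):

```c
	if (!vmf->pte) {
		/* no pte yet: fresh anonymous or file-backed fault */
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);
		else
			return do_fault(vmf);
	}

	/* pte exists but the page is not resident: swap (or special) entry */
	if (!pte_present(vmf->orig_pte))
		return do_swap_page(vmf);
```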