Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: mm/percpu.c    Create Date: 2022-07-28 14:27:19
Last Modify: 2022-05-23 13:52:24    Copyright © Brick

Name: pcpu_alloc - the percpu allocator
@size: size of area to allocate in bytes
@align: alignment of area (max PAGE_SIZE)
@reserved: allocate from the reserved chunk if available
@gfp: allocation flags

Allocate a percpu area of @size bytes aligned at @align. If @gfp doesn't contain GFP_KERNEL, the allocation is atomic.

Proto: static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, gfp_t gfp)

Type: void __percpu *

Parameter:

Type      Parameter Name
size_t    size
size_t    align
bool      reserved
gfp_t     gfp
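
For orientation, pcpu_alloc() is the common backend of the public percpu API listed under Caller at the end of this report. A minimal kernel-module-style sketch of a typical user follows; the hits counter and its helpers are hypothetical, while alloc_percpu(), this_cpu_inc(), per_cpu_ptr(), for_each_possible_cpu() and free_percpu() are the real API:

    /* Hypothetical per-CPU hit counter built on the percpu API.
     * alloc_percpu(u64) ends up in
     * pcpu_alloc(sizeof(u64), __alignof__(u64), false, GFP_KERNEL). */
    #include <linux/types.h>
    #include <linux/percpu.h>

    static u64 __percpu *hits;

    static int hits_init(void)
    {
            hits = alloc_percpu(u64);       /* zero-filled, see line 1734 */
            if (!hits)
                    return -ENOMEM;
            return 0;
    }

    static void hits_record(void)
    {
            this_cpu_inc(*hits);            /* lock-free on the local CPU */
    }

    static u64 hits_total(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += *per_cpu_ptr(hits, cpu);
            return sum;
    }

    static void hits_exit(void)
    {
            free_percpu(hits);
    }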
1590  pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN), keeping only the flags that may be passed to the backing allocators
1591  is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL
1592  do_warn = Not (gfp & __GFP_NOWARN)
1593  warn_limit = 10
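
These three values steer the rest of the function: pcpu_gfp keeps only the flags that may be forwarded to the backing page allocator, is_atomic is true whenever @gfp lacks any part of GFP_KERNEL (the caller may not sleep), and do_warn suppresses failure reports for __GFP_NOWARN callers. A small userspace model of the bit arithmetic; the X_* flag values are made-up stand-ins for the real GFP constants:

    /* Userspace model of lines 1590-1592; only the bit arithmetic
     * mirrors the kernel, the X_* values are invented. */
    #include <assert.h>
    #include <stdbool.h>

    #define X_RECLAIM 0x1   /* stand-in for __GFP_DIRECT_RECLAIM */
    #define X_IO      0x2
    #define X_FS      0x4
    #define X_KERNEL  (X_RECLAIM | X_IO | X_FS)  /* stand-in for GFP_KERNEL */
    #define X_NOWARN  0x8   /* stand-in for __GFP_NOWARN */
    #define X_NORETRY 0x10  /* stand-in for __GFP_NORETRY */

    int main(void)
    {
            unsigned int gfp = X_IO | X_NOWARN;  /* GFP_NOWAIT-like caller */
            unsigned int pcpu_gfp = gfp & (X_KERNEL | X_NORETRY | X_NOWARN);
            bool is_atomic = (gfp & X_KERNEL) != X_KERNEL;
            bool do_warn = !(gfp & X_NOWARN);

            assert(is_atomic);   /* missing reclaim/fs bits => atomic path */
            assert(!do_warn);    /* caller asked to suppress warnings */
            assert(pcpu_gfp == (X_IO | X_NOWARN));
            return 0;
    }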
1598  void __percpu *ptr
1607  If unlikely(align < PCPU_MIN_ALLOC_SIZE) Then align = PCPU_MIN_ALLOC_SIZE
1610  size = ALIGN(size, PCPU_MIN_ALLOC_SIZE)
1611  bits = size >> PCPU_MIN_ALLOC_SHIFT
1612  bit_align = align >> PCPU_MIN_ALLOC_SHIFT
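
Lines 1610-1612 convert the request from bytes into allocation units, the granularity of the chunk bitmaps: @size is rounded up to a multiple of PCPU_MIN_ALLOC_SIZE, then size and alignment are expressed as bit counts. A sketch of the arithmetic, assuming the usual PCPU_MIN_ALLOC_SHIFT of 2 (4-byte units):

    /* Sketch of the byte-to-unit conversion; MIN_ALLOC_SHIFT = 2 is
     * assumed here to stand in for PCPU_MIN_ALLOC_SHIFT. */
    #include <stdio.h>

    #define MIN_ALLOC_SHIFT 2
    #define MIN_ALLOC_SIZE  (1 << MIN_ALLOC_SHIFT)
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t size = 6, align = 8;

            size = ALIGN_UP(size, MIN_ALLOC_SIZE);       /* 6 -> 8 bytes */
            size_t bits = size >> MIN_ALLOC_SHIFT;       /* 8 bytes -> 2 units */
            size_t bit_align = align >> MIN_ALLOC_SHIFT; /* 8-byte -> 2-unit align */

            printf("size=%zu bits=%zu bit_align=%zu\n", size, bits, bit_align);
            return 0;
    }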
1614  If unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || !is_power_of_2(align)) Then
1616  WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", size, align)
1618  Return NULL
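
The is_power_of_2() check on @align uses the classic n & (n - 1) idiom: clearing the lowest set bit leaves zero exactly when a single bit was set, and zero itself is rejected separately. A sketch:

    /* Sketch of the power-of-two test applied to @align. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool is_pow2(size_t n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            assert(is_pow2(1) && is_pow2(8) && is_pow2(4096));
            assert(!is_pow2(0) && !is_pow2(12));
            return 0;
    }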
1621  If Not is_atomic Then
1627  If gfp & __GFP_NOFAIL Then mutex_lock(&pcpu_alloc_mutex)
1629  Else if mutex_lock_killable(&pcpu_alloc_mutex) Then Return NULL
1633  spin_lock_irqsave(&pcpu_lock, flags)
1636  If reserved && pcpu_reserved_chunk (the optional reserved chunk; NULL when the reserved region doesn't exist) Then
1637  chunk = pcpu_reserved_chunk
1639  off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic)
1640  If off < 0 Then
1641  err = "alloc from reserved chunk failed"
1642  Go to fail_unlock
1645  off = pcpu_alloc_area(chunk, bits, bit_align, off)
1646  If off >= 0 Then Go to area_found
1649  err = "alloc from reserved chunk failed"
1650  Go to fail_unlock
1653  restart:
1655  For slot = pcpu_size_to_slot(size); When slot < pcpu_nr_slots cycle, over each chunk in that slot's list
1657  off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic)
1659  If off < 0 Then
1662  Continue
1665  off = pcpu_alloc_area(chunk, bits, bit_align, off)
1666  If off >= 0 Then Go to area_found
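
The search is deliberately two-phase: pcpu_find_block_fit() only proposes an offset from the chunk's bitmap metadata, and pcpu_alloc_area() then verifies and commits it, so failure at either step just advances the loop to the next chunk or slot. A heavily simplified userspace model of that first-fit scan over one chunk's bitmap; the real code also keeps per-block contig hints and, for atomic callers, only considers already-populated pages:

    /* Toy first-fit search over one chunk bitmap: find a candidate,
     * then commit it.  Everything here is a simplification. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NBITS 32
    static bool used[NBITS];               /* one flag per allocation unit */

    static int find_fit(size_t bits, size_t bit_align)
    {
            for (size_t off = 0; off + bits <= NBITS; off += bit_align) {
                    bool free_run = true;
                    for (size_t i = 0; i < bits; i++) {
                            if (used[off + i]) {
                                    free_run = false;
                                    break;
                            }
                    }
                    if (free_run)
                            return (int)off;  /* candidate offset, in units */
            }
            return -1;                        /* caller tries the next chunk */
    }

    static int alloc_area(size_t bits, int off)
    {
            for (size_t i = 0; i < bits; i++)
                    used[off + i] = true;     /* commit the area */
            return off;
    }

    int main(void)
    {
            int off = find_fit(2, 2);
            if (off >= 0)
                    off = alloc_area(2, off);
            printf("allocated at unit offset %d\n", off);
            return 0;
    }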
1672  spin_unlock_irqrestore(&pcpu_lock, flags)
1679  If is_atomic Then
1680  err = "atomic alloc failed, no space left"
1681  Go to fail
1684  If list_empty(&pcpu_slot[pcpu_nr_slots - 1]), i.e. there is no empty chunk left, Then
1685  chunk = pcpu_create_chunk(pcpu_gfp)
1686  If Not chunk Then
1687  err = "failed to allocate new chunk"
1688  Go to fail
1691  spin_lock_irqsave(&pcpu_lock, flags)
1692  pcpu_chunk_relocate(chunk, -1), putting the new chunk in the appropriate chunk slot
1693  Else
1694  spin_lock_irqsave(&pcpu_lock, flags)
1697  Go to restart
1699  area_found:
1700  pcpu_stats_area_alloc(chunk, size)
1701  spin_unlock_irqrestore(&pcpu_lock, flags)
1704  If Not is_atomic Then
1707  page_start = PFN_DOWN(off)
1708  page_end = PFN_UP(off + size)
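
PFN_DOWN() rounds the start byte offset down to a page frame number and PFN_UP() rounds the end up, so the half-open page range covers every page the new area touches even partially. A sketch assuming 4 KiB pages (PAGE_SHIFT = 12); the example offsets are also assumptions:

    /* Sketch of the page-range computation of lines 1707-1708. */
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long off = 4000, size = 300;  /* crosses a page boundary */
            unsigned long page_start = PFN_DOWN(off);      /* 0 */
            unsigned long page_end = PFN_UP(off + size);   /* 2 */

            printf("populate pages [%lu, %lu)\n", page_start, page_end);
            return 0;
    }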
1710  For each still-unpopulated page region [rs, re) within [page_start, page_end) cycle
1712  WARN_ON(chunk->immutable), the flag that forbids any [de]population of the chunk
1714  ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp)
1716  spin_lock_irqsave(&pcpu_lock, flags)
1717  If ret Then
1719  err = "failed to populate"
1720  Go to fail_unlock
1722  pcpu_chunk_populated(chunk, rs, re), updating the bookkeeping now that pages in [rs, re) have been populated
1723  spin_unlock_irqrestore(&pcpu_lock, flags)
1726  mutex_unlock(&pcpu_alloc_mutex)
1729  If pcpu_nr_empty_pop_pages (the count of empty populated pages, protected by pcpu_lock; the reserved chunk doesn't contribute) < PCPU_EMPTY_POP_PAGES_LOW Then pcpu_schedule_balance_work()
1733  for_each_possible_cpu(cpu)
1734  memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size)
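
pcpu_chunk_addr(chunk, cpu, 0) yields the given CPU's copy of the chunk base, so adding @off reaches that CPU's copy of the new area; zeroing it on every possible CPU is what makes the returned allocation zero-filled. A toy model of the addressing, with a flat array of fixed-size per-CPU units (all sizes made up):

    /* Toy model of per-CPU addressing inside one chunk: each CPU's
     * unit is base + cpu * UNIT_SIZE, and an allocation occupies the
     * same offset in every unit. */
    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS   4
    #define UNIT_SIZE 64

    static char chunk_base[NR_CPUS * UNIT_SIZE];

    static void *chunk_addr(int cpu)
    {
            return chunk_base + cpu * UNIT_SIZE;
    }

    int main(void)
    {
            size_t off = 16, size = 8;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    memset((char *)chunk_addr(cpu) + off, 0, size);

            printf("zeroed %zu bytes at offset %zu in each of %d units\n",
                   size, off, NR_CPUS);
            return 0;
    }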
1736  ptr = __addr_to_pcpu_ptr(chunk->base_addr + off)
1737  kmemleak_alloc_percpu(ptr, size, gfp)
1739  trace_percpu_alloc_percpu(reserved, is_atomic, size, align, chunk->base_addr, off, ptr)
1742  Return ptr
1744  fail_unlock:
1745  spin_unlock_irqrestore(&pcpu_lock, flags)
1746  fail:
1747  trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align)
1749  If Not is_atomic && do_warn && warn_limit Then
1750  pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", size, align, is_atomic, err)
1752  dump_stack()
1753  If Not --warn_limit Then pr_info("limit reached, disable warning\n")
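
Because warn_limit is a function-static counter (initialized to 10 at line 1593), at most ten failed allocations are reported per boot before the warning disables itself. A condensed sketch of this self-limiting pattern, with a limit of 3 so the cutoff shows quickly:

    /* Sketch of the self-disabling warning counter. */
    #include <stdio.h>

    static void report_failure(void)
    {
            static int warn_limit = 3;

            if (!warn_limit)
                    return;                  /* warnings disabled */
            fprintf(stderr, "allocation failed\n");
            if (!--warn_limit)
                    fprintf(stderr, "limit reached, disable warning\n");
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 5; i++)
                    report_failure();
            return 0;
    }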
1756  If is_atomic Then
1758  pcpu_atomic_alloc_failed = true
1759  pcpu_schedule_balance_work()
1760  Else
1761  mutex_unlock(&pcpu_alloc_mutex)
1763  Return NULL
Caller

Name                      Describe
__alloc_percpu_gfp        allocate dynamic percpu area: a zero-filled percpu area of @size bytes aligned at @align (max PAGE_SIZE), with @gfp allocation flags
__alloc_percpu            allocate one copy of the object for every present cpu
__alloc_reserved_percpu   allocate reserved percpu area: a zero-filled percpu area of @size bytes aligned at @align from the reserved percpu area, if the arch has set it up