Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: fs/mpage.c Create Date: 2022-07-28 20:15:32
Last Modify:2020-03-12 14:18:49 Copyright©Brick
home page Tree
Annotation | Kernel | Tools | Activity | Download | SCCT | Chinese

Name: This is the worker routine which does all the work of mapping the disk blocks and constructing the largest possible bios, submitting them for IO if the blocks are not contiguous on the disk.

Proto:static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)

Type: struct bio *

Parameter:

Type | Parameter Name
struct mpage_readpage_args *args
158  page = page
159  inode = host
160  blkbits = i_blkbits
161  blocks_per_page = PAGE_SIZE >> blkbits
162  blocksize = 1 << blkbits
163  map_bh = map_bh
169  first_hole = blocks_per_page
170  struct block_device * bdev = NULL
172  fully_mapped = 1
178  If is_readahead Then
179  op_flags = REQ_RAHEAD
180  gfp = readahead_gfp_mask(mapping) — NOTE(review): the original annotation substituted a stray comment ("See page-flags.h for PAGE_MAPPING_FLAGS") for the argument; readahead_gfp_mask takes the page's address_space mapping — confirm against fs/mpage.c
181  Else
182  op_flags = 0
183  gfp = mapping_gfp_constraint(mapping, GFP_KERNEL) — restricts the given gfp_mask to what the mapping allows
186  If page_has_buffers(page) Then Go to confused
189  block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits) — the page's offset within the mapping, converted from page units to block units
190  last_block = block_in_file + nr_pages * blocks_per_page
191  last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits — i_size_read() gives a consistent snapshot of the file size (on 32-bit preemptible/UP configs the i_size read/write must be atomic with respect to the local CPU)
192  If last_block > last_block_in_file Then last_block = last_block_in_file
194  page_block = 0
199  nblocks = size of mapping >> blkbits
200  If buffer_mapped(map_bh) && block_in_file > first_logical_block && block_in_file < first_logical_block + nblocks Then
203  map_offset = block_in_file - first_logical_block
204  last = nblocks - map_offset
206  cycle
207  If relative_block == last Then
209  Break
211  If page_block == blocks_per_page Then Break
215  page_block++
216  block_in_file++
218  bdev = b_bdev
224  the page this bh is mapped to = page
225  When page_block < blocks_per_page cycle
226  buffer state bitmap (see above) = 0
227  size of mapping = 0
229  If block_in_file < last_block Then
231  If get_block(inode, block_in_file, map_bh, 0) Then Go to confused
236  If Not buffer_mapped(map_bh) Then
237  fully_mapped = 0
240  page_block++
241  block_in_file++
242  Continue
253  Go to confused
256  If first_hole != blocks_per_page Then Go to confused
260  If page_block && blocks[page_block - 1] != start block number - 1 Then Go to confused
262  nblocks = size of mapping >> blkbits
263  cycle
264  If relative_block == nblocks Then
266  Break
267  Else if page_block == blocks_per_page Then Break
270  page_block++
271  block_in_file++
273  bdev = b_bdev
276  If first_hole != blocks_per_page Then
277  zero_user_segment(page, first_hole << blkbits, PAGE_SIZE)
278  If first_hole == 0 Then
281  Go to out
283  Else if fully_mapped Then
284  SetPageMappedToDisk(page)
287  If fully_mapped && blocks_per_page == 1 && Not PageUptodate(page) && cleancache_get_page(page) == 0 Then
289  SetPageUptodate(page)
290  Go to confused
296  If bio && last_block_in_bio != blocks[0] - 1 Then bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio)
299  alloc_new :
300  If (bio == NULL) Then
301  If first_hole == blocks_per_page Then
306  bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), min_t(int, nr_pages, BIO_MAX_PAGES), gfp) — min_t returns the minimum of two values using the specified type
310  If (bio == NULL) Then Go to confused
314  length = first_hole << blkbits
315  If bio_add_page(bio, page, length, 0) < length Then
316  bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio)
317  Go to alloc_new
320  relative_block = block_in_file - first_logical_block
321  nblocks = size of mapping >> blkbits
322  If buffer_boundary(map_bh) && relative_block == nblocks || first_hole != blocks_per_page Then bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio)
325  Else last_block_in_bio = blocks[blocks_per_page - 1]
327  out :
328  Return bio
330  confused :
331  If bio Then bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio)
333  If Not PageUptodate(page) Then block_read_full_page(page, get_block) — fall back to the generic "read page" function for block devices that have the normal get_block functionality
335  Else unlock_page(page) — unlocks the page and wakes up sleepers in wait_on_page_locked(); also wakes sleepers in wait_on_page_writeback(), because the wakeup mechanism between PageLocked pages and PageWriteback pages is shared. (NOTE(review): the original annotation labelled this "lock_page", but the quoted description — and the actual v5.5 source — is unlock_page.)
337  Go to out
Caller
NameDescribe
mpage_readpages | mpage_readpages - populate an address space with some pages & start reads against them. @mapping: the address_space; @pages: the address of a list_head which contains the target pages.
mpage_readpage | This isn't called much at all