Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code:fs/iomap/direct-io.c Create Date:2022-07-28 20:32:09
Last Modify:2020-03-12 14:18:49 Copyright © Brick

Name:iomap_dio_rw() - always completes O_[D]SYNC writes regardless of whether the IO is being issued as AIO or not. This allows us to optimise pure data writes to use REQ_FUA rather than requiring generic_write_sync() to issue a REQ_FLUSH post write.

Proto:ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, bool wait_for_completion)

Type:ssize_t

Parameter:

Type                          Parameter Name
struct kiocb *                iocb
struct iov_iter *             iter
const struct iomap_ops *      ops
const struct iomap_dio_ops *  dops
bool                          wait_for_completion
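
For context, here is a hedged sketch of how a filesystem's ->read_iter() direct I/O path might call this function; my_dio_read, my_iomap_ops and the locking shown around the call are illustrative names, not code taken from this kernel tree.

    #include <linux/fs.h>
    #include <linux/iomap.h>

    /* Placeholder: the filesystem's own offset-to-extent mapping callbacks. */
    static const struct iomap_ops my_iomap_ops;

    /* Hypothetical caller: a filesystem's direct-I/O read path. */
    static ssize_t my_dio_read(struct kiocb *iocb, struct iov_iter *to)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t ret;

            /* iomap_dio_rw() asserts that inode->i_rwsem is held. */
            inode_lock_shared(inode);
            /* dops (the optional ->end_io hook) may be NULL, as is common for
             * reads; wait_for_completion mirrors whether the kiocb is sync. */
            ret = iomap_dio_rw(iocb, to, &my_iomap_ops, NULL,
                               is_sync_kiocb(iocb));
            inode_unlock_shared(inode);

            /* -EIOCBQUEUED means an async request was queued and will finish
             * through iocb->ki_complete(); otherwise ret is a byte count or a
             * negative error, as for any ->read_iter() implementation. */
            return ret;
    }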
405  mapping = iocb->ki_filp->f_mapping
406  inode = file_inode(iocb->ki_filp)
407  count = iov_iter_count(iter)
408  pos = iocb->ki_pos
409  end = iocb->ki_pos + count - 1, ret = 0
410  flags = IOMAP_DIRECT
414  lockdep_assert_held(&inode->i_rwsem)
416  If Not count Then Return 0
419  If WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion) Then Return -EIO
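
The WARN_ON above guards an impossible combination: a synchronous kiocb has no completion callback, so nothing could deliver the result if the caller did not wait. A minimal sketch of the helper, roughly as it is defined in include/linux/fs.h for this kernel series:

    /* A kiocb is synchronous when no ->ki_complete callback is set,
     * so the submitter itself must wait for the I/O. */
    static inline bool is_sync_kiocb(struct kiocb *kiocb)
    {
            return kiocb->ki_complete == NULL;
    }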
422  dio = kmalloc(sizeof(*dio), GFP_KERNEL)
423  If Not dio Then Return -ENOMEM
426  dio->iocb = iocb
427  atomic_set(&dio->ref, 1)
428  dio->size = 0
429  dio->i_size = i_size_read(inode) (sample i_size once at submission time)
430  dio->dops = dops
431  dio->error = 0
432  dio->flags = 0
434  dio->submit.iter = iter
435  dio->submit.waiter = current (the submitting task)
436  dio->submit.cookie = BLK_QC_T_NONE
437  dio->submit.last_queue = NULL
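
For orientation, a condensed sketch of the iomap_dio bookkeeping structure that lines 426-437 initialise (layout paraphrased from fs/iomap/direct-io.c of this series; field names should be read as indicative):

    struct iomap_dio {
            struct kiocb                    *iocb;   /* request being served */
            const struct iomap_dio_ops      *dops;   /* optional ->end_io hook */
            loff_t                          i_size;  /* i_size at submission */
            loff_t                          size;    /* bytes submitted so far */
            atomic_t                        ref;     /* submission + bio refs */
            unsigned                        flags;   /* IOMAP_DIO_* private flags */
            int                             error;   /* first error (cmpxchg'd) */
            bool                            wait_for_completion;
            union {
                    /* used during submission and for sync completion: */
                    struct {
                            struct iov_iter         *iter;
                            struct task_struct      *waiter;
                            struct request_queue    *last_queue;
                            blk_qc_t                cookie;
                    } submit;
                    /* used for deferred aio completion: */
                    struct {
                            struct work_struct      work;
                    } aio;
            };
    };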
439  If iov_iter_rw(iter) == READ Then
440  If pos >= dio->i_size Then Go to out_free_dio
443  If iter_is_iovec(iter) Then dio->flags |= IOMAP_DIO_DIRTY
445  Else
446  flags |= IOMAP_WRITE (writing, so the mapping may need to allocate blocks)
447  dio->flags |= IOMAP_DIO_WRITE
450  If iocb->ki_flags & IOCB_DSYNC Then dio->flags |= IOMAP_DIO_NEED_SYNC
459  If (iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC Then dio->flags |= IOMAP_DIO_WRITE_FUA (datasync-only write: optimistically try FUA so the post-write flush can be skipped)
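
Put back into C, the write-side flag selection at lines 446-459 reads roughly as follows (a sketch assembled from the walkthrough, not a verbatim copy of the kernel body):

    flags |= IOMAP_WRITE;                   /* mapping may allocate blocks */
    dio->flags |= IOMAP_DIO_WRITE;

    /* O_SYNC/O_DSYNC writes need sync processing at completion time. */
    if (iocb->ki_flags & IOCB_DSYNC)
            dio->flags |= IOMAP_DIO_NEED_SYNC;

    /*
     * For datasync-only writes, optimistically try REQ_FUA: if every bio
     * ends up being FUA, the post-write cache flush can be skipped and
     * IOMAP_DIO_NEED_SYNC is cleared again before completion (line 529).
     */
    if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
            dio->flags |= IOMAP_DIO_WRITE_FUA;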
463  If iocb->ki_flags & IOCB_NOWAIT Then
464  If filemap_range_has_page(mapping, pos, end) Then (cached pages exist in the range, so the request could block)
465  ret = -EAGAIN
466  Go to out_free_dio
468  flags |= IOMAP_NOWAIT (do not block)
471  ret = filemap_write_and_wait_range(mapping, pos, end) (write out and wait on any dirty page cache pages in the byte range [pos, end])
472  If ret Then Go to out_free_dio
481  ret = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT) (remove the range of pages from the address_space so later buffered reads do not see stale data)
483  If ret Then dio_warn_stale_pagecache(iocb->ki_filp) (warn about a page cache invalidation failure during a direct I/O write)
485  ret = 0
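
Taken together, lines 471-485 keep the page cache coherent with the direct I/O; condensed into C the sequence is roughly (arguments follow the walkthrough, not a verbatim copy):

    /* Flush and wait on any dirty cached pages overlapping the range. */
    ret = filemap_write_and_wait_range(mapping, pos, end);
    if (ret)
            goto out_free_dio;

    /*
     * Invalidate the now-clean pages so later buffered reads do not see
     * stale data. Failure is only worth a warning: the direct write still
     * works, the cache is merely stale.
     */
    ret = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                        end >> PAGE_SHIFT);
    if (ret)
            dio_warn_stale_pagecache(iocb->ki_filp);
    ret = 0;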
487  If iov_iter_rw(iter) == WRITE && Not wait_for_completion && Not inode->i_sb->s_dio_done_wq Then
489  ret = sb_init_dio_done_wq(inode->i_sb) (lazily create the per-superblock workqueue used for deferred direct I/O completions; it is only allocated on first need)
490  If ret < 0 Then Go to out_free_dio
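
The lazy workqueue creation behind line 489 follows the usual allocate-then-cmpxchg pattern so that racing submitters agree on a single workqueue; a sketch in the spirit of sb_init_dio_done_wq() in fs/direct-io.c (details illustrative):

    int sb_init_dio_done_wq(struct super_block *sb)
    {
            struct workqueue_struct *old;
            struct workqueue_struct *wq = alloc_workqueue("dio/%s",
                                                          WQ_MEM_RECLAIM, 0,
                                                          sb->s_id);
            if (!wq)
                    return -ENOMEM;
            /* Only the first workqueue published via cmpxchg survives. */
            old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
            if (old)
                    destroy_workqueue(wq);  /* another submitter won the race */
            return 0;
    }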
494  inode_dio_begin(inode) (account one more direct I/O request in flight on this inode, so that code such as truncate can wait for direct I/O to drain)
496  blk_start_plug(&plug)
497  Do
498  ret = iomap_apply(inode, pos, count, flags, ops, dio, iomap_dio_actor) (map the next contiguous extent and let iomap_dio_actor build and submit bios for it)
500  If ret <= 0 Then
502  If ret == -ENOTBLK Then (magic error code: fall back to buffered I/O)
503  wait_for_completion = true
504  ret = 0
506  Break
508  pos += ret
517  Break (a read that has reached i_size stops early, after reverting the iter to the length actually reported)
519  When (count = iov_iter_count(iter)) > 0 cycle
520  blk_finish_plug(&plug)
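
The submission loop at lines 496-520 batches bio submission under a block plug and walks the iterator one mapped extent at a time; condensed into C it looks roughly like this (simplified from the walkthrough; the early break for reads that reach i_size is omitted):

    blk_start_plug(&plug);
    do {
            /* Map the next contiguous extent and let iomap_dio_actor
             * build and submit bios for it; returns bytes handled. */
            ret = iomap_apply(inode, pos, count, flags, ops, dio,
                              iomap_dio_actor);
            if (ret <= 0) {
                    if (ret == -ENOTBLK) {  /* fall back to buffered I/O */
                            wait_for_completion = true;
                            ret = 0;
                    }
                    break;
            }
            pos += ret;
    } while ((count = iov_iter_count(iter)) > 0);
    blk_finish_plug(&plug);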
522  If ret < 0 Then iomap_dio_set_error(dio, ret) (set an error in the dio if none is set yet; cmpxchg is used because the submission context and the completion context(s) can race to update it)
529  If dio->flags & IOMAP_DIO_WRITE_FUA Then dio->flags &= ~IOMAP_DIO_NEED_SYNC (all writes went out with FUA, so no cache flush is needed at completion)
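
The race-safe error recording mentioned at line 522 simply keeps the first error with a compare-and-exchange; a one-function sketch in the spirit of this file's iomap_dio_set_error() helper:

    static void iomap_dio_set_error(struct iomap_dio *dio, int ret)
    {
            /* Keep the first error: submission and completion contexts
             * can race to report one. */
            cmpxchg(&dio->error, 0, ret);
    }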
532  WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie) (publish the last submission cookie for ->iopoll)
533  WRITE_ONCE(iocb->private, dio->submit.last_queue)
550  dio->wait_for_completion = wait_for_completion
551  If Not atomic_dec_and_test(&dio->ref) Then (the submission reference was not the last one)
552  If Not wait_for_completion Then Return -EIOCBQUEUED (the iocb has been queued; the caller will get a completion event later)
555  cycle
556  set_current_state(TASK_UNINTERRUPTIBLE)
557  If Not READ_ONCE(dio->submit.waiter) Then Break
560  If Not (iocb->ki_flags & IOCB_HIPRI) || Not dio->submit.last_queue || Not blk_poll(dio->submit.last_queue, dio->submit.cookie, true) Then io_schedule()
566  __set_current_state(TASK_RUNNING)
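
For the synchronous case, lines 555-566 are a classic prepare-to-sleep loop with one twist: for IOCB_HIPRI (polled) I/O the task spins in blk_poll() on the last submission queue instead of sleeping. Roughly in C (condensed from the walkthrough):

    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            /* The completion side clears submit.waiter before waking us,
             * so a NULL waiter means all bios have finished. */
            if (!READ_ONCE(dio->submit.waiter))
                    break;

            if (!(iocb->ki_flags & IOCB_HIPRI) ||
                !dio->submit.last_queue ||
                !blk_poll(dio->submit.last_queue, dio->submit.cookie, true))
                    io_schedule();  /* not polling, or poll made no progress */
    }
    __set_current_state(TASK_RUNNING);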
569  Return iomap_dio_complete(dio)
571  out_free_dio:
572  kfree(dio)
573  Return ret