Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: fs/block_dev.c  Create Date: 2022-07-28 20:14:44
Last Modify: 2020-03-12 14:18:49  Copyright © Brick

Name:__blkdev_direct_IO_simple

Proto:static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)

Type:ssize_t

Parameter:

Type               Name
struct kiocb *     iocb
struct iov_iter *  iter
int                nr_pages
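For orientation, the function opens (around lines 200-212 of fs/block_dev.c) by declaring the locals that the annotated lines below refer to. A minimal sketch based on the v5.5 source; exact declarations may differ slightly between releases:

        static ssize_t
        __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                        int nr_pages)
        {
                struct file *file = iocb->ki_filp;
                struct block_device *bdev = I_BDEV(bdev_file_inode(file));
                struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
                loff_t pos = iocb->ki_pos;
                bool should_dirty = false;
                struct bio bio;         /* on-stack bio, one per request */
                ssize_t ret;
                blk_qc_t qc;            /* cookie used for polled completion */
                ...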
205  file = iocb->ki_filp
206  bdev = I_BDEV(bdev_file_inode(file))
208  pos = iocb->ki_pos
209  bool should_dirty = false
214  If (pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1) Then Return -EINVAL (reject direct I/O that is not aligned to the device's logical block size)
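In source form this check is roughly the following sketch: both the file offset and every iovec segment must be aligned to the logical block size, or the request is rejected:

        /* Reject direct I/O that is not logical-block aligned. */
        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;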
218  If nr_pages <= DIO_INLINE_BIO_VECS Then vecs = inline_vecs
220  Else
221  vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec), GFP_KERNEL) (allocate the bio_vec array on the heap)
223  If Not vecs Then Return -ENOMEM
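A sketch of the bio_vec selection at lines 218-223: small requests reuse the on-stack inline_vecs array, larger ones allocate nr_pages bio_vecs from the heap:

        if (nr_pages <= DIO_INLINE_BIO_VECS) {
                vecs = inline_vecs;     /* on-stack array, no allocation */
        } else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }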
227  bio_init(&bio, vecs, nr_pages)
228  bio_set_dev(&bio, bdev)
229  bio.bi_iter.bi_sector = pos >> 9 (device address in 512-byte sectors)
230  bio.bi_write_hint = iocb->ki_hint
231  bio.bi_private = current (the submitting task)
232  bio.bi_end_io = blkdev_bio_end_io_simple
233  bio.bi_ioprio = iocb->ki_ioprio (see linux/ioprio.h)
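The bio set-up at lines 227-233 corresponds roughly to the following (field names as in v5.5; bi_sector is expressed in 512-byte sectors, which is why pos is shifted right by 9):

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> 9;       /* device address in 512-byte sectors */
        bio.bi_write_hint = iocb->ki_hint;      /* write-lifetime hint */
        bio.bi_private = current;               /* waiter, cleared by the completion handler */
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;        /* see linux/ioprio.h */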
235  ret = bio_iov_iter_get_pages(&bio, iter)
236  If unlikely(ret) Then Go to out
238  ret = bio.bi_iter.bi_size (the byte count queued in the bio)
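A sketch of the page-pinning step: bio_iov_iter_get_pages() maps the user pages described by the iov_iter into the bio, and on success ret becomes the total number of bytes the bio will transfer:

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;      /* bytes to transfer */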
240  If iov_iter_rw(iter) == READ Then
241  bio.bi_opf = REQ_OP_READ
242  If iter_is_iovec(iter) Then should_dirty = true
244  Else
245  bio.bi_opf = dio_bio_write_op(iocb)
246  task_io_account_write(ret)
248  If iocb->ki_flags & IOCB_HIPRI Then bio_set_polled(&bio, iocb) (mark the bio as polled; for async polled IO the caller must expect -EWOULDBLOCK if a request cannot be allocated, since we cannot block waiting for requests on polled IO and completions must be found by the caller)
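A sketch of the direction handling at lines 240-248: reads into user iovecs are flagged so the pages get dirtied on release, writes are charged to the task's write accounting, and IOCB_HIPRI requests are marked for polled completion:

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;    /* dirty user pages on release */
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);     /* complete via blk_poll() */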
251  qc = submit_bio(&bio)
252  Loop (for (;;))
253  set_current_state(TASK_UNINTERRUPTIBLE)
254  If Not READ_ONCE(bio.bi_private) Then Break
256  If Not (iocb->ki_flags & IOCB_HIPRI) || Not blk_poll(bdev_get_queue(bdev), qc, true) Then io_schedule()
260  __set_current_state(TASK_RUNNING)
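The submit-and-wait sequence at lines 251-260 relies on the completion handler installed as bi_end_io above: the handler clears bi_private and wakes the submitter, and the loop sleeps (or polls, for IOCB_HIPRI) until that happens. A sketch based on the v5.5 source:

        /* Completion handler set as bio.bi_end_io above: clear bi_private
         * and wake the task that submitted the bio. */
        static void blkdev_bio_end_io_simple(struct bio *bio)
        {
                struct task_struct *waiter = bio->bi_private;

                WRITE_ONCE(bio->bi_private, NULL);
                blk_wake_io_task(waiter);
        }

        /* ...and the submit-and-wait loop in __blkdev_direct_IO_simple(): */
                qc = submit_bio(&bio);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!READ_ONCE(bio.bi_private))
                                break;          /* the handler above has run */
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !blk_poll(bdev_get_queue(bdev), qc, true))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);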
262  bio_release_pages(&bio, should_dirty)
263  If unlikely(bio.bi_status) Then ret = blk_status_to_errno(bio.bi_status)
266  out:
267  If vecs != inline_vecs Then kfree(vecs)
270  bio_uninit(&bio)
272  Return ret
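The completion and cleanup path at lines 262-272, roughly: release (and, for reads into user memory, dirty) the pinned pages, translate any bio error status into a negative errno, free a heap-allocated bio_vec array, and tear down the on-stack bio:

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

        out:
        if (vecs != inline_vecs)
                kfree(vecs);
        bio_uninit(&bio);
        return ret;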
Caller
Name              Describe
blkdev_direct_IO  Block device ->direct_IO method; dispatches small synchronous requests to this helper
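For context, blkdev_direct_IO only takes this simple synchronous path when the iocb is synchronous and the request fits in a single bio; everything else goes to the asynchronous __blkdev_direct_IO. A sketch based on the v5.5 source:

        static ssize_t
        blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
        {
                int nr_pages;

                nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
                if (!nr_pages)
                        return 0;
                if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

                return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
        }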