Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: arch/x86/kernel/vm86_32.c  Create Date: 2022-07-28 08:41:38

Last Modify: 2022-05-22 10:11:03  Copyright © Brick

Name:do_sys_vm86

Proto:static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)

Type:long

Parameter:

Type                               Name
struct vm86plus_struct __user *    user_vm86
bool                               plus
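do_sys_vm86 is the shared backend for both vm86 system calls; the plus argument tells it whether the extra vm86plus fields are present. Abridged from the same file (v5.5), the two entry points look like this:

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
        return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
        switch (cmd) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
                return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                return 0;       /* lets callers probe for vm86plus support */
        }

        /* only VM86_ENTER and VM86_ENTER_NO_BYPASS reach this point */
        return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}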
240  tsk = current
241  vm86 = tsk->thread.vm86
243  regs = current_pt_regs()
244  err = 0
246  err = security_mmap_addr(0)
247  If err Then
266  pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n", current->comm, task_pid_nr(current), from_kuid_munged(&init_user_ns, current_uid()))
269  Return -EPERM
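This check is why DOS emulators document the mmap_min_addr requirement: vm86 does not virtualize the address space, so the caller itself must be able to map the real-mode low memory starting at virtual address 0. A minimal userspace probe (hypothetical helper name, standard mmap flags) might look like:

#include <stdio.h>
#include <sys/mman.h>

/* Returns 1 if this process may map page 0, which do_sys_vm86() treats
 * as a precondition (it calls security_mmap_addr(0) and fails with
 * -EPERM otherwise). */
int can_use_vm86(void)
{
        void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap at va 0 (check vm.mmap_min_addr / LSM policy)");
                return 0;
        }
        return 1;
}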
272  If Not vm86 Then
273  If Not (vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)) Then Return -ENOMEM (kzalloc allocates zeroed memory)
275  tsk->thread.vm86 = vm86
277  If vm86->saved_sp0 Then Return -EPERM (a vm86 call is already active for this task)
280  If Not access_ok(user_vm86, plus ? sizeof(struct vm86_struct) : sizeof(struct vm86plus_struct)) Then Return -EFAULT (access_ok validates the user-space pointer range; may sleep if pagefaults are enabled)
285  memset(&vm86regs, 0, sizeof(vm86regs))
286  get_user_try {
288  get_user_ex(bx, &ebx)
289  get_user_ex(cx, &ecx)
290  get_user_ex(dx, &edx)
291  get_user_ex(si, &esi)
292  get_user_ex(di, &edi)
293  get_user_ex(bp, &ebp)
294  get_user_ex(ax, &eax)
295  get_user_ex(ip, &eip)
296  get_user_ex(seg, &cs)
297  cs = seg
298  get_user_ex(flags, &eflags)
299  get_user_ex(sp, &esp)
300  get_user_ex(seg, &ss)
301  ss = seg
302  get_user_ex(es, &es)
303  get_user_ex(ds, &ds)
304  get_user_ex(fs, &fs)
305  get_user_ex(gs, &gs)
307  get_user_ex(flags, &flags)
308  get_user_ex(screen_bitmap, &screen_bitmap)
309  get_user_ex(cpu_type, &cpu_type)
310  } get_user_catch(err)
311  If err Then Return err
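For reference, the user-visible register block being fetched is struct vm86_regs from arch/x86/include/uapi/asm/vm86.h (abridged below); its comments are the source of the fragments quoted above, and the segment registers are 16-bit fields with explicit padding, which is why the reads for cs and ss go through a 16-bit seg temporary:

struct vm86_regs {
        /* normal regs, with special meaning for the segment descriptors.. */
        long ebx, ecx, edx, esi, edi, ebp, eax;
        long __null_ds, __null_es, __null_fs, __null_gs;
        long orig_eax;
        long eip;
        unsigned short cs, __csh;
        long eflags;
        long esp;
        unsigned short ss, __ssh;
        /* these are specific to v86 mode: */
        unsigned short es, __esh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
};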
314  If copy_from_user(&vm86->int_revectored, &user_vm86->int_revectored, sizeof(struct revectored_struct)) Then Return -EFAULT
318  If copy_from_user(&vm86->int21_revectored, &user_vm86->int21_revectored, sizeof(struct revectored_struct)) Then Return -EFAULT
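int_revectored is a 256-bit bitmap with one bit per interrupt vector, and int21_revectored plays the same role for the AH subfunctions of the DOS int 21h call. A set bit makes the in-kernel emulation bounce that software interrupt back to the monitor (as VM86_INTx) instead of reflecting it through the guest's real-mode IVT. A hypothetical helper to set a vector bit, relying only on the uapi layout (unsigned long __map[8]):

#include <asm/vm86.h>

/* Set bit 'n' (0..255) in a revectored_struct bitmap. */
static void revector(struct revectored_struct *r, unsigned int n)
{
        r->__map[n / (8 * sizeof(unsigned long))] |=
                1UL << (n % (8 * sizeof(unsigned long)));
}

/* e.g. revector(&v86.int_revectored, 0x13) traps BIOS disk services */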
322  If plus Then
323  If copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus, sizeof(struct vm86plus_info_struct)) Then Return -EFAULT
326  is_vm86pus = 1 (for vm86 internal use)
327  Else memset(&vm86->vm86plus, 0, sizeof(struct vm86plus_info_struct))
331  memcpy(&vm86->regs32, regs, sizeof(struct pt_regs))
332  vm86->user_vm86 = user_vm86
339  VEFLAGS = flags (the flags register cannot be trusted as the user set it up, so interrupt etc. flags are inherited from protected mode)
340  flags &= SAFE_MASK
341  flags |= regs->flags & ~SAFE_MASK
342  flags |= X86_VM_MASK (the EFLAGS VM bit: enter virtual-8086 mode)
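Lines 339-342 implement the trust boundary for EFLAGS: user mode supplies the arithmetic and direction bits only, everything else (IF, IOPL, ...) is inherited from the protected-mode frame, and the VM bit is forced on. A compilable model of the merge, with the mask value taken from vm86_32.c where SAFE_MASK is 0xDD5:

#include <stdint.h>

#define SAFE_MASK       0x00000DD5u     /* CF|PF|AF|ZF|SF|TF|DF|OF, as in vm86_32.c */
#define X86_EFLAGS_VM   0x00020000u     /* X86_VM_MASK: the virtual-8086 mode bit */

static uint32_t vm86_entry_eflags(uint32_t user_flags, uint32_t kernel_flags)
{
        uint32_t flags = user_flags & SAFE_MASK;  /* keep only user-settable bits */
        flags |= kernel_flags & ~SAFE_MASK;       /* inherit IF, IOPL, ... */
        return flags | X86_EFLAGS_VM;             /* enter v86 mode on IRET */
}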
344  orig_ax = regs->orig_ax (on syscall entry this is the syscall number; on a CPU exception, the error code; on a hw interrupt, the IRQ number)
346  Switch cpu_type
347  Case cpu_type == CPU_286
348  veflags_mask = 0
349  Break
350  Case cpu_type == CPU_386
351  veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL
352  Break
353  Case cpu_type == CPU_486
354  veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL
355  Break
356  Default
357  veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL
358  Break
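The mask chosen by this switch controls which virtual EFLAGS bits the guest can observe and toggle, which matters because real-mode programs traditionally probe the CPU generation exactly this way (a toggleable AC bit means 486 or later, a toggleable ID bit means CPUID is present). A sketch of the same mapping as a standalone helper, with CPU_* values as defined in the uapi vm86 header:

#include <stdint.h>

#define X86_EFLAGS_IOPL 0x00003000u     /* I/O privilege level (286+) */
#define X86_EFLAGS_NT   0x00004000u     /* nested task         (286+) */
#define X86_EFLAGS_AC   0x00040000u     /* alignment check     (486+) */
#define X86_EFLAGS_ID   0x00200000u     /* CPUID detection     (586+) */

static uint32_t veflags_mask_for(int cpu_type)
{
        switch (cpu_type) {
        case 2: /* CPU_286 */ return 0;
        case 3: /* CPU_386 */ return X86_EFLAGS_NT | X86_EFLAGS_IOPL;
        case 4: /* CPU_486 */ return X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
        default:              return X86_EFLAGS_ID | X86_EFLAGS_AC |
                                     X86_EFLAGS_NT | X86_EFLAGS_IOPL;
        }
}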
364  saved_sp0 = tsk->thread.sp0 (save old state)
365  lazy_save_gs(regs32.gs)
368  preempt_disable() (get_user/put_user can fault and schedule; the disable/enable pair keeps such faults out of this region)
369  sp0 += 16 (make room for real-mode segments)
371  If boot_cpu_has(X86_FEATURE_SEP) Then (CPU has SYSENTER/SYSEXIT)
372  sysenter_cs = 0
373  refresh_sysenter_cs(&tsk->thread)
376  update_task_stack(tsk) (used when switching tasks or entering/exiting vm86 mode)
377  preempt_enable()
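In full, the save-old-state region reads as follows in the v5.5 source (abridged): sp0 is bumped by 16 bytes because entry from v86 mode pushes four extra segment registers, and the whole update runs with preemption disabled so the entry stack and the SYSENTER state change together:

        vm86->saved_sp0 = tsk->thread.sp0;
        lazy_save_gs(vm86->regs32.gs);

        /* make room for real-mode segments */
        preempt_disable();
        tsk->thread.sp0 += 16;

        if (boot_cpu_has(X86_FEATURE_SEP)) {
                tsk->thread.sysenter_cs = 0;
                refresh_sysenter_cs(&tsk->thread);
        }

        update_task_stack(tsk);
        preempt_enable();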
379  If flags & VM86_SCREEN_BITMAP Then mark_screen_rdonly(tsk->mm)
382  memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs))
383  force_iret() (force syscall return via IRET by making it look as if there is work pending)
384  Return regs->ax
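Putting it together, a hypothetical caller (32-bit x86 only; glibc ships no wrapper, so the syscall is invoked directly) enters v86 mode like this. The syscall appears to return only when the guest traps back to the monitor, and the value encodes why, via the VM86_TYPE/VM86_ARG macros:

#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/vm86.h>

int run_guest(unsigned short cs, unsigned short ip)
{
        struct vm86plus_struct v86;
        int ret;

        memset(&v86, 0, sizeof(v86));
        v86.cpu_type    = CPU_386;
        v86.regs.cs     = cs;
        v86.regs.eip    = ip;
        v86.regs.ss     = 0x0000;
        v86.regs.esp    = 0xfffe;
        v86.regs.eflags = 0;    /* do_sys_vm86() sanitizes these anyway */

        ret = syscall(SYS_vm86, VM86_ENTER, &v86);
        return VM86_TYPE(ret);  /* e.g. VM86_INTx with VM86_ARG(ret) == vector */
}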