Red Hat Bugzilla – Attachment 290461 Details for Bug 254024 – sys_vm86 syscall in RHEL5 not reliable
Attachment 290461: [patch] Jeremy's vm86 patch backport for RHEL5-58

Description: Jeremy's vm86 patch backport for RHEL5-58
Filename:    backport-jermey-vm86-changes-v2.patch
MIME Type:   text/plain
Creator:     Vivek Goyal
Created:     2007-12-27 22:45:13 UTC
Size:        10.37 KB
Flags:       patch, obsolete
---

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---

 arch/i386/kernel/vm86.c |  110 ++++++++++++++++++++++++++++++------------------
 include/asm-i386/vm86.h |   17 -------
 2 files changed, 71 insertions(+), 56 deletions(-)

diff -puN arch/i386/kernel/vm86.c~backport-jermey-vm86-changes arch/i386/kernel/vm86.c
--- linux-2.6.18.i386-el5.58-vivek/arch/i386/kernel/vm86.c~backport-jermey-vm86-changes	2007-12-27 16:17:47.000000000 -0500
+++ linux-2.6.18.i386-el5.58-vivek-root/arch/i386/kernel/vm86.c	2007-12-27 17:25:24.000000000 -0500
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/audit.h>
+#include <linux/stddef.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -72,10 +73,10 @@
 /*
  * 8- and 16-bit register defines..
  */
-#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
-#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
-#define IP(regs)	(*(unsigned short *)&((regs)->eip))
-#define SP(regs)	(*(unsigned short *)&((regs)->esp))
+#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
+#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
+#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
+#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
 
 /*
  * virtual flags (16 and 32-bit versions)
@@ -89,10 +90,37 @@
 #define SAFE_MASK	(0xDD5)
 #define RETURN_MASK	(0xDFF)
 
-#define VM86_REGS_PART2 orig_eax
-#define VM86_REGS_SIZE1 \
-	( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
-#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
+/* convert kernel_vm86_regs to vm86_regs */
+static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
+				  const struct kernel_vm86_regs *regs)
+{
+	int ret = 0;
+
+	/* kernel_vm86_regs is missing xfs, so copy everything up to
+	   (but not including) xgs, and then rest after xgs. */
+	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
+			    sizeof(struct kernel_vm86_regs) -
+			    offsetof(struct kernel_vm86_regs, pt.orig_eax));
+
+	return ret;
+}
+
+/* convert vm86_regs to kernel_vm86_regs */
+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
+				    const struct vm86_regs __user *user,
+				    unsigned extra)
+{
+	int ret = 0;
+
+	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
+			      sizeof(struct kernel_vm86_regs) -
+			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
+			      extra);
+
+	return ret;
+}
 
 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
 struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
@@ -114,10 +142,8 @@ struct pt_regs * fastcall save_v86_state
 		printk("no vm86_info: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-	tmp = copy_to_user(&current->thread.vm86_info->regs,regs, VM86_REGS_SIZE1);
-	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
-		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
+	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
 		printk("vm86: could not access userspace vm86_info\n");
@@ -189,9 +215,10 @@ asmlinkage int sys_vm86old(struct pt_reg
 	tsk = current;
 	if (tsk->thread.saved_esp0)
 		goto out;
-	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
+	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+				       offsetof(struct kernel_vm86_struct, vm86plus) -
+				       sizeof(info.regs));
+
 	ret = -EFAULT;
 	if (tmp)
 		goto out;
@@ -239,9 +266,10 @@ asmlinkage int sys_vm86(struct pt_regs r
 	if (tsk->thread.saved_esp0)
 		goto out;
 	v86 = (struct vm86plus_struct __user *)regs.ecx;
-	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
+	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+				       offsetof(struct kernel_vm86_struct, regs32) -
+				       sizeof(info.regs));
+
 	ret = -EFAULT;
 	if (tmp)
 		goto out;
@@ -264,8 +292,8 @@ static void do_sys_vm86(struct kernel_vm
 	/*
 	 * make sure the vm86() system call doesn't try to do anything silly
 	 */
-	info->regs.__null_ds = 0;
-	info->regs.__null_es = 0;
+	info->regs.pt.xds = 0;
+	info->regs.pt.xes = 0;
 
 	/* we are clearing fs,gs later just before "jmp resume_userspace",
 	 * because starting with Linux 2.1.x they aren't no longer saved/restored
@@ -276,10 +304,10 @@
 	 * has set it up safely, so this makes sure interrupt etc flags are
 	 * inherited from protected mode.
 	 */
-	VEFLAGS = info->regs.eflags;
-	info->regs.eflags &= SAFE_MASK;
-	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
-	info->regs.eflags |= VM_MASK;
+	VEFLAGS = info->regs.pt.eflags;
+	info->regs.pt.eflags &= SAFE_MASK;
+	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
+	info->regs.pt.eflags |= VM_MASK;
 
 	switch (info->cpu_type) {
 		case CPU_286:
@@ -318,19 +346,19 @@ static void do_sys_vm86(struct kernel_vm
 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
 		mark_screen_rdonly(tsk->mm);
-	__asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
-	__asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(0), 0);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"
 		"movl %1,%%ebp\n\t"
+		"mov %2, %%fs\n\t"
+		"mov %3, %%gs\n\t"
 		"jmp resume_userspace"
 		: /* no outputs */
-		:"r" (&info->regs), "r" (task_thread_info(tsk)));
+		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0), "r" (0));
 	/* we never return here */
 }
 
@@ -360,12 +388,12 @@ static inline void clear_IF(struct kerne
 
 static inline void clear_TF(struct kernel_vm86_regs * regs)
 {
-	regs->eflags &= ~TF_MASK;
+	regs->pt.eflags &= ~TF_MASK;
 }
 
 static inline void clear_AC(struct kernel_vm86_regs * regs)
 {
-	regs->eflags &= ~AC_MASK;
+	regs->pt.eflags &= ~AC_MASK;
 }
 
 /* It is correct to call set_IF(regs) from the set_vflags_*
@@ -382,7 +410,7 @@ static inline void clear_AC(struct kerne
 static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VEFLAGS, eflags, current->thread.v86mask);
-	set_flags(regs->eflags, eflags, SAFE_MASK);
+	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
 	if (eflags & IF_MASK)
 		set_IF(regs);
 	else
@@ -392,7 +420,7 @@ static inline void set_vflags_long(unsig
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
-	set_flags(regs->eflags, flags, SAFE_MASK);
+	set_flags(regs->pt.eflags, flags, SAFE_MASK);
 	if (flags & IF_MASK)
 		set_IF(regs);
 	else
@@ -401,7 +429,7 @@ static inline void set_vflags_short(unsi
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 {
-	unsigned long flags = regs->eflags & RETURN_MASK;
+	unsigned long flags = regs->pt.eflags & RETURN_MASK;
 
 	if (VEFLAGS & VIF_MASK)
 		flags |= IF_MASK;
@@ -505,7 +533,7 @@ static void do_int(struct kernel_vm86_re
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
 
-	if (regs->cs == BIOSSEG)
+	if (regs->pt.xcs == BIOSSEG)
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
@@ -517,9 +545,9 @@ static void do_int(struct kernel_vm86_re
 	if ((segoffs >> 16) == BIOSSEG)
 		goto cannot_handle;
 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
-	pushw(ssp, sp, regs->cs, cannot_handle);
+	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
 	pushw(ssp, sp, IP(regs), cannot_handle);
-	regs->cs = segoffs >> 16;
+	regs->pt.xcs = segoffs >> 16;
 	SP(regs) -= 6;
 	IP(regs) = segoffs & 0xffff;
 	clear_TF(regs);
@@ -536,7 +564,7 @@ int handle_vm86_trap(struct kernel_vm86_
 	if (VMPI.is_vm86pus) {
 		if ( (trapno==3) || (trapno==1) )
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
-		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
+		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
 		return 0;
 	}
 	if (trapno !=1)
@@ -565,10 +593,10 @@ void handle_vm86_fault(struct kernel_vm8
 		handle_vm86_trap(regs, 0, 1); \
 		return; } while (0)
 
-	orig_flags = *(unsigned short *)&regs->eflags;
 
-	csp = (unsigned char __user *) (regs->cs << 4);
-	ssp = (unsigned char __user *) (regs->ss << 4);
+	orig_flags = *(unsigned short *)&regs->pt.eflags;
+
+	csp = (unsigned char __user *) (regs->pt.xcs << 4);
+	ssp = (unsigned char __user *) (regs->pt.xss << 4);
 	sp = SP(regs);
 	ip = IP(regs);
 
@@ -655,7 +683,7 @@ void handle_vm86_fault(struct kernel_vm8
 			SP(regs) += 6;
 		}
 		IP(regs) = newip;
-		regs->cs = newcs;
+		regs->pt.xcs = newcs;
 		CHECK_IF_IN_TRAP;
 		if (data32) {
 			set_vflags_long(newflags, regs);
diff -puN include/asm-i386/vm86.h~backport-jermey-vm86-changes include/asm-i386/vm86.h
--- linux-2.6.18.i386-el5.58-vivek/include/asm-i386/vm86.h~backport-jermey-vm86-changes	2007-12-27 16:17:47.000000000 -0500
+++ linux-2.6.18.i386-el5.58-vivek-root/include/asm-i386/vm86.h	2007-12-27 16:17:47.000000000 -0500
@@ -68,6 +68,7 @@
  * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
  * is 'kernel_vm86_regs' (see below).
  */
+#include <asm/ptrace.h>
 
 struct vm86_regs {
 /*
@@ -150,21 +151,7 @@ struct kernel_vm86_regs {
 /*
  * normal regs, with special meaning for the segment descriptors..
  */
-	long ebx;
-	long ecx;
-	long edx;
-	long esi;
-	long edi;
-	long ebp;
-	long eax;
-	long __null_ds;
-	long __null_es;
-	long orig_eax;
-	long eip;
-	unsigned short cs, __csh;
-	long eflags;
-	long esp;
-	unsigned short ss, __ssh;
+	struct pt_regs pt;
 /*
  * these are specific to v86 mode:
  */
_
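A note on the technique: the new copy_vm86_regs_to_user()/copy_vm86_regs_from_user() helpers replace the old VM86_REGS_SIZE1/VM86_REGS_SIZE2 macros by splitting the copy at an offsetof()-computed boundary, so the split point follows the structure layout instead of being hand-maintained. The sketch below is a minimal userspace illustration of that split-copy idea, not kernel code; the struct names, fields, and memcpy stand-ins are invented for the example.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical layouts, invented for illustration: the "kernel" view
 * carries one extra field (pad) that the "user" view does not have,
 * so a single flat memcpy would mis-align everything after it. */
struct user_regs   { long a, b, c, orig, d; };
struct kernel_regs { long a, b, c, pad, orig, d; };

/* Copy the common prefix, then the tail, with both boundaries taken
 * from offsetof() rather than hand-maintained size constants. */
static void copy_regs_to_user(struct user_regs *dst, const struct kernel_regs *src)
{
	memcpy(dst, src, offsetof(struct kernel_regs, pad));
	memcpy(&dst->orig, &src->orig,
	       sizeof(struct kernel_regs) - offsetof(struct kernel_regs, orig));
}

int main(void)
{
	struct kernel_regs k = { 1, 2, 3, 99, 4, 5 };
	struct user_regs u;

	copy_regs_to_user(&u, &k);
	printf("%ld %ld %ld %ld %ld\n", u.a, u.b, u.c, u.orig, u.d); /* prints: 1 2 3 4 5 */
	return 0;
}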
Attachments on bug 254024: 206251 | 289391 | 289411 | 289806 | 289807 | 290180 | 290181 | 290211 | 290212 | 290461 | 290688 | 291734 | 292069