Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 316578 Details for
Bug 377561
CVE-2007-5907 kernel-xen 3.1.1 does not prevent modification of the CR4 TSC from applications (DoS possible)
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Patch to allow guest kernels to trap CR4 access
xen-cr4-tsc.patch (text/plain), 19.54 KB, created by
Chris Lalancette
on 2008-09-12 14:13:05 UTC
(
hide
)
Description:
Patch to allow guest kernels to trap CR4 access
Filename:
MIME Type:
Creator:
Chris Lalancette
Created:
2008-09-12 14:13:05 UTC
Size:
19.54 KB
patch
obsolete
>diff -urp xen.orig/arch/x86/acpi/boot.c xen/arch/x86/acpi/boot.c >--- xen.orig/arch/x86/acpi/boot.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/acpi/boot.c 2008-09-11 08:24:04.000000000 -0400 >@@ -36,6 +36,7 @@ > #include <asm/apic.h> > #include <asm/io.h> > #include <asm/mpspec.h> >+#include <asm/processor.h> > #include <mach_apic.h> > #include <mach_mpparse.h> > >diff -urp xen.orig/arch/x86/domain.c xen/arch/x86/domain.c >--- xen.orig/arch/x86/domain.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/domain.c 2008-09-11 08:48:00.000000000 -0400 >@@ -49,7 +49,8 @@ > #endif > > DEFINE_PER_CPU(struct vcpu *, curr_vcpu); >-DEFINE_PER_CPU(__u64, efer); >+DEFINE_PER_CPU(u64, efer); >+DEFINE_PER_CPU(unsigned long, cr4); > > static void paravirt_ctxt_switch_from(struct vcpu *v); > static void paravirt_ctxt_switch_to(struct vcpu *v); >@@ -385,6 +386,9 @@ int vcpu_initialise(struct vcpu *v) > v->arch.schedule_tail = continue_idle_domain; > v->arch.cr3 = __pa(idle_pg_table); > } >+ >+ v->arch.guest_context.ctrlreg[4] = >+ real_cr4_to_pv_guest_cr4(mmu_cr4_features); > } > > v->arch.perdomain_ptes = >@@ -527,13 +531,29 @@ void arch_domain_destroy(struct domain * > free_xenheap_page(d->shared_info); > } > >+unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4) >+{ >+ unsigned long hv_cr4_mask, hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4()); >+ >+ hv_cr4_mask = ~X86_CR4_TSD; >+ if ( cpu_has_de ) >+ hv_cr4_mask &= ~X86_CR4_DE; >+ >+ if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) ) >+ gdprintk(XENLOG_WARNING, >+ "Attempt to change CR4 flags %08lx -> %08lx\n", >+ hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4); >+ >+ return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask); >+} >+ > /* This is called by arch_final_setup_guest and do_boot_vcpu */ > int arch_set_info_guest( > struct vcpu *v, vcpu_guest_context_u c) > { > struct domain *d = v->domain; > unsigned long cr3_pfn = INVALID_MFN; >- unsigned long flags; >+ unsigned long flags, cr4; > int i, rc 
= 0, compat; > > /* The context is a compat-mode one if the target domain is compat-mode; >@@ -622,6 +642,11 @@ int arch_set_info_guest( > > /* Ensure real hardware interrupts are enabled. */ > v->arch.guest_context.user_regs.eflags |= EF_IE; >+ >+ cr4 = v->arch.guest_context.ctrlreg[4]; >+ v->arch.guest_context.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(cr4) : >+ real_cr4_to_pv_guest_cr4(mmu_cr4_features); >+ > } > else > { >@@ -1185,9 +1210,15 @@ static void paravirt_ctxt_switch_from(st > > static void paravirt_ctxt_switch_to(struct vcpu *v) > { >+ unsigned long cr4; >+ > set_int80_direct_trap(v); > switch_kernel_stack(v); > >+ cr4 = pv_guest_cr4_to_real_cr4(v->arch.guest_context.ctrlreg[4]); >+ if ( unlikely(cr4 != read_cr4()) ) >+ write_cr4(cr4); >+ > if ( unlikely(v->arch.guest_context.debugreg[7]) ) > { > write_debugreg(0, v->arch.guest_context.debugreg[0]); >diff -urp xen.orig/arch/x86/flushtlb.c xen/arch/x86/flushtlb.c >--- xen.orig/arch/x86/flushtlb.c 2007-12-06 12:48:38.000000000 -0500 >+++ xen/arch/x86/flushtlb.c 2008-09-11 09:06:11.000000000 -0400 >@@ -83,9 +83,12 @@ void write_cr3(unsigned long cr3) > hvm_flush_guest_tlbs(); > > #ifdef USER_MAPPINGS_ARE_GLOBAL >- __pge_off(); >- __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" ); >- __pge_on(); >+ { >+ unsigned long cr4 = read_cr4(); >+ write_cr4(cr4 & ~X86_CR4_PGE); >+ asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" ); >+ write_cr4(cr4); >+ } > #else > __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" ); > #endif >@@ -108,8 +111,12 @@ void local_flush_tlb(void) > hvm_flush_guest_tlbs(); > > #ifdef USER_MAPPINGS_ARE_GLOBAL >- __pge_off(); >- __pge_on(); >+ { >+ unsigned long cr4 = read_cr4(); >+ write_cr4(cr4 & ~X86_CR4_PGE); >+ barrier(); >+ write_cr4(cr4); >+ } > #else > __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" ); > #endif >diff -urp xen.orig/arch/x86/genapic/es7000plat.c xen/arch/x86/genapic/es7000plat.c >--- 
xen.orig/arch/x86/genapic/es7000plat.c 2007-12-06 12:48:38.000000000 -0500 >+++ xen/arch/x86/genapic/es7000plat.c 2008-09-11 13:52:01.000000000 -0400 >@@ -36,6 +36,7 @@ > #include <asm/io.h> > #include <asm/smp.h> > #include <asm/apicdef.h> >+#include <asm/processor.h> > #include "es7000.h" > > /* >diff -urp xen.orig/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/intr.c >--- xen.orig/arch/x86/hvm/svm/intr.c 2008-09-11 08:23:50.000000000 -0400 >+++ xen/arch/x86/hvm/svm/intr.c 2008-09-11 14:00:45.000000000 -0400 >@@ -145,7 +145,7 @@ asmlinkage void svm_intr_assist(void) > * - the guest might look at the APIC/PIC state, so we ought not to have > * cleared the interrupt out of the IRR. > */ >- if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow >+ if ( !(vmcb->rflags & X86_EFLAGS_IF) || vmcb->interrupt_shadow > || vmcb->eventinj.fields.v ) > { > vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR; >diff -urp xen.orig/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/svm.c >--- xen.orig/arch/x86/hvm/svm/svm.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/hvm/svm/svm.c 2008-09-11 14:00:12.000000000 -0400 >@@ -623,7 +623,7 @@ static int svm_load_vmcb_ctxt(struct vcp > static int svm_interrupts_enabled(struct vcpu *v) > { > unsigned long eflags = v->arch.hvm_svm.vmcb->rflags; >- return !irq_masked(eflags); >+ return (eflags & X86_EFLAGS_IF); > } > > static int svm_guest_x86_mode(struct vcpu *v) >diff -urp xen.orig/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/intr.c >--- xen.orig/arch/x86/hvm/vmx/intr.c 2008-09-11 08:23:50.000000000 -0400 >+++ xen/arch/x86/hvm/vmx/intr.c 2008-09-11 14:01:37.000000000 -0400 >@@ -166,7 +166,7 @@ asmlinkage void vmx_intr_assist(void) > } > > eflags = __vmread(GUEST_RFLAGS); >- if ( irq_masked(eflags) ) >+ if ( !(eflags & X86_EFLAGS_IF) ) > { > enable_irq_window(v); > return; >diff -urp xen.orig/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmcs.c >--- xen.orig/arch/x86/hvm/vmx/vmcs.c 2008-09-11 08:23:50.000000000 -0400 >+++ 
xen/arch/x86/hvm/vmx/vmcs.c 2008-09-11 08:24:04.000000000 -0400 >@@ -346,7 +346,7 @@ static void construct_vmcs(struct vcpu * > > /* Host control registers. */ > __vmwrite(HOST_CR0, read_cr0() | X86_CR0_TS); >- __vmwrite(HOST_CR4, read_cr4()); >+ __vmwrite(HOST_CR4, mmu_cr4_features); > > /* Host CS:RIP. */ > __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS); >diff -urp xen.orig/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/vmx.c >--- xen.orig/arch/x86/hvm/vmx/vmx.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/hvm/vmx/vmx.c 2008-09-11 14:01:14.000000000 -0400 >@@ -882,6 +882,10 @@ static void vmx_ctxt_switch_from(struct > > static void vmx_ctxt_switch_to(struct vcpu *v) > { >+ /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */ >+ if ( unlikely(read_cr4() != mmu_cr4_features) ) >+ write_cr4(mmu_cr4_features); >+ > vmx_restore_guest_msrs(v); > vmx_restore_dr(v); > } >@@ -1138,7 +1142,7 @@ static void vmx_init_hypercall_page(stru > static int vmx_interrupts_enabled(struct vcpu *v) > { > unsigned long eflags = __vmread(GUEST_RFLAGS); >- return !irq_masked(eflags); >+ return (eflags & X86_EFLAGS_IF); > } > > >diff -urp xen.orig/arch/x86/setup.c xen/arch/x86/setup.c >--- xen.orig/arch/x86/setup.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/setup.c 2008-09-11 08:24:04.000000000 -0400 >@@ -474,6 +474,8 @@ void __init __start_xen(unsigned long mb > set_current((struct vcpu *)0xfffff000); /* debug sanity */ > idle_vcpu[0] = current; > set_processor_id(0); /* needed early, for smp_processor_id() */ >+ rdmsrl(MSR_EFER, this_cpu(efer)); >+ asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) ); > > smp_prepare_boot_cpu(); > >diff -urp xen.orig/arch/x86/smpboot.c xen/arch/x86/smpboot.c >--- xen.orig/arch/x86/smpboot.c 2008-09-11 08:23:51.000000000 -0400 >+++ xen/arch/x86/smpboot.c 2008-09-11 08:24:04.000000000 -0400 >@@ -489,6 +489,8 @@ void __devinit start_secondary(void *unu > set_processor_id(cpu); > set_current(idle_vcpu[cpu]); > this_cpu(curr_vcpu) 
= idle_vcpu[cpu]; >+ rdmsrl(MSR_EFER, this_cpu(efer)); >+ asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) ); > > percpu_traps_init(); > >diff -urp xen.orig/arch/x86/smp.c xen/arch/x86/smp.c >--- xen.orig/arch/x86/smp.c 2007-12-06 12:48:38.000000000 -0500 >+++ xen/arch/x86/smp.c 2008-09-11 08:24:04.000000000 -0400 >@@ -86,6 +86,12 @@ static inline void check_IPI_mask(cpumas > ASSERT(!cpus_empty(cpumask)); > } > >+void apic_wait_icr_idle(void) >+{ >+ while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) >+ cpu_relax(); >+} >+ > void send_IPI_mask_flat(cpumask_t cpumask, int vector) > { > unsigned long mask = cpus_addr(cpumask)[0]; >diff -urp xen.orig/arch/x86/traps.c xen/arch/x86/traps.c >--- xen.orig/arch/x86/traps.c 2008-09-11 08:23:50.000000000 -0400 >+++ xen/arch/x86/traps.c 2008-09-11 08:48:53.000000000 -0400 >@@ -1679,10 +1679,9 @@ static int emulate_privileged_op(struct > break; > > case 4: /* Write CR4 */ >- if ( *reg != (read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE)) ) >- gdprintk(XENLOG_WARNING, >- "Attempt to change CR4 flags %08lx -> %08lx\n", >- read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE), *reg); >+ v->arch.guest_context.ctrlreg[4] = pv_guest_cr4_fixup(*reg); >+ write_cr4(pv_guest_cr4_to_real_cr4( >+ v->arch.guest_context.ctrlreg[4])); > break; > > default: >@@ -1764,6 +1763,10 @@ static int emulate_privileged_op(struct > } > break; > >+ case 0x31: /* RDTSC */ >+ rdtsc(regs->eax, regs->edx); >+ break; >+ > case 0x32: /* RDMSR */ > switch ( regs->ecx ) > { >diff -urp xen.orig/include/asm-x86/apic.h xen/include/asm-x86/apic.h >--- xen.orig/include/asm-x86/apic.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/apic.h 2008-09-11 13:51:39.000000000 -0400 >@@ -2,9 +2,7 @@ > #define __ASM_APIC_H > > #include <xen/config.h> >-#include <asm/fixmap.h> > #include <asm/apicdef.h> >-#include <asm/processor.h> > #include <asm/system.h> > > #define Dprintk(x...) 
>@@ -51,11 +49,7 @@ static __inline u32 apic_read(unsigned l > return *((volatile u32 *)(APIC_BASE+reg)); > } > >-static __inline__ void apic_wait_icr_idle(void) >-{ >- while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) >- cpu_relax(); >-} >+void apic_wait_icr_idle(void); > > int get_physical_broadcast(void); > >diff -urp xen.orig/include/asm-x86/domain.h xen/include/asm-x86/domain.h >--- xen.orig/include/asm-x86/domain.h 2008-09-11 08:23:51.000000000 -0400 >+++ xen/include/asm-x86/domain.h 2008-09-11 08:49:31.000000000 -0400 >@@ -332,6 +332,16 @@ struct arch_vcpu > /* Continue the current hypercall via func(data) on specified cpu. */ > int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data); > >+/* Clean up CR4 bits that are not under guest control. */ >+ unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4); >+ >+/* Convert between guest-visible and real CR4 values. */ >+#define pv_guest_cr4_to_real_cr4(c) \ >+ ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))) >+#define real_cr4_to_pv_guest_cr4(c) \ >+ ((c) & ~(X86_CR4_PGE | X86_CR4_PSE)) >+ >+ > #endif /* __ASM_DOMAIN_H__ */ > > /* >diff -urp xen.orig/include/asm-x86/flushtlb.h xen/include/asm-x86/flushtlb.h >--- xen.orig/include/asm-x86/flushtlb.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/flushtlb.h 2008-09-11 13:33:51.000000000 -0400 >@@ -74,12 +74,17 @@ extern void write_cr3(unsigned long cr3) > /* Flush guest mappings from the TLB and implicitly tick the tlbflush clock. 
*/ > extern void local_flush_tlb(void); > >+#ifdef USER_MAPPINGS_ARE_GLOBAL >+#define local_flush_tlb_pge() local_flush_tlb() >+#else > #define local_flush_tlb_pge() \ > do { \ >- __pge_off(); \ >+ unsigned long cr4 = read_cr4(); \ >+ write_cr4(cr4 & ~X86_CR4_PGE); \ > local_flush_tlb(); \ >- __pge_on(); \ >+ write_cr4(cr4); \ > } while ( 0 ) >+#endif > > #define local_flush_tlb_one(__addr) \ > __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr))) >diff -urp xen.orig/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/io.h >--- xen.orig/include/asm-x86/hvm/io.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/hvm/io.h 2008-09-11 08:24:04.000000000 -0400 >@@ -137,13 +137,6 @@ static inline int register_portio_handle > return register_io_handler(d, addr, size, action, HVM_PORTIO); > } > >-#if defined(__i386__) || defined(__x86_64__) >-static inline int irq_masked(unsigned long eflags) >-{ >- return ((eflags & X86_EFLAGS_IF) == 0); >-} >-#endif >- > extern void send_pio_req(unsigned long port, unsigned long count, int size, > paddr_t value, int dir, int df, int value_is_ptr); > void send_timeoffset_req(unsigned long timeoff); >diff -urp xen.orig/include/asm-x86/hvm/irq.h xen/include/asm-x86/hvm/irq.h >--- xen.orig/include/asm-x86/hvm/irq.h 2008-09-11 08:23:50.000000000 -0400 >+++ xen/include/asm-x86/hvm/irq.h 2008-09-11 08:24:04.000000000 -0400 >@@ -24,6 +24,7 @@ > > #include <xen/types.h> > #include <xen/spinlock.h> >+#include <asm/irq.h> > #include <asm/hvm/vpic.h> > #include <asm/hvm/vioapic.h> > #include <public/hvm/save.h> >diff -urp xen.orig/include/asm-x86/io_apic.h xen/include/asm-x86/io_apic.h >--- xen.orig/include/asm-x86/io_apic.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/io_apic.h 2008-09-11 08:24:04.000000000 -0400 >@@ -2,9 +2,10 @@ > #define __ASM_IO_APIC_H > > #include <xen/config.h> >-#include <asm/fixmap.h> > #include <asm/types.h> > #include <asm/mpspec.h> >+#include <asm/apicdef.h> >+#include <asm/fixmap.h> > > /* 
> * Intel IO-APIC support for SMP and UP systems. >diff -urp xen.orig/include/asm-x86/msr.h xen/include/asm-x86/msr.h >--- xen.orig/include/asm-x86/msr.h 2008-09-11 08:23:50.000000000 -0400 >+++ xen/include/asm-x86/msr.h 2008-09-11 08:24:04.000000000 -0400 >@@ -149,16 +149,14 @@ static inline void wrmsrl(unsigned int m > > #ifndef __ASSEMBLY__ > >-DECLARE_PER_CPU(__u64, efer); >+DECLARE_PER_CPU(u64, efer); > >-static inline __u64 read_efer(void) >+static inline u64 read_efer(void) > { >- if (!this_cpu(efer)) >- rdmsrl(MSR_EFER, this_cpu(efer)); > return this_cpu(efer); > } > >-static inline void write_efer(__u64 val) >+static inline void write_efer(u64 val) > { > this_cpu(efer) = val; > wrmsrl(MSR_EFER, val); >diff -urp xen.orig/include/asm-x86/page.h xen/include/asm-x86/page.h >--- xen.orig/include/asm-x86/page.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/page.h 2008-09-11 08:59:01.000000000 -0400 >@@ -294,20 +294,6 @@ void paging_init(void); > void setup_idle_pagetable(void); > #endif /* !defined(__ASSEMBLY__) */ > >-#define __pge_off() \ >- do { \ >- __asm__ __volatile__( \ >- "mov %0, %%cr4; # turn off PGE " \ >- : : "r" (mmu_cr4_features & ~X86_CR4_PGE) ); \ >- } while ( 0 ) >- >-#define __pge_on() \ >- do { \ >- __asm__ __volatile__( \ >- "mov %0, %%cr4; # turn off PGE " \ >- : : "r" (mmu_cr4_features) ); \ >- } while ( 0 ) >- > #define _PAGE_PRESENT 0x001U > #define _PAGE_RW 0x002U > #define _PAGE_USER 0x004U >diff -urp xen.orig/include/asm-x86/processor.h xen/include/asm-x86/processor.h >--- xen.orig/include/asm-x86/processor.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/processor.h 2008-09-11 08:24:04.000000000 -0400 >@@ -8,6 +8,8 @@ > #include <xen/config.h> > #include <xen/cache.h> > #include <xen/types.h> >+#include <xen/smp.h> >+#include <xen/percpu.h> > #include <public/xen.h> > #include <asm/types.h> > #include <asm/cpufeature.h> >@@ -296,16 +298,17 @@ static inline unsigned long read_cr2(voi > return __cr2; > } > 
>+DECLARE_PER_CPU(unsigned long, cr4); >+ > static inline unsigned long read_cr4(void) > { >- unsigned long __cr4; >- __asm__("mov %%cr4,%0\n\t" :"=r" (__cr4)); >- return __cr4; >-} >+ return this_cpu(cr4); >+} > > static inline void write_cr4(unsigned long val) > { >- __asm__("mov %0,%%cr4": :"r" ((unsigned long)val)); >+ this_cpu(cr4) = val; >+ asm volatile ( "mov %0,%%cr4" : : "r" (val) ); > } > > >@@ -331,24 +334,14 @@ extern unsigned long mmu_cr4_features; > > static always_inline void set_in_cr4 (unsigned long mask) > { >- unsigned long dummy; > mmu_cr4_features |= mask; >- __asm__ __volatile__ ( >- "mov %%cr4,%0\n\t" >- "or %1,%0\n\t" >- "mov %0,%%cr4\n" >- : "=&r" (dummy) : "irg" (mask) ); >+ write_cr4(read_cr4() | mask); > } > > static always_inline void clear_in_cr4 (unsigned long mask) > { >- unsigned long dummy; > mmu_cr4_features &= ~mask; >- __asm__ __volatile__ ( >- "mov %%cr4,%0\n\t" >- "and %1,%0\n\t" >- "mov %0,%%cr4\n" >- : "=&r" (dummy) : "irg" (~mask) ); >+ write_cr4(read_cr4() & ~mask); > } > > /* >diff -urp xen.orig/include/asm-x86/smp.h xen/include/asm-x86/smp.h >--- xen.orig/include/asm-x86/smp.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/smp.h 2008-09-11 08:24:04.000000000 -0400 >@@ -13,7 +13,6 @@ > > #ifdef CONFIG_X86_LOCAL_APIC > #ifndef __ASSEMBLY__ >-#include <asm/fixmap.h> > #include <asm/bitops.h> > #include <asm/mpspec.h> > #ifdef CONFIG_X86_IO_APIC >diff -urp xen.orig/include/asm-x86/x86_32/elf.h xen/include/asm-x86/x86_32/elf.h >--- xen.orig/include/asm-x86/x86_32/elf.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/x86_32/elf.h 2008-09-11 08:24:04.000000000 -0400 >@@ -1,8 +1,6 @@ > #ifndef __X86_32_ELF_H__ > #define __X86_32_ELF_H__ > >-#include <asm/processor.h> >- > typedef struct { > unsigned long ebx; > unsigned long ecx; >@@ -40,7 +38,7 @@ static inline void elf_core_save_regs(EL > asm volatile("movw %%fs, %%ax;" :"=a"(core_regs->fs)); > asm volatile("movw %%gs, %%ax;" :"=a"(core_regs->gs)); > 
/* orig_eax not filled in for now */ >- core_regs->eip = (unsigned long)current_text_addr(); >+ core_regs->eip = (unsigned long)elf_core_save_regs; > asm volatile("movw %%cs, %%ax;" :"=a"(core_regs->cs)); > asm volatile("pushfl; popl %0" :"=m"(core_regs->eflags)); > asm volatile("movl %%esp,%0" : "=m"(core_regs->esp)); >diff -urp xen.orig/include/asm-x86/x86_64/elf.h xen/include/asm-x86/x86_64/elf.h >--- xen.orig/include/asm-x86/x86_64/elf.h 2007-12-06 12:48:39.000000000 -0500 >+++ xen/include/asm-x86/x86_64/elf.h 2008-09-11 08:24:04.000000000 -0400 >@@ -1,8 +1,6 @@ > #ifndef __X86_64_ELF_H__ > #define __X86_64_ELF_H__ > >-#include <asm/processor.h> >- > typedef struct { > unsigned long r15; > unsigned long r14; >@@ -54,7 +52,7 @@ static inline void elf_core_save_regs(EL > asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi)); > asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi)); > /* orig_rax not filled in for now */ >- core_regs->rip = (unsigned long)current_text_addr(); >+ core_regs->rip = (unsigned long)elf_core_save_regs; > asm volatile("movl %%cs, %%eax;" :"=a"(core_regs->cs)); > asm volatile("pushfq; popq %0" :"=m"(core_regs->eflags)); > asm volatile("movq %%rsp,%0" : "=m"(core_regs->rsp));
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 377561
:
316578