Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 314655 Details for
Bug 459649
[RHEL4] i386: backport the NMI watchdog/performance counter infrastructure
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly; please enable it.
[patch]
RHEL-5 version of i386 support, for reference
nmi_watchdog_i386.patch (text/plain), 24.13 KB, created by
Aristeu Rozanski
on 2008-08-20 20:10:28 UTC
(
hide
)
Description:
RHEL-5 version of i386 support, for reference
Filename:
MIME Type:
Creator:
Aristeu Rozanski
Created:
2008-08-20 20:10:28 UTC
Size:
24.13 KB
patch
obsolete
>--- tree.orig/arch/i386/kernel/Makefile 2008-07-07 15:42:59.000000000 -0400 >+++ tree/arch/i386/kernel/Makefile 2008-07-08 13:12:24.000000000 -0400 >@@ -7,7 +7,8 @@ extra-y := head.o init_task.o vmlinux.ld > obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ > ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ > pci-dma.o i386_ksyms.o i387.o bootflag.o \ >- quirks.o i8237.o topology.o alternative.o i8253.o tsc.o >+ quirks.o i8237.o topology.o alternative.o i8253.o tsc.o \ >+ perfctr-watchdog.o > > obj-$(CONFIG_STACKTRACE) += stacktrace.o > obj-y += cpu/ >@@ -88,6 +89,7 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall. > > k8-y += ../../x86_64/kernel/k8.o > stacktrace-y += ../../x86_64/kernel/stacktrace.o >+perfctr-watchdog-y += ../../x86_64/kernel/perfctr-watchdog.o > > ifdef CONFIG_XEN > include $(srctree)/scripts/Makefile.xen >--- tree.orig/arch/i386/kernel/nmi.c 2008-07-07 15:42:59.000000000 -0400 >+++ tree/arch/i386/kernel/nmi.c 2008-07-08 14:02:01.000000000 -0400 >@@ -24,29 +24,24 @@ > #include <asm/smp.h> > #include <asm/nmi.h> > #include <asm/intel_arch_perfmon.h> >+#include <asm/kdebug.h> > > #include "mach_traps.h" > > unsigned int nmi_watchdog = NMI_NONE; > int unknown_nmi_panic; > static unsigned int nmi_hz = HZ; >-static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ >-static unsigned int nmi_p4_cccr_val; > extern void show_registers(struct pt_regs *regs); > >+static volatile unsigned long lapic_nmi_reserved; >+ > /* >- * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: >- * - it may be reserved by some other driver, or not >- * - when not reserved by some other driver, it may be used for >- * the NMI watchdog, or not >- * >- * This is maintained separately from nmi_active because the NMI >- * watchdog may also be driven from the I/O APIC timer. >+ * the upstream variable 'nmi_active' is called 'nmi_watchdog_active' on >+ * RHEL-5 for kABI reasons. 
> */ >-static DEFINE_SPINLOCK(lapic_nmi_owner_lock); >-static unsigned int lapic_nmi_owner; >-#define LAPIC_NMI_WATCHDOG (1<<0) >-#define LAPIC_NMI_RESERVED (1<<1) >+atomic_t nmi_watchdog_active = ATOMIC_INIT(0); >+ >+static DEFINE_PER_CPU(short, wd_enabled); > > /* nmi_active: > * +1: the lapic NMI watchdog is active, but can be disabled >@@ -56,48 +51,6 @@ static unsigned int lapic_nmi_owner; > */ > int nmi_active; > >-#define K7_EVNTSEL_ENABLE (1 << 22) >-#define K7_EVNTSEL_INT (1 << 20) >-#define K7_EVNTSEL_OS (1 << 17) >-#define K7_EVNTSEL_USR (1 << 16) >-#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 >-#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING >- >-#define P6_EVNTSEL0_ENABLE (1 << 22) >-#define P6_EVNTSEL_INT (1 << 20) >-#define P6_EVNTSEL_OS (1 << 17) >-#define P6_EVNTSEL_USR (1 << 16) >-#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 >-#define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED >- >-#define MSR_P4_MISC_ENABLE 0x1A0 >-#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) >-#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) >-#define MSR_P4_PERFCTR0 0x300 >-#define MSR_P4_CCCR0 0x360 >-#define P4_ESCR_EVENT_SELECT(N) ((N)<<25) >-#define P4_ESCR_OS (1<<3) >-#define P4_ESCR_USR (1<<2) >-#define P4_CCCR_OVF_PMI0 (1<<26) >-#define P4_CCCR_OVF_PMI1 (1<<27) >-#define P4_CCCR_THRESHOLD(N) ((N)<<20) >-#define P4_CCCR_COMPLEMENT (1<<19) >-#define P4_CCCR_COMPARE (1<<18) >-#define P4_CCCR_REQUIRED (3<<16) >-#define P4_CCCR_ESCR_SELECT(N) ((N)<<13) >-#define P4_CCCR_ENABLE (1<<12) >-/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter >- CRU_ESCR0 (with any non-null event selector) through a complemented >- max threshold. 
[IA32-Vol3, Section 14.9.9] */ >-#define MSR_P4_IQ_COUNTER0 0x30C >-#define P4_NMI_CRU_ESCR0 (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR) >-#define P4_NMI_IQ_CCCR0 \ >- (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ >- P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) >- >-#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL >-#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK >- > static int endflag __initdata = 0; > > #ifdef CONFIG_SMP >@@ -119,33 +72,14 @@ static __init void nmi_cpu_busy(void *da > } > #endif > >-static unsigned int adjust_for_32bit_ctr(unsigned int hz) >-{ >- u64 counter_val; >- unsigned int retval = hz; >- >- /* >- * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter >- * are writable, with higher bits sign extending from bit 31. >- * So, we can only program the counter with 31 bit values and >- * 32nd bit should be 1, for 33.. to be 1. >- * Find the appropriate nmi_hz >- */ >- counter_val = (u64)cpu_khz * 1000; >- do_div(counter_val, retval); >- if (counter_val > 0x7fffffffULL) { >- u64 count = (u64)cpu_khz * 1000; >- do_div(count, 0x7fffffffUL); >- retval = count + 1; >- } >- return retval; >-} >- > static int __init check_nmi_watchdog(void) > { > unsigned int *prev_nmi_count; > int cpu; > >+ if (!atomic_read(&nmi_watchdog_active)) >+ return 0; >+ > if (nmi_watchdog == NMI_NONE) > return 0; > >@@ -164,6 +98,9 @@ static int __init check_nmi_watchdog(voi > mdelay((20*1000)/nmi_hz); // wait 20 ticks > > for_each_possible_cpu(cpu) { >+ if (!per_cpu(wd_enabled, cpu)) >+ continue; >+ > #ifdef CONFIG_SMP > /* Check cpu_callin_map here because that is set > after the timer is started. 
*/ >@@ -177,25 +114,26 @@ static int __init check_nmi_watchdog(voi > cpu, > prev_nmi_count[cpu], > nmi_count(cpu)); >- nmi_active = 0; >- lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; >+ if (atomic_dec_and_test(&nmi_watchdog_active)) >+ nmi_active = 0; >+ per_cpu(wd_enabled, cpu) = 0; > kfree(prev_nmi_count); > return -1; > } > } >+ if (!atomic_read(&nmi_watchdog_active)) { >+ atomic_set(&nmi_watchdog_active, -1); >+ nmi_active = -1; >+ return -1; >+ } >+ > endflag = 1; > printk("OK.\n"); > > /* now that we know it works we can reduce NMI frequency to > something more reasonable; makes a difference in some configs */ >- if (nmi_watchdog == NMI_LOCAL_APIC) { >- >- nmi_hz = 1; >- if (nmi_perfctr_msr == MSR_P6_PERFCTR0 || >- nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { >- nmi_hz = adjust_for_32bit_ctr(nmi_hz); >- } >- } >+ if (nmi_watchdog == NMI_LOCAL_APIC) >+ nmi_hz = lapic_adjust_nmi_hz(1); > > kfree(prev_nmi_count); > return 0; >@@ -209,124 +147,49 @@ static int __init setup_nmi_watchdog(cha > > get_option(&str, &nmi); > >- if (nmi >= NMI_INVALID) >- return 0; >- if (nmi == NMI_NONE) >+ if (nmi >= NMI_NONE && nmi < NMI_INVALID) > nmi_watchdog = nmi; >- /* >- * If any other x86 CPU has a local APIC, then >- * please test the NMI stuff there and send me the >- * missing bits. Right now Intel P6/P4 and AMD K7 only. >- */ >- if ((nmi == NMI_LOCAL_APIC) && >- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && >- (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 >- || boot_cpu_data.x86 == 16)) >- nmi_watchdog = nmi; >- if ((nmi == NMI_LOCAL_APIC) && >- (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && >- (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 >- || boot_cpu_data.x86 == 16)) >- nmi_watchdog = nmi; >- /* >- * We can enable the IO-APIC watchdog >- * unconditionally. 
>- */ >- if (nmi == NMI_IO_APIC) { >- nmi_active = 1; >- nmi_watchdog = nmi; >- } >+ > return 1; > } > > __setup("nmi_watchdog=", setup_nmi_watchdog); > >-static void disable_intel_arch_watchdog(void); >- >-static void disable_lapic_nmi_watchdog(void) >-{ >- if (nmi_active <= 0) >- return; >- switch (boot_cpu_data.x86_vendor) { >- case X86_VENDOR_AMD: >- wrmsr(MSR_K7_EVNTSEL0, 0, 0); >- break; >- case X86_VENDOR_INTEL: >- if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { >- disable_intel_arch_watchdog(); >- break; >- } >- switch (boot_cpu_data.x86) { >- case 6: >- if (boot_cpu_data.x86_model > 0xd) >- break; >- >- wrmsr(MSR_P6_EVNTSEL0, 0, 0); >- break; >- case 15: >- if (boot_cpu_data.x86_model > 0x4) >- break; >- >- wrmsr(MSR_P4_IQ_CCCR0, 0, 0); >- wrmsr(MSR_P4_CRU_ESCR0, 0, 0); >- break; >- } >- break; >- } >- nmi_active = -1; >- /* tell do_nmi() and others that we're not active any more */ >- nmi_watchdog = 0; >-} >- >-static void enable_lapic_nmi_watchdog(void) >-{ >- if (nmi_active < 0) { >- nmi_watchdog = NMI_LOCAL_APIC; >- setup_apic_nmi_watchdog(); >- } >-} >- >+void acpi_nmi_enable(void); >+void acpi_nmi_disable(void); > int reserve_lapic_nmi(void) > { >- unsigned int old_owner; >- >- spin_lock(&lapic_nmi_owner_lock); >- old_owner = lapic_nmi_owner; >- lapic_nmi_owner |= LAPIC_NMI_RESERVED; >- spin_unlock(&lapic_nmi_owner_lock); >- if (old_owner & LAPIC_NMI_RESERVED) >- return -EBUSY; >- if (old_owner & LAPIC_NMI_WATCHDOG) >- disable_lapic_nmi_watchdog(); >- return 0; >+ if (!test_and_set_bit(1, &lapic_nmi_reserved)) { >+ if (nmi_watchdog == NMI_LOCAL_APIC) >+ disable_lapic_nmi_watchdog(); >+ return 0; >+ } >+ return 1; > } > > void release_lapic_nmi(void) > { >- unsigned int new_owner; >- >- spin_lock(&lapic_nmi_owner_lock); >- new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED; >- lapic_nmi_owner = new_owner; >- spin_unlock(&lapic_nmi_owner_lock); >- if (new_owner & LAPIC_NMI_WATCHDOG) >+ if (nmi_watchdog == NMI_LOCAL_APIC) { > 
enable_lapic_nmi_watchdog(); >+ touch_nmi_watchdog(); >+ } >+ clear_bit(1, &lapic_nmi_reserved); > } > >+static int old_ioapic_count; > void disable_timer_nmi_watchdog(void) > { >- if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0)) >- return; >- > unset_nmi_callback(); >+ old_ioapic_count = atomic_read(&nmi_watchdog_active); >+ atomic_set(&nmi_watchdog_active, -1); > nmi_active = -1; >- nmi_watchdog = NMI_NONE; > } > > void enable_timer_nmi_watchdog(void) > { >- if (nmi_active < 0) { >- nmi_watchdog = NMI_IO_APIC; >+ if (nmi_watchdog == NMI_IO_APIC && >+ atomic_read(&nmi_watchdog_active) < 0) { >+ atomic_set(&nmi_watchdog_active, old_ioapic_count); > touch_nmi_watchdog(); > nmi_active = 1; > } >@@ -336,17 +199,20 @@ void enable_timer_nmi_watchdog(void) > > static int nmi_pm_active; /* nmi_active before suspend */ > >+void stop_apic_nmi_watchdog(void); > static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) > { > nmi_pm_active = nmi_active; >- disable_lapic_nmi_watchdog(); >+ stop_apic_nmi_watchdog(); > return 0; > } > > static int lapic_nmi_resume(struct sys_device *dev) > { >- if (nmi_pm_active > 0) >- enable_lapic_nmi_watchdog(); >+ if (nmi_pm_active > 0) { >+ setup_apic_nmi_watchdog(); >+ touch_nmi_watchdog(); >+ } > return 0; > } > >@@ -366,7 +232,7 @@ static int __init init_lapic_nmi_sysfs(v > { > int error; > >- if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC) >+ if (atomic_read(&nmi_watchdog_active) == 0 || nmi_watchdog != NMI_LOCAL_APIC) > return 0; > > error = sysdev_class_register(&nmi_sysclass); >@@ -379,207 +245,79 @@ late_initcall(init_lapic_nmi_sysfs); > > #endif /* CONFIG_PM */ > >-/* >- * Activate the NMI watchdog via the local APIC. >- * Original code written by Keith Owens. 
>- */ >- >-static void clear_msr_range(unsigned int base, unsigned int n) >-{ >- unsigned int i; >- >- for(i = 0; i < n; ++i) >- wrmsr(base+i, 0, 0); >-} >- >-static void write_watchdog_counter(const char *descr) >-{ >- u64 count = (u64)cpu_khz * 1000; >- >- do_div(count, nmi_hz); >- if(descr) >- Dprintk("setting %s to -0x%08Lx\n", descr, count); >- wrmsrl(nmi_perfctr_msr, 0 - count); >-} >- >-static void write_watchdog_counter32(const char *descr) >-{ >- u64 count = (u64)cpu_khz * 1000; >- >- do_div(count, nmi_hz); >- if(descr) >- Dprintk("setting %s to -0x%08Lx\n", descr, count); >- wrmsr(nmi_perfctr_msr, (u32)(-count), 0); >-} >- >-static void setup_k7_watchdog(void) >+void setup_apic_nmi_watchdog (void) > { >- unsigned int evntsel; >- >- nmi_perfctr_msr = MSR_K7_PERFCTR0; >- >- clear_msr_range(MSR_K7_EVNTSEL0, 4); >- clear_msr_range(MSR_K7_PERFCTR0, 4); >- >- evntsel = K7_EVNTSEL_INT >- | K7_EVNTSEL_OS >- | K7_EVNTSEL_USR >- | K7_NMI_EVENT; >+ if (__get_cpu_var(wd_enabled) == 1) >+ return; > >- wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); >- write_watchdog_counter("K7_PERFCTR0"); >- apic_write(APIC_LVTPC, APIC_DM_NMI); >- evntsel |= K7_EVNTSEL_ENABLE; >- wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); >+ switch (nmi_watchdog) { >+ case NMI_LOCAL_APIC: >+ __get_cpu_var(wd_enabled) = 1; >+ if (lapic_watchdog_init(nmi_hz) < 0) { >+ __get_cpu_var(wd_enabled) = 0; >+ return; >+ } >+ /* FALL THROUGH */ >+ case NMI_IO_APIC: >+ __get_cpu_var(wd_enabled) = 1; >+ if (atomic_inc_return(&nmi_watchdog_active) == 1) >+ nmi_active = 1; >+ } > } > >-static void setup_p6_watchdog(void) >+void stop_apic_nmi_watchdog(void) > { >- unsigned int evntsel; >- >- nmi_perfctr_msr = MSR_P6_PERFCTR0; >- >- clear_msr_range(MSR_P6_EVNTSEL0, 2); >- clear_msr_range(MSR_P6_PERFCTR0, 2); >- >- evntsel = P6_EVNTSEL_INT >- | P6_EVNTSEL_OS >- | P6_EVNTSEL_USR >- | P6_NMI_EVENT; >- >- wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); >- nmi_hz = adjust_for_32bit_ctr(nmi_hz); >- write_watchdog_counter32("P6_PERFCTR0"); >- 
apic_write(APIC_LVTPC, APIC_DM_NMI); >- evntsel |= P6_EVNTSEL0_ENABLE; >- wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); >+ /* only support LOCAL and IO APICs for now */ >+ if ((nmi_watchdog != NMI_LOCAL_APIC) && >+ (nmi_watchdog != NMI_IO_APIC)) >+ return; >+ if (__get_cpu_var(wd_enabled) == 0) >+ return; >+ if (nmi_watchdog == NMI_LOCAL_APIC) >+ lapic_watchdog_stop(); >+ __get_cpu_var(wd_enabled) = 0; >+ if (atomic_dec_and_test(&nmi_watchdog_active)) >+ nmi_active = 0; > } > >-static int setup_p4_watchdog(void) >+static void __acpi_nmi_enable(void *__unused) > { >- unsigned int misc_enable, dummy; >- >- rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy); >- if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) >- return 0; >- >- nmi_perfctr_msr = MSR_P4_IQ_COUNTER0; >- nmi_p4_cccr_val = P4_NMI_IQ_CCCR0; >-#ifdef CONFIG_SMP >- if (smp_num_siblings == 2) >- nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; >-#endif >+ if (__get_cpu_var(wd_enabled) == 1) >+ return; > >- if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) >- clear_msr_range(0x3F1, 2); >- /* MSR 0x3F0 seems to have a default value of 0xFC00, but current >- docs doesn't fully define it, so leave it alone for now. 
*/ >- if (boot_cpu_data.x86_model >= 0x3) { >- /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */ >- clear_msr_range(0x3A0, 26); >- clear_msr_range(0x3BC, 3); >- } else { >- clear_msr_range(0x3A0, 31); >- } >- clear_msr_range(0x3C0, 6); >- clear_msr_range(0x3C8, 6); >- clear_msr_range(0x3E0, 2); >- clear_msr_range(MSR_P4_CCCR0, 18); >- clear_msr_range(MSR_P4_PERFCTR0, 18); >- >- wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); >- wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); >- write_watchdog_counter("P4_IQ_COUNTER0"); >- apic_write(APIC_LVTPC, APIC_DM_NMI); >- wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); >- return 1; >+ __get_cpu_var(wd_enabled) = 1; >+ if (atomic_inc_return(&nmi_watchdog_active) == 1) >+ nmi_active = 1; >+ apic_write(APIC_LVT0, APIC_DM_NMI); > } > >-static void disable_intel_arch_watchdog(void) >+/* >+ * Enable timer based NMIs on all CPUs: >+ */ >+void acpi_nmi_enable(void) > { >- unsigned ebx; >- >- /* >- * Check whether the Architectural PerfMon supports >- * Unhalted Core Cycles Event or not. >- * NOTE: Corresponding bit = 0 in ebp indicates event present. >- */ >- ebx = cpuid_ebx(10); >- if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) >- wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0); >+ if (atomic_read(&nmi_watchdog_active) == 0) >+ on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); >+ touch_nmi_watchdog(); > } > >-static int setup_intel_arch_watchdog(void) >+static void __acpi_nmi_disable(void *__unused) > { >- unsigned int evntsel; >- unsigned ebx; >- >- /* >- * Check whether the Architectural PerfMon supports >- * Unhalted Core Cycles Event or not. >- * NOTE: Corresponding bit = 0 in ebp indicates event present. 
>- */ >- ebx = cpuid_ebx(10); >- if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) >- return 0; >- >- nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0; >- >- clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2); >- clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2); >- >- evntsel = ARCH_PERFMON_EVENTSEL_INT >- | ARCH_PERFMON_EVENTSEL_OS >- | ARCH_PERFMON_EVENTSEL_USR >- | ARCH_PERFMON_NMI_EVENT_SEL >- | ARCH_PERFMON_NMI_EVENT_UMASK; >- >- wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); >- nmi_hz = adjust_for_32bit_ctr(nmi_hz); >- write_watchdog_counter32("INTEL_ARCH_PERFCTR0"); >- apic_write(APIC_LVTPC, APIC_DM_NMI); >- evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; >- wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0); >- return 1; >+ if (__get_cpu_var(wd_enabled) == 0) >+ return; >+ apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); >+ __get_cpu_var(wd_enabled) = 0; >+ if (atomic_dec_and_test(&nmi_watchdog_active)) >+ nmi_active = 0; > } > >-void setup_apic_nmi_watchdog (void) >+/* >+ * Disable timer based NMIs on all CPUs: >+ */ >+void acpi_nmi_disable(void) > { >- switch (boot_cpu_data.x86_vendor) { >- case X86_VENDOR_AMD: >- if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && boot_cpu_data.x86 != 16) >- return; >- setup_k7_watchdog(); >- break; >- case X86_VENDOR_INTEL: >- if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { >- if (!setup_intel_arch_watchdog()) >- return; >- break; >- } >- switch (boot_cpu_data.x86) { >- case 6: >- if (boot_cpu_data.x86_model > 0xd) >- return; >- >- setup_p6_watchdog(); >- break; >- case 15: >- if (boot_cpu_data.x86_model > 0x4) >- return; >- >- if (!setup_p4_watchdog()) >- return; >- break; >- default: >- return; >- } >- break; >- default: >- return; >- } >- lapic_nmi_owner = LAPIC_NMI_WATCHDOG; >- nmi_active = 1; >+ if (atomic_read(&nmi_watchdog_active)) >+ on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); > } > > /* >@@ -621,15 +359,14 @@ EXPORT_SYMBOL(touch_nmi_watchdog); > > extern void die_nmi(struct pt_regs *, const char *msg); > 
>-void nmi_watchdog_tick (struct pt_regs * regs) >+int nmi_watchdog_tick (struct pt_regs * regs) > { >- > /* > * Since current_thread_info()-> is always on the stack, and we > * always switch the stack NMI-atomically, it's safe to use > * smp_processor_id(). > */ >- unsigned int sum; >+ unsigned int sum, rc = 0; > int cpu = smp_processor_id(); > > sum = per_cpu(irq_stat, cpu).apic_timer_irqs; >@@ -649,30 +386,34 @@ void nmi_watchdog_tick (struct pt_regs * > last_irq_sums[cpu] = sum; > alert_counter[cpu] = 0; > } >- if (nmi_perfctr_msr) { >- if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) { >- /* >- * P4 quirks: >- * - An overflown perfctr will assert its interrupt >- * until the OVF flag in its CCCR is cleared. >- * - LVTPC is masked on interrupt and must be >- * unmasked by the LVTPC handler. >- */ >- wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); >- apic_write(APIC_LVTPC, APIC_DM_NMI); >- write_watchdog_counter(NULL); >- } >- else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 || >- nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) { >- /* Only P6 based Pentium M need to re-unmask >- * the apic vector but it doesn't hurt >- * other P6 variant */ >- apic_write(APIC_LVTPC, APIC_DM_NMI); >- write_watchdog_counter32(NULL); >- } else { >- write_watchdog_counter(NULL); >- } >- } >+ >+ /* see if the nmi watchdog went off */ >+ if (!__get_cpu_var(wd_enabled)) >+ return rc; >+ switch (nmi_watchdog) { >+ case NMI_LOCAL_APIC: >+ rc |= lapic_wd_event(nmi_hz); >+ break; >+ case NMI_IO_APIC: >+ /* don't know how to accurately check for this. >+ * just assume it was a watchdog timer interrupt >+ * This matches the old behaviour. 
>+ */ >+ rc = 1; >+ break; >+ } >+ >+ return rc; >+} >+ >+static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); >+int do_nmi_callback(struct pt_regs *regs, int cpu) >+{ >+#ifdef CONFIG_SYSCTL >+ if (unknown_nmi_panic) >+ unknown_nmi_panic_callback(regs, cpu); >+#endif >+ return 0; > } > > #ifdef CONFIG_SYSCTL >@@ -682,10 +423,9 @@ static int unknown_nmi_panic_callback(st > unsigned char reason = get_nmi_reason(); > char buf[64]; > >- if (!(reason & 0xc0)) { >- sprintf(buf, "NMI received for unknown reason %02x\n", reason); >- die_nmi(regs, buf); >- } >+ sprintf(buf, "NMI received for unknown reason %02x\n", reason); >+ die_nmi(regs, buf); >+ > return 0; > } > >@@ -702,17 +442,18 @@ int proc_unknown_nmi_panic(ctl_table *ta > if (!!old_state == !!unknown_nmi_panic) > return 0; > >- if (unknown_nmi_panic) { >- if (reserve_lapic_nmi() < 0) { >- unknown_nmi_panic = 0; >- return -EBUSY; >- } else { >- set_nmi_callback(unknown_nmi_panic_callback); >- } >- } else { >- release_lapic_nmi(); >- unset_nmi_callback(); >- } >+ if (unknown_nmi_panic) { >+ if (nmi_watchdog == NMI_LOCAL_APIC) >+ disable_lapic_nmi_watchdog(); >+ else if (nmi_watchdog == NMI_IO_APIC) >+ acpi_nmi_disable(); >+ } else { >+ if (nmi_watchdog == NMI_LOCAL_APIC) >+ enable_lapic_nmi_watchdog(); >+ else if (nmi_watchdog == NMI_IO_APIC) >+ acpi_nmi_enable(); >+ } >+ > return 0; > } > >--- tree.orig/include/asm-i386/nmi.h 2008-07-07 15:42:59.000000000 -0400 >+++ tree/include/asm-i386/nmi.h 2008-07-08 13:20:56.000000000 -0400 >@@ -11,6 +11,14 @@ struct pt_regs; > typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); > > /** >+ * do_nmi_callback >+ * >+ * Check to see if a callback exists and execute it. Return 1 >+ * if the handler exists and was handled successfully. >+ */ >+int do_nmi_callback(struct pt_regs *regs, int cpu); >+ >+/** > * set_nmi_callback > * > * Set a handler for an NMI. 
Only one handler may be >@@ -25,12 +33,27 @@ void set_nmi_callback(nmi_callback_t cal > */ > void unset_nmi_callback(void); > >+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); >+extern int avail_to_resrv_perfctr_nmi(unsigned int); >+extern int reserve_perfctr_nmi(unsigned int); >+extern void release_perfctr_nmi(unsigned int); >+extern int reserve_evntsel_nmi(unsigned int); >+extern void release_evntsel_nmi(unsigned int); >+ >+void lapic_watchdog_stop(void); >+int lapic_watchdog_init(unsigned nmi_hz); >+int lapic_wd_event(unsigned nmi_hz); >+unsigned lapic_adjust_nmi_hz(unsigned hz); >+int lapic_watchdog_ok(void); >+void disable_lapic_nmi_watchdog(void); >+void enable_lapic_nmi_watchdog(void); >+ > extern void setup_apic_nmi_watchdog (void); > extern int reserve_lapic_nmi(void); > extern void release_lapic_nmi(void); > extern void disable_timer_nmi_watchdog(void); > extern void enable_timer_nmi_watchdog(void); >-extern void nmi_watchdog_tick (struct pt_regs * regs); >+extern int nmi_watchdog_tick (struct pt_regs * regs); > > extern unsigned int nmi_watchdog; > #define NMI_DEFAULT -1 >--- tree.orig/include/asm-i386/intel_arch_perfmon.h 2008-07-07 15:42:59.000000000 -0400 >+++ tree/include/asm-i386/intel_arch_perfmon.h 2008-07-08 13:13:46.000000000 -0400 >@@ -14,6 +14,18 @@ > > #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) > #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) >-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) >+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) >+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ >+ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) > >+ >+union cpuid10_eax { >+ struct { >+ unsigned int version_id:8; >+ unsigned int num_counters:8; >+ unsigned int bit_width:8; >+ unsigned int mask_length:8; >+ } split; >+ unsigned int full; >+}; > #endif /* X86_INTEL_ARCH_PERFMON_H */ >--- tree.orig/include/asm-i386/apic.h 2008-07-07 15:42:59.000000000 -0400 >+++ tree/include/asm-i386/apic.h 
2008-07-08 13:12:24.000000000 -0400 >@@ -107,6 +107,7 @@ extern void cache_APIC_registers (void); > extern void sync_Arb_IDs (void); > extern void init_bsp_APIC (void); > extern void setup_local_APIC (void); >+extern void stop_apic_nmi_watchdog(void); > extern void init_apic_mappings (void); > extern void smp_local_timer_interrupt (struct pt_regs * regs); > extern void setup_boot_APIC_clock (void); >--- tree.orig/arch/i386/kernel/apic.c 2008-07-07 15:42:59.000000000 -0400 >+++ tree/arch/i386/kernel/apic.c 2008-07-08 13:12:24.000000000 -0400 >@@ -586,8 +586,7 @@ void __devinit setup_local_APIC(void) > printk("No ESR for 82489DX.\n"); > } > >- if (nmi_watchdog == NMI_LOCAL_APIC) >- setup_apic_nmi_watchdog(); >+ setup_apic_nmi_watchdog(); > apic_pm_activate(); > } > >--- tree.orig/arch/i386/kernel/traps.c 2008-07-07 15:42:59.000000000 -0400 >+++ tree/arch/i386/kernel/traps.c 2008-07-08 13:20:56.000000000 -0400 >@@ -811,26 +811,26 @@ void die_nmi (struct pt_regs *regs, cons > static void default_do_nmi(struct pt_regs * regs) > { > unsigned char reason = 0; >+ int cpu = smp_processor_id(); > > /* Only the BSP gets external NMIs from the system. */ >- if (!smp_processor_id()) >+ if (!cpu) > reason = get_nmi_reason(); > > if (!(reason & 0xc0)) { > if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) > == NOTIFY_STOP) > return; >-#ifdef CONFIG_X86_LOCAL_APIC >+ > /* > * Ok, so this is none of the documented NMI sources, > * so it must be the NMI watchdog. > */ >- if (nmi_watchdog) { >- nmi_watchdog_tick(regs); >+ if (nmi_watchdog_tick(regs)) > return; >- } >-#endif >- unknown_nmi_error(reason, regs); >+ >+ if (!do_nmi_callback(regs, cpu)) >+ unknown_nmi_error(reason, regs); > return; > } > if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 459649
: 314655