Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or login using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 306017 Details for
Bug 439548
A deadlock can occur between mmap/munmap and journaling (ext3).
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
pagefault disable/enable stuff. Patch 1/2.
pagefault-disable-enable.patch (text/plain), 14.58 KB, created by
Josef Bacik
on 2008-05-19 21:34:05 UTC
(
hide
)
Description:
pagefault disable/enable stuff. Patch 1/2.
Filename:
MIME Type:
Creator:
Josef Bacik
Created:
2008-05-19 21:34:05 UTC
Size:
14.58 KB
patch
obsolete
>Index: linux-rhel4-internal/arch/i386/mm/highmem-xen.c >=================================================================== >--- linux-rhel4-internal.orig/arch/i386/mm/highmem-xen.c >+++ linux-rhel4-internal/arch/i386/mm/highmem-xen.c >@@ -31,7 +31,7 @@ static void *__kmap_atomic_xen(struct pa > unsigned long vaddr; > > /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ >- inc_preempt_count(); >+ pagefault_disable(); > if (page < highmem_start_page) > return page_address(page); > >@@ -64,8 +64,7 @@ void kunmap_atomic(void *kvaddr, enum km > enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); > > if (vaddr < FIXADDR_START) { // FIXME >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > return; > } > #endif >@@ -90,8 +89,7 @@ void kunmap_atomic(void *kvaddr, enum km > pte_clear(kmap_pte-idx); > #endif > >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > } > > struct page *kmap_atomic_to_page(void *ptr) >Index: linux-rhel4-internal/arch/i386/mm/highmem.c >=================================================================== >--- linux-rhel4-internal.orig/arch/i386/mm/highmem.c >+++ linux-rhel4-internal/arch/i386/mm/highmem.c >@@ -31,7 +31,7 @@ void *kmap_atomic(struct page *page, enu > unsigned long vaddr; > > /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ >- inc_preempt_count(); >+ pagefault_disable(); > if (page < highmem_start_page) > return page_address(page); > >@@ -87,8 +87,7 @@ void kunmap_atomic(void *kvaddr, enum km > enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); > > if (vaddr < FIXADDR_START) { // FIXME >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > return; > } > >@@ -103,8 +102,7 @@ void kunmap_atomic(void *kvaddr, enum km > __flush_tlb_one(vaddr); > #endif > >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > } > > struct page *kmap_atomic_to_page(void *ptr) >Index: 
linux-rhel4-internal/arch/mips/mm/highmem.c >=================================================================== >--- linux-rhel4-internal.orig/arch/mips/mm/highmem.c >+++ linux-rhel4-internal/arch/mips/mm/highmem.c >@@ -40,7 +40,7 @@ void *__kmap_atomic(struct page *page, e > unsigned long vaddr; > > /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ >- inc_preempt_count(); >+ pagefault_disable(); > if (page < highmem_start_page) > return page_address(page); > >@@ -63,8 +63,7 @@ void __kunmap_atomic(void *kvaddr, enum > enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); > > if (vaddr < FIXADDR_START) { // FIXME >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > return; > } > >@@ -79,8 +78,7 @@ void __kunmap_atomic(void *kvaddr, enum > local_flush_tlb_one(vaddr); > #endif > >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > } > > struct page *__kmap_atomic_to_page(void *ptr) >Index: linux-rhel4-internal/arch/sparc/mm/highmem.c >=================================================================== >--- linux-rhel4-internal.orig/arch/sparc/mm/highmem.c >+++ linux-rhel4-internal/arch/sparc/mm/highmem.c >@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enu > unsigned long vaddr; > > /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ >- inc_preempt_count(); >+ pagefault_disable(); > if (page < highmem_start_page) > return page_address(page); > >@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km > unsigned long idx = type + KM_TYPE_NR*smp_processor_id(); > > if (vaddr < FIXADDR_START) { // FIXME >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > return; > } > >@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km > #endif > #endif > >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > } > > /* We may be fed a pagetable here by ptep_to_xxx and others. 
*/ >Index: linux-rhel4-internal/include/asm-ppc/highmem.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-ppc/highmem.h >+++ linux-rhel4-internal/include/asm-ppc/highmem.h >@@ -81,7 +81,7 @@ static inline void *kmap_atomic(struct p > unsigned long vaddr; > > /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ >- inc_preempt_count(); >+ pagefault_disable(); > if (page < highmem_start_page) > return page_address(page); > >@@ -103,8 +103,7 @@ static inline void kunmap_atomic(void *k > unsigned int idx = type + KM_TYPE_NR*smp_processor_id(); > > if (vaddr < KMAP_FIX_BEGIN) { // FIXME >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > return; > } > >@@ -117,8 +116,7 @@ static inline void kunmap_atomic(void *k > pte_clear(kmap_pte+idx); > flush_tlb_page(NULL, vaddr); > #endif >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > } > > static inline struct page *kmap_atomic_to_page(void *ptr) >Index: linux-rhel4-internal/kernel/futex.c >=================================================================== >--- linux-rhel4-internal.orig/kernel/futex.c >+++ linux-rhel4-internal/kernel/futex.c >@@ -39,6 +39,7 @@ > #include <linux/mount.h> > #include <linux/pagemap.h> > #include <linux/syscalls.h> >+#include <asm/uaccess.h> > > #define FUTEX_HASHBITS 8 > >@@ -262,10 +263,9 @@ static inline int get_futex_value_locked > { > int ret; > >- inc_preempt_count(); >+ pagefault_disable(); > ret = __copy_from_user_inatomic(dest, from, sizeof(int)); >- dec_preempt_count(); >- preempt_check_resched(); >+ pagefault_enable(); > > return ret ? 
-EFAULT : 0; > } >Index: linux-rhel4-internal/include/asm-generic/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-generic/uaccess.h >+++ linux-rhel4-internal/include/asm-generic/uaccess.h >@@ -1,6 +1,8 @@ > #ifndef _ASM_GENERIC_UACCESS_H_ > #define _ASM_GENERIC_UACCESS_H_ > >+#include <linux/preempt.h> >+ > /* > * This macro should be used instead of __get_user() when accessing > * values at locations that are not known to be aligned. >@@ -23,4 +25,39 @@ > __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ > }) > >+/* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. >+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. 
>+ */ >+ barrier(); >+ preempt_check_resched(); >+} >+ >+ > #endif /* _ASM_GENERIC_UACCESS_H */ >Index: linux-rhel4-internal/include/asm-i386/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-i386/uaccess.h >+++ linux-rhel4-internal/include/asm-i386/uaccess.h >@@ -46,6 +46,40 @@ extern struct movsl_mask { > #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) > > /* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. >+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} >+ >+/* > * Test whether a block of memory is a valid user space address. > * Returns 0 if the range is valid, nonzero otherwise. > * >Index: linux-rhel4-internal/include/asm-ia64/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-ia64/uaccess.h >+++ linux-rhel4-internal/include/asm-ia64/uaccess.h >@@ -76,6 +76,40 @@ verify_area (int type, const void __user > } > > /* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. 
>+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} >+ >+/* > * These are the main single-value transfer routines. They automatically > * use the right size if we just have the right pointer type. > * >Index: linux-rhel4-internal/include/asm-ppc/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-ppc/uaccess.h >+++ linux-rhel4-internal/include/asm-ppc/uaccess.h >@@ -42,6 +42,39 @@ extern inline int verify_area(int type, > return access_ok(type, addr, size) ? 0 : -EFAULT; > } > >+/* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. >+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. 
>+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} > > /* > * The exception table consists of pairs of addresses: the first is the >Index: linux-rhel4-internal/include/asm-ppc64/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-ppc64/uaccess.h >+++ linux-rhel4-internal/include/asm-ppc64/uaccess.h >@@ -61,6 +61,39 @@ static inline int verify_area(int type, > return access_ok(type,addr,size) ? 0 : -EFAULT; > } > >+/* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. >+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} > > /* > * The exception table consists of pairs of addresses: the first is the >Index: linux-rhel4-internal/include/asm-s390/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-s390/uaccess.h >+++ linux-rhel4-internal/include/asm-s390/uaccess.h >@@ -72,6 +72,40 @@ extern inline int verify_area(int type, > } > > /* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. 
>+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. >+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} >+ >+/* > * The exception table consists of pairs of addresses: the first is the > * address of an instruction that is allowed to fault, and the second is > * the address at which the program should continue. No registers are >Index: linux-rhel4-internal/include/asm-x86_64/uaccess.h >=================================================================== >--- linux-rhel4-internal.orig/include/asm-x86_64/uaccess.h >+++ linux-rhel4-internal/include/asm-x86_64/uaccess.h >@@ -49,6 +49,40 @@ > > #define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) > >+/* >+ * These routines enable/disable the pagefault handler in that >+ * it will not take any locks and go straight to the fixup table. >+ * >+ * They have great resemblance to the preempt_disable/enable calls >+ * and in fact they are identical; this is because currently there is >+ * no other way to make the pagefault handlers do this. So we do >+ * disable preemption but we don't necessarily care about that. >+ */ >+static inline void pagefault_disable(void) >+{ >+ inc_preempt_count(); >+ /* >+ * make sure to have issued the store before a pagefault >+ * can hit. 
>+ */ >+ barrier(); >+} >+ >+static inline void pagefault_enable(void) >+{ >+ /* >+ * make sure to issue those last loads/stores before enabling >+ * the pagefault handler again. >+ */ >+ barrier(); >+ dec_preempt_count(); >+ /* >+ * make sure we do.. >+ */ >+ barrier(); >+ preempt_check_resched(); >+} >+ > extern inline int verify_area(int type, const void __user * addr, unsigned long size) > { > return access_ok(type,addr,size) ? 0 : -EFAULT;
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 439548
: 306017 |
306018
|
308564
|
310833
|
328208
|
328578
|
328771
|
328891