Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 295742 Details for
Bug 249867
Kernel can BUG() in low memory conditions
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Combined patch, rebased against the latest RHEL-4 HEAD
linux-2.6.9-xen-destroy_contig-memex.patch (text/plain), 8.18 KB, created by
Chris Lalancette
on 2008-02-24 16:57:49 UTC
(
hide
)
Description:
Combined patch, rebased against the latest RHEL-4 HEAD
Filename:
MIME Type:
Creator:
Chris Lalancette
Created:
2008-02-24 16:57:49 UTC
Size:
8.18 KB
patch
obsolete
>diff -urp linux-2.6.9.orig/arch/i386/mm/hypervisor.c linux-2.6.9/arch/i386/mm/hypervisor.c >--- linux-2.6.9.orig/arch/i386/mm/hypervisor.c 2008-02-24 11:48:22.000000000 -0500 >+++ linux-2.6.9/arch/i386/mm/hypervisor.c 2008-02-24 11:50:43.000000000 -0500 >@@ -261,6 +261,10 @@ static void contiguous_bitmap_clear( > } > } > >+/* Protected by balloon_lock. */ >+#define MAX_CONTIG_ORDER 7 >+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER]; >+ > /* Ensure multi-page extents are contiguous in machine memory. */ > int xen_create_contiguous_region( > unsigned long vstart, unsigned int order, unsigned int address_bits) >@@ -268,13 +272,23 @@ int xen_create_contiguous_region( > pgd_t *pgd; > pmd_t *pmd; > pte_t *pte; >+ unsigned long *in_frames = discontig_frames, out_frame; > unsigned long frame, i, flags; >- struct xen_memory_reservation reservation = { >- .nr_extents = 1, >- .extent_order = 0, >- .domid = DOMID_SELF >+ long rc; >+ int success; >+ struct xen_memory_exchange exchange = { >+ .in = { >+ .nr_extents = 1UL << order, >+ .extent_order = 0, >+ .domid = DOMID_SELF >+ }, >+ .out = { >+ .nr_extents = 1, >+ .extent_order = order, >+ .address_bits = address_bits, >+ .domid = DOMID_SELF >+ } > }; >- set_xen_guest_handle(reservation.extent_start, &frame); > > /* > * Currently an auto-translated guest will not perform I/O, nor will >@@ -284,67 +298,72 @@ int xen_create_contiguous_region( > if (xen_feature(XENFEAT_auto_translated_physmap)) > return 0; > >+ if (order > MAX_CONTIG_ORDER) >+ return -ENOMEM; >+ >+ set_xen_guest_handle(exchange.in.extent_start, in_frames); >+ set_xen_guest_handle(exchange.out.extent_start, &out_frame); >+ > scrub_pages(vstart, 1 << order); > > balloon_lock(flags); > >- /* 1. Zap current PTEs, giving away the underlying pages. */ >- for (i = 0; i < (1<<order); i++) { >+ /* 1. Zap current PTEs, remembering MFNs. 
*/ >+ for (i = 0; i < (1UL<<order); i++) { > pgd = pgd_offset_k(vstart + (i*PAGE_SIZE)); > pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE))); > pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); >- frame = pte_mfn(*pte); >- BUG_ON(HYPERVISOR_update_va_mapping( >- vstart + (i*PAGE_SIZE), __pte_ma(0), 0)); >+ in_frames[i] = pte_mfn(*pte); >+ if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), >+ __pte_ma(0), 0)) >+ BUG(); > set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, > INVALID_P2M_ENTRY); >- BUG_ON(HYPERVISOR_memory_op( >- XENMEM_decrease_reservation, &reservation) != 1); > } > > /* 2. Get a new contiguous memory extent. */ >- reservation.extent_order = order; >- reservation.address_bits = address_bits; >- frame = __pa(vstart) >> PAGE_SHIFT; >- if (HYPERVISOR_memory_op(XENMEM_populate_physmap, >- &reservation) != 1) >- goto fail; >- >- /* 3. Map the new extent in place of old pages. */ >- for (i = 0; i < (1<<order); i++) { >- BUG_ON(HYPERVISOR_update_va_mapping( >- vstart + (i*PAGE_SIZE), >- pfn_pte_ma(frame+i, PAGE_KERNEL), 0)); >- set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i); >+ out_frame = __pa(vstart) >> PAGE_SHIFT; >+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); >+ success = (exchange.nr_exchanged == (1UL << order)); >+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); >+ BUG_ON(success && (rc != 0)); >+ if (unlikely(rc == -ENOSYS)) { >+ /* Compatibility when XENMEM_exchange is unsupported. */ >+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, >+ &exchange.in) != (1UL << order)) >+ BUG(); >+ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap, >+ &exchange.out) == 1); >+ if (!success) { >+ /* Couldn't get special memory: fall back to normal. 
*/ >+ for (i = 0; i < (1UL<<order); i++) >+ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i; >+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap, >+ &exchange.in) != (1UL<<order)) >+ BUG(); >+ } > } > >- flush_tlb_all(); >- >- contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order); >- >- balloon_unlock(flags); >- >- return 0; >- >- fail: >- reservation.extent_order = 0; >- reservation.address_bits = 0; >- >- for (i = 0; i < (1<<order); i++) { >- frame = (__pa(vstart) >> PAGE_SHIFT) + i; >- BUG_ON(HYPERVISOR_memory_op( >- XENMEM_populate_physmap, &reservation) != 1); >- BUG_ON(HYPERVISOR_update_va_mapping( >- vstart + (i*PAGE_SIZE), >- pfn_pte_ma(frame, PAGE_KERNEL), 0)); >+ /* 3. Map the new extent in place of old pages. */ >+ for (i = 0; i < (1UL<<order); i++) { >+ frame = success ? (out_frame + i) : in_frames[i]; >+ if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), >+ pfn_pte_ma(frame, >+ PAGE_KERNEL), >+ 0)) >+ BUG(); > set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame); > } > > flush_tlb_all(); > >+ if (success) >+ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, >+ 1UL << order); >+ > balloon_unlock(flags); > >- return -ENOMEM; >+ return success ? 
0 : -ENOMEM; > } > > void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) >@@ -352,45 +371,78 @@ void xen_destroy_contiguous_region(unsig > pgd_t *pgd; > pmd_t *pmd; > pte_t *pte; >+ unsigned long *out_frames = discontig_frames, in_frame; > unsigned long frame, i, flags; >- struct xen_memory_reservation reservation = { >- .nr_extents = 1, >- .extent_order = 0, >- .domid = DOMID_SELF >+ long rc; >+ int success; >+ struct xen_memory_exchange exchange = { >+ .in = { >+ .nr_extents = 1, >+ .extent_order = order, >+ .domid = DOMID_SELF >+ }, >+ .out = { >+ .nr_extents = 1UL << order, >+ .extent_order = 0, >+ .domid = DOMID_SELF >+ } > }; >- set_xen_guest_handle(reservation.extent_start, &frame); > >- if (xen_feature(XENFEAT_auto_translated_physmap)) >+ if (xen_feature(XENFEAT_auto_translated_physmap) || >+ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap)) >+ return; >+ >+ if (order > MAX_CONTIG_ORDER) > return; > >+ set_xen_guest_handle(exchange.in.extent_start, &in_frame); >+ set_xen_guest_handle(exchange.out.extent_start, out_frames); >+ > scrub_pages(vstart, 1 << order); > > balloon_lock(flags); > > contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order); > >- /* 1. Zap current PTEs, giving away the underlying pages. */ >- for (i = 0; i < (1<<order); i++) { >- pgd = pgd_offset_k(vstart + (i*PAGE_SIZE)); >- pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE))); >- pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); >- frame = pte_mfn(*pte); >- BUG_ON(HYPERVISOR_update_va_mapping( >- vstart + (i*PAGE_SIZE), __pte_ma(0), 0)); >+ /* 1. Find start MFN of contiguous extent. */ >+ pgd = pgd_offset_k(vstart); >+ pmd = pmd_offset(pgd, vstart); >+ pte = pte_offset_kernel(pmd, vstart); >+ in_frame = pte_mfn(*pte); >+ >+ /* 2. Zap current PTEs. 
*/ >+ for (i = 0; i < (1UL<<order); i++) { >+ BUG_ON(HYPERVISOR_update_va_mapping( >+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0)); > set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, > INVALID_P2M_ENTRY); >- BUG_ON(HYPERVISOR_memory_op( >- XENMEM_decrease_reservation, &reservation) != 1); >+ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i; > } > >- /* 2. Map new pages in place of old pages. */ >- for (i = 0; i < (1<<order); i++) { >- frame = (__pa(vstart) >> PAGE_SHIFT) + i; >- BUG_ON(HYPERVISOR_memory_op( >- XENMEM_populate_physmap, &reservation) != 1); >- BUG_ON(HYPERVISOR_update_va_mapping( >- vstart + (i*PAGE_SIZE), >- pfn_pte_ma(frame, PAGE_KERNEL), 0)); >+ /* 3. Do the exchange for non-contiguous MFNs. */ >+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); >+ success = (exchange.nr_exchanged == 1); >+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); >+ BUG_ON(success && (rc != 0)); >+ if (rc == -ENOSYS) { >+ /* Compatibility when XENMEM_exchange is unsupported. */ >+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, >+ &exchange.in) != 1) >+ BUG(); >+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap, >+ &exchange.out) != (1UL << order)) >+ BUG(); >+ success = 1; >+ } >+ >+ /* 4. Map new pages in place of old pages. */ >+ for (i = 0; i < (1UL<<order); i++) { >+ frame = success ? out_frames[i] : (in_frame + i); >+ if (HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), >+ pfn_pte_ma(frame, >+ PAGE_KERNEL), >+ 0)) >+ BUG(); > set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame); > } > >Only in linux-2.6.9/arch/i386/mm: hypervisor.c.orig
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 249867
:
288721
|
288731
|
295742
|
295745
|
328115