Red Hat Bugzilla – Attachment 295745 Details for Bug 249867: Kernel can BUG() in low memory conditions
Description: New version of the patch, including batched hypercalls
Filename:    linux-2.6.9-xen-destroy_contig-memex3.patch
MIME Type:   text/plain
Creator:     Chris Lalancette
Created:     2008-02-24 18:09:11 UTC
Size:        10.33 KB
Flags:       patch, obsolete
diff -urp linux-2.6.9.orig/arch/i386/mm/hypervisor.c linux-2.6.9/arch/i386/mm/hypervisor.c
--- linux-2.6.9.orig/arch/i386/mm/hypervisor.c	2008-02-24 12:56:11.000000000 -0500
+++ linux-2.6.9/arch/i386/mm/hypervisor.c	2008-02-24 12:53:03.000000000 -0500
@@ -86,15 +86,15 @@ void xen_l3_entry_update(pgd_t *ptr, pgd
 {
 	mmu_update_t u;
 	u.ptr = virt_to_machine(ptr);
-	u.val = val.pud;
+	u.val = val.pgd;
 	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
-void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
+void xen_l4_entry_update(pml4_t *ptr, pml4_t val)
 {
 	mmu_update_t u;
 	u.ptr = virt_to_machine(ptr);
-	u.val = val.pgd;
+	u.val = val.pml4;
 	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
@@ -157,7 +157,7 @@ void xen_tlb_flush_mask(cpumask_t *mask)
 	if ( cpus_empty(*mask) )
 		return;
 	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	op.arg2.vcpumask = mask->bits;
+	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
 	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
@@ -176,7 +176,7 @@ void xen_invlpg_mask(cpumask_t *mask, un
 		return;
 	op.cmd = MMUEXT_INVLPG_MULTI;
 	op.arg1.linear_addr = ptr & PAGE_MASK;
-	op.arg2.vcpumask = mask->bits;
+	set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
 	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
@@ -261,20 +261,32 @@ static void contiguous_bitmap_clear(
 	}
 }
 
+/* Protected by balloon_lock. */
+#define MAX_CONTIG_ORDER 9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
+
 /* Ensure multi-page extents are contiguous in machine memory. */
 int xen_create_contiguous_region(
 	unsigned long vstart, unsigned int order, unsigned int address_bits)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
+	unsigned long *in_frames = discontig_frames, out_frame;
 	unsigned long frame, i, flags;
-	struct xen_memory_reservation reservation = {
-		.nr_extents = 1,
-		.extent_order = 0,
-		.domid = DOMID_SELF
+	long rc;
+	int success;
+	struct xen_memory_exchange exchange = {
+		.in = {
+			.nr_extents = 1UL << order,
+			.extent_order = 0,
+			.domid = DOMID_SELF
+		},
+		.out = {
+			.nr_extents = 1,
+			.extent_order = order,
+			.address_bits = address_bits,
+			.domid = DOMID_SELF
+		}
 	};
-	set_xen_guest_handle(reservation.extent_start, &frame);
 
 	/*
 	 * Currently an auto-translated guest will not perform I/O, nor will
@@ -284,117 +296,152 @@ int xen_create_contiguous_region(
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
+	if (unlikely(order > MAX_CONTIG_ORDER))
+		return -ENOMEM;
+
+	set_xen_guest_handle(exchange.in.extent_start, in_frames);
+	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
+
 	scrub_pages(vstart, 1 << order);
 
 	balloon_lock(flags);
 
-	/* 1. Zap current PTEs, giving away the underlying pages. */
-	for (i = 0; i < (1<<order); i++) {
-		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-		pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		frame = pte_mfn(*pte);
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+	/* 1. Zap current PTEs, remembering MFNs. */
+	for (i = 0; i < (1UL<<order); i++) {
+		in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
					__pte_ma(0), 0);
 		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
 			INVALID_P2M_ENTRY);
-		BUG_ON(HYPERVISOR_memory_op(
-			XENMEM_decrease_reservation, &reservation) != 1);
 	}
+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
+		BUG();
 
 	/* 2. Get a new contiguous memory extent. */
-	reservation.extent_order = order;
-	reservation.address_bits = address_bits;
-	frame = __pa(vstart) >> PAGE_SHIFT;
-	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-				 &reservation) != 1)
-		goto fail;
-
-	/* 3. Map the new extent in place of old pages. */
-	for (i = 0; i < (1<<order); i++) {
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE),
-			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
-		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
+	out_frame = __pa(vstart) >> PAGE_SHIFT;
+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+	success = (exchange.nr_exchanged == (1UL << order));
+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+	BUG_ON(success && (rc != 0));
+	if (unlikely(rc == -ENOSYS)) {
+		/* Compatibility when XENMEM_exchange is unsupported. */
+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+					 &exchange.in) != (1UL << order))
+			BUG();
+		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+						&exchange.out) == 1);
+		if (!success) {
+			/* Couldn't get special memory: fall back to normal. */
+			for (i = 0; i < (1UL<<order); i++)
+				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
+			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+						 &exchange.in) != (1UL<<order))
+				BUG();
+		}
 	}
 
-	flush_tlb_all();
-
-	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-
-	balloon_unlock(flags);
-
-	return 0;
-
- fail:
-	reservation.extent_order = 0;
-	reservation.address_bits = 0;
-
-	for (i = 0; i < (1<<order); i++) {
-		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-		BUG_ON(HYPERVISOR_memory_op(
-			XENMEM_populate_physmap, &reservation) != 1);
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE),
-			pfn_pte_ma(frame, PAGE_KERNEL), 0));
+	/* 3. Map the new extent in place of old pages. */
+	for (i = 0; i < (1UL<<order); i++) {
+		frame = success ? (out_frame + i) : in_frames[i];
+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
					pfn_pte_ma(frame, PAGE_KERNEL), 0);
 		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
 	}
 
-	flush_tlb_all();
+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
+		? UVMF_TLB_FLUSH|UVMF_ALL
+		: UVMF_INVLPG|UVMF_ALL;
+
+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
+		BUG();
+
+	if (success)
+		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
+			1UL << order);
 
 	balloon_unlock(flags);
 
-	return -ENOMEM;
+	return success ? 0 : -ENOMEM;
 }
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
+	unsigned long *out_frames = discontig_frames, in_frame;
 	unsigned long frame, i, flags;
-	struct xen_memory_reservation reservation = {
-		.nr_extents = 1,
-		.extent_order = 0,
-		.domid = DOMID_SELF
+	long rc;
+	int success;
+	struct xen_memory_exchange exchange = {
+		.in = {
+			.nr_extents = 1,
+			.extent_order = order,
+			.domid = DOMID_SELF
+		},
+		.out = {
+			.nr_extents = 1UL << order,
+			.extent_order = 0,
+			.domid = DOMID_SELF
+		}
 	};
-	set_xen_guest_handle(reservation.extent_start, &frame);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_feature(XENFEAT_auto_translated_physmap) ||
+	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
+		return;
+
+	if (unlikely(order > MAX_CONTIG_ORDER))
 		return;
 
+	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
+	set_xen_guest_handle(exchange.out.extent_start, out_frames);
+
 	scrub_pages(vstart, 1 << order);
 
 	balloon_lock(flags);
 
 	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
 
-	/* 1. Zap current PTEs, giving away the underlying pages. */
-	for (i = 0; i < (1<<order); i++) {
-		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-		pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
-		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		frame = pte_mfn(*pte);
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+	/* 1. Find start MFN of contiguous extent. */
+	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
+
+	/* 2. Zap current PTEs. */
+	for (i = 0; i < (1UL<<order); i++) {
+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
					__pte_ma(0), 0);
 		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
 			INVALID_P2M_ENTRY);
-		BUG_ON(HYPERVISOR_memory_op(
-			XENMEM_decrease_reservation, &reservation) != 1);
+		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
+	}
+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
+		BUG();
+
+	/* 3. Do the exchange for non-contiguous MFNs. */
+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+	success = (exchange.nr_exchanged == 1);
+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+	BUG_ON(success && (rc != 0));
+	if (unlikely(rc == -ENOSYS)) {
+		/* Compatibility when XENMEM_exchange is unsupported. */
+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+					 &exchange.in) != 1)
+			BUG();
+		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+					 &exchange.out) != (1UL << order))
+			BUG();
+		success = 1;
 	}
 
-	/* 2. Map new pages in place of old pages. */
-	for (i = 0; i < (1<<order); i++) {
-		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-		BUG_ON(HYPERVISOR_memory_op(
-			XENMEM_populate_physmap, &reservation) != 1);
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			vstart + (i*PAGE_SIZE),
-			pfn_pte_ma(frame, PAGE_KERNEL), 0));
+	/* 4. Map new pages in place of old pages. */
+	for (i = 0; i < (1UL<<order); i++) {
+		frame = success ? out_frames[i] : (in_frame + i);
+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
					pfn_pte_ma(frame, PAGE_KERNEL), 0);
 		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
 	}
 
-	flush_tlb_all();
+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
+		? UVMF_TLB_FLUSH|UVMF_ALL
+		: UVMF_INVLPG|UVMF_ALL;
+	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
+		BUG();
 
 	balloon_unlock(flags);
 }
diff -urp linux-2.6.9.orig/arch/x86_64/mm/Makefile linux-2.6.9/arch/x86_64/mm/Makefile
--- linux-2.6.9.orig/arch/x86_64/mm/Makefile	2008-02-24 12:56:11.000000000 -0500
+++ linux-2.6.9/arch/x86_64/mm/Makefile	2008-02-24 12:54:44.000000000 -0500
@@ -7,9 +7,12 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpag
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
 obj-$(CONFIG_K8_NUMA) += k8topology.o
 obj-$(CONFIG_ACPI_NUMA) += srat.o
-obj-$(CONFIG_XEN) += hypervisor.o
 
 ifdef CONFIG_XEN
 include $(srctree)/scripts/Makefile.xen
+
+hypervisor-y += ../../i386/mm/hypervisor.o
+obj-y += hypervisor.o
+
 obj-y := $(call cherrypickxen, $(obj-y))
 endif
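The "batched hypercalls" mentioned in the description refer to the pattern visible in both functions above: instead of issuing one HYPERVISOR_update_va_mapping hypercall per page, each PTE update is queued into the cr_mcl multicall array and the whole batch is submitted in a single multicall. The sketch below is commentary, not part of the attachment; the helper name zap_ptes_batched and its nr_pages parameter are hypothetical, while cr_mcl, MULTI_update_va_mapping, MULTI_UVMFLAGS_INDEX, and HYPERVISOR_multicall_check are the names used in the patch.

/*
 * Sketch only: batch PTE zapping into one multicall, assuming the same
 * Xen headers the patched hypervisor.c already includes.  zap_ptes_batched
 * and nr_pages are illustrative names (nr_pages >= 1), not part of the patch.
 */
static void zap_ptes_batched(unsigned long vstart, unsigned long nr_pages)
{
	unsigned long i;

	/* Queue one update_va_mapping operation per page. */
	for (i = 0; i < nr_pages; i++)
		MULTI_update_va_mapping(cr_mcl + i, vstart + (i * PAGE_SIZE),
					__pte_ma(0), 0);

	/* Have the last entry flush the TLB on all vCPUs. */
	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH | UVMF_ALL;

	/* Enter the hypervisor once for the whole batch. */
	if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
		BUG();
}

Compared with a per-page loop of single hypercalls, this cuts the number of guest/hypervisor transitions for the PTE updates of an order-9 (2MB) region from 512 to 1.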
Attachments on bug 249867: 288721 | 288731 | 295742 | 295745 | 328115