# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxxxxx>
# Date 1182958296 -3600
# Node ID 0be610b725fae4cd6de8f0b111660a186f93b86d
# Parent 164599b81cfdc27602072442a0e21f746f7ea63b
Fix kexec compatibility with highmem.

Stop abusing xen_create_contiguous_region() to move pages below the
MFN limit. Instead, introduce xen_limit_pages_to_max_mfn(), which works
for both lowmem and highmem pages but does not bother making the pages
contiguous.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxxxxx>
---
arch/i386/mm/hypervisor.c | 111 +++++++++++++++++++++++++++++
include/asm-i386/mach-xen/asm/hypervisor.h | 5 +
kernel/kexec.c | 3
3 files changed, 117 insertions(+), 2 deletions(-)
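
For reference, a minimal caller-side sketch of the new interface, modeled on
the kimage_alloc_pages() hunk further down. The wrapper name and the limit
handling are illustrative only and are not part of this changeset; only
xen_limit_pages_to_max_mfn() itself is introduced here.

#include <linux/mm.h>
#include <asm/hypervisor.h>

/*
 * Illustrative helper (not in this patch): allocate an order-'order'
 * block and ensure every backing frame lies below the machine-address
 * 'limit', exchanging frames with Xen where necessary.
 */
static struct page *alloc_pages_below_limit(gfp_t gfp_mask,
					    unsigned int order,
					    unsigned long limit)
{
	struct page *pages;
	unsigned int address_bits;

	pages = alloc_pages(gfp_mask, order);
	if (pages == NULL)
		return NULL;

	/* Width of the highest acceptable machine address. */
	if (limit == ~0UL)
		address_bits = BITS_PER_LONG;
	else
		address_bits = long_log2(limit);

	/*
	 * Unlike xen_create_contiguous_region(), this copes with highmem
	 * pages and does not make the extent machine-contiguous.
	 */
	if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
		__free_pages(pages, order);
		return NULL;
	}

	return pages;
}

On auto-translated guests the new call is a no-op returning 0, so callers need
no special casing there.
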
diff -r 164599b81cfd -r 0be610b725fa arch/i386/mm/hypervisor.c
--- a/arch/i386/mm/hypervisor.c Wed Jun 27 12:51:35 2007 +0100
+++ b/arch/i386/mm/hypervisor.c Wed Jun 27 16:31:36 2007 +0100
@@ -42,6 +42,7 @@
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
+#include <linux/highmem.h>
void xen_l1_entry_update(pte_t *ptr, pte_t val)
{
@@ -234,6 +235,7 @@ static void contiguous_bitmap_clear(
/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
/* Ensure multi-page extents are contiguous in machine memory. */
@@ -421,6 +423,115 @@ void xen_destroy_contiguous_region(unsig
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+int xen_limit_pages_to_max_mfn(
+ struct page *pages, unsigned int order, unsigned int address_bits)
+{
+ unsigned long flags, frame;
+ unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
+ void *v;
+ struct page *page;
+ int i, nr_mcl, rc, success;
+
+ struct xen_memory_exchange exchange = {
+ .in = {
+ .nr_extents = 1UL << order,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ },
+ .out = {
+ .nr_extents = 1UL << order,
+ .extent_order = 0,
+ .address_bits = address_bits,
+ .domid = DOMID_SELF
+ }
+ };
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ if (unlikely(order > MAX_CONTIG_ORDER))
+ return -ENOMEM;
+
+ set_xen_guest_handle(exchange.in.extent_start, in_frames);
+ set_xen_guest_handle(exchange.out.extent_start, out_frames);
+
+ /* 0. Scrub the pages. */
+ for ( i = 0 ; i < 1UL<<order ; i++ ) {
+ page = &pages[i];
+
+ if (!PageHighMem(page)) {
+ v = page_address(page);
+ scrub_pages(v, 1);
+ } else {
+ v = kmap(page);
+ scrub_pages(v, 1);
+ kunmap(page);
+ }
+ }
+
+ kmap_flush_unused();
+
+ balloon_lock(flags);
+
+ /* 1. Zap current PTEs (if any), remembering MFNs. */
+ for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
+ page = &pages[i];
+
+ out_frames[i] = page_to_pfn(page);
+ in_frames[i] = pfn_to_mfn(out_frames[i]);
+
+ if (!PageHighMem(page))
+ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
+ (unsigned long)page_address(page),
+ __pte_ma(0), 0);
+
+ set_phys_to_machine(out_frames[i], INVALID_P2M_ENTRY);
+ }
+ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+ BUG();
+
+ /* 2. Get new memory below the required limit. */
+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+ success = (exchange.nr_exchanged == (1UL << order));
+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+ BUG_ON(success && (rc != 0));
+#if CONFIG_XEN_COMPAT <= 0x030002
+ if (unlikely(rc == -ENOSYS)) {
+ /* Compatibility when XENMEM_exchange is unsupported. */
+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &exchange.in) != (1UL << order))
+ BUG();
+ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+ &exchange.out) != (1UL << order));
+ }
+#endif
+
+ /* 3. Map the new pages in place of old pages. */
+ for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
+ page = &pages[i];
+ unsigned long pfn = page_to_pfn(page);
+
+ frame = success ? out_frames[i] : in_frames[i];
+
+ if (!PageHighMem(page))
+ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
+ (unsigned long)page_address(page),
+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
+
+ set_phys_to_machine(pfn, frame);
+ }
+ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
+ ? UVMF_TLB_FLUSH|UVMF_ALL
+ : UVMF_INVLPG|UVMF_ALL;
+ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+ BUG();
+
+ balloon_unlock(flags);
+
+ return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
+
#ifdef __i386__
int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
{
diff -r 164599b81cfd -r 0be610b725fa include/asm-i386/mach-xen/asm/hypervisor.h
--- a/include/asm-i386/mach-xen/asm/hypervisor.h Wed Jun 27 12:51:35 2007 +0100
+++ b/include/asm-i386/mach-xen/asm/hypervisor.h Wed Jun 27 16:31:36 2007 +0100
@@ -121,6 +121,11 @@ void xen_destroy_contiguous_region(
void xen_destroy_contiguous_region(
unsigned long vstart, unsigned int order);
+struct page;
+
+int xen_limit_pages_to_max_mfn(
+ struct page *pages, unsigned int order, unsigned int address_bits);
+
/* Turn jiffies into Xen system time. */
u64 jiffies_to_st(unsigned long jiffies);
diff -r 164599b81cfd -r 0be610b725fa kernel/kexec.c
--- a/kernel/kexec.c Wed Jun 27 12:51:35 2007 +0100
+++ b/kernel/kexec.c Wed Jun 27 16:31:36 2007 +0100
@@ -345,8 +345,7 @@ static struct page *kimage_alloc_pages(g
else
address_bits = long_log2(limit);
- if (xen_create_contiguous_region((unsigned long)page_address(pages),
- order, address_bits) < 0) {
+ if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
__free_pages(pages, order);
return NULL;
}