- don't do the multicall when nr_mcl is zero (and specifically don't access
  cr_mcl[nr_mcl - 1] in that case); see the sketch below
- fix the CONFIG_XEN_COMPAT <= 0x030002 fallback (it previously derived
  'success' from an inverted comparison)
- don't exchange pages that already meet the restriction (which in the
  common case means nothing gets exchanged at all)
- avoid calling the kmap functions when CONFIG_XEN_SCRUB_PAGES is disabled
- eliminate a few local variables
As usual, this was written and tested on 2.6.24 and made to apply to the
2.6.18 tree without further testing.
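
To illustrate the first and third points, here is a condensed sketch of the
new control flow (simplified from the patch below, with the step-1/step-3
loops elided; not the literal function body):

unsigned int i, nr_mcl = 0;
DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);

/* Mark only the pages whose MFN violates the address restriction. */
bitmap_zero(limit_map, 1U << order);
for (i = 0; i < 1U << order; i++) {
        if (!(pfn_to_mfn(page_to_pfn(&pages[i])) >> (address_bits - PAGE_SHIFT)))
                continue;       /* already below the limit, leave it alone */
        __set_bit(i, limit_map);
}
if (bitmap_empty(limit_map, 1U << order))
        return 0;               /* nothing needs exchanging at all */

/* ... steps 1-3 queue PTE updates into cr_mcl[] for the marked lowmem
   pages, incrementing nr_mcl for each; nothing gets queued when all the
   marked pages are highmem ... */

/* Only issue the multicall when something was queued, so that
   cr_mcl[nr_mcl - 1] is never read with nr_mcl == 0. */
if (nr_mcl) {
        cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] =
                order ? UVMF_TLB_FLUSH|UVMF_ALL : UVMF_INVLPG|UVMF_ALL;
        if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
                BUG();
}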
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Index: head-2008-01-28/arch/i386/mm/hypervisor.c
===================================================================
--- head-2008-01-28.orig/arch/i386/mm/hypervisor.c	2008-02-08 11:30:34.000000000 +0100
+++ head-2008-01-28/arch/i386/mm/hypervisor.c	2008-02-08 14:35:09.000000000 +0100
@@ -432,19 +432,17 @@ int xen_limit_pages_to_max_mfn(
{
unsigned long flags, frame;
unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
- void *v;
struct page *page;
- unsigned int i, nr_mcl;
+ unsigned int i, n, nr_mcl;
int rc, success;
+ DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
struct xen_memory_exchange exchange = {
.in = {
- .nr_extents = 1UL << order,
.extent_order = 0,
.domid = DOMID_SELF
},
.out = {
- .nr_extents = 1UL << order,
.extent_order = 0,
.address_bits = address_bits,
.domid = DOMID_SELF
@@ -457,80 +455,98 @@ int xen_limit_pages_to_max_mfn(
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
+ bitmap_zero(limit_map, 1U << order);
set_xen_guest_handle(exchange.in.extent_start, in_frames);
set_xen_guest_handle(exchange.out.extent_start, out_frames);
/* 0. Scrub the pages. */
- for ( i = 0 ; i < 1UL<<order ; i++ ) {
+ for (i = 0, n = 0; i < 1U<<order ; i++) {
page = &pages[i];
+ if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
+ continue;
+ __set_bit(i, limit_map);
- if (!PageHighMem(page)) {
- v = page_address(page);
- scrub_pages(v, 1);
- } else {
- v = kmap(page);
- scrub_pages(v, 1);
+ if (!PageHighMem(page))
+ scrub_pages(page_address(page), 1);
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ else {
+ scrub_pages(kmap(page), 1);
kunmap(page);
+ ++n;
}
+#endif
}
+ if (bitmap_empty(limit_map, 1U << order))
+ return 0;
- kmap_flush_unused();
+ if (n)
+ kmap_flush_unused();
balloon_lock(flags);
/* 1. Zap current PTEs (if any), remembering MFNs. */
- for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
+ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+ if (!test_bit(i, limit_map))
+ continue;
page = &pages[i];
- out_frames[i] = page_to_pfn(page);
- in_frames[i] = pfn_to_mfn(out_frames[i]);
+ out_frames[n] = page_to_pfn(page);
+ in_frames[n] = pfn_to_mfn(out_frames[n]);
if (!PageHighMem(page))
MULTI_update_va_mapping(cr_mcl + nr_mcl++,
        (unsigned long)page_address(page),
        __pte_ma(0), 0);
- set_phys_to_machine(out_frames[i], INVALID_P2M_ENTRY);
+ set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
+ ++n;
}
- if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+ if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
BUG();
/* 2. Get new memory below the required limit. */
+ exchange.in.nr_extents = n;
+ exchange.out.nr_extents = n;
rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
- success = (exchange.nr_exchanged == (1UL << order));
+ success = (exchange.nr_exchanged == n);
BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
BUG_ON(success && (rc != 0));
#if CONFIG_XEN_COMPAT <= 0x030002
if (unlikely(rc == -ENOSYS)) {
/* Compatibility when XENMEM_exchange is unsupported. */
if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &exchange.in) != (1UL << order))
+ &exchange.in) != n)
+ BUG();
+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+ &exchange.out) != n)
BUG();
- success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
- &exchange.out) != (1UL << order));
+ success = 1;
}
#endif
/* 3. Map the new pages in place of old pages. */
- for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
- unsigned long pfn;
+ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+ if (!test_bit(i, limit_map))
+ continue;
page = &pages[i];
- pfn = page_to_pfn(page);
- frame = success ? out_frames[i] : in_frames[i];
+ frame = success ? out_frames[n] : in_frames[n];
if (!PageHighMem(page))
MULTI_update_va_mapping(cr_mcl + nr_mcl++,
        (unsigned long)page_address(page),
pfn_pte_ma(frame, PAGE_KERNEL),
0);
- set_phys_to_machine(pfn, frame);
+ set_phys_to_machine(page_to_pfn(page), frame);
+ ++n;
+ }
+ if (nr_mcl) {
+ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
+ ? UVMF_TLB_FLUSH|UVMF_ALL
+ : UVMF_INVLPG|UVMF_ALL;
+ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+ BUG();
}
- cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
- ? UVMF_TLB_FLUSH|UVMF_ALL
- : UVMF_INVLPG|UVMF_ALL;
- if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
- BUG();
balloon_unlock(flags);
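
For context, a minimal usage sketch. The prototype shown here is inferred
from the function body above and may not match the actual header, so treat
it as an assumption:

/* Assumed prototype (inferred from the diff, not from the real header):
 *   int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
 *                                  unsigned int address_bits);
 * Example: force an order-2 allocation below 4GB, e.g. for a device that
 * can only generate 32-bit DMA addresses. */
struct page *pages = alloc_pages(GFP_KERNEL, 2);

if (pages && xen_limit_pages_to_max_mfn(pages, 2, 32) != 0) {
        __free_pages(pages, 2);         /* no memory below the limit */
        pages = NULL;
}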