xen-changelog

[Xen-changelog] [xen-unstable] 32-on-64: Fix domain address-size clamping, implement copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64 Xen

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] 32-on-64: Fix domain address-size clamping, implement copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64 Xen
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 06 Dec 2007 10:10:14 -0800
Delivery-date: Thu, 06 Dec 2007 10:11:30 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1196948359 0
# Node ID cd5e1e76d0bc66440a04122baa27860f5d763b5b
# Parent  3221dff4b460c88f93297f02acca53894ffda7b6
32-on-64: Fix domain address-size clamping, implement
copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64
Xen.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c       |    2 --
 xen/arch/x86/domain_build.c |    2 +-
 xen/arch/x86/e820.c         |    7 -------
 xen/arch/x86/x86_64/mm.c    |    2 +-
 xen/common/grant_table.c    |   25 +++++++++++++++++++++++++
 xen/common/memory.c         |   15 +++------------
 xen/common/page_alloc.c     |   16 +++++++---------
 7 files changed, 37 insertions(+), 32 deletions(-)
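
For context, the arithmetic behind the per-domain address-size clamp and the old 166GB limit can be modelled in a few lines of ordinary C. The sketch below is illustrative only: fls_u32() stands in for Xen's fls(), and the 166 MiB compat M2P window size is an assumption back-derived from the 166GB figure in the changeset title. Each 4-byte entry in a 32-on-64 guest's compat machine-to-phys window describes one 4 KiB page, so a window of S bytes covers S << 10 bytes of physical memory; physaddr_bitsize is the corresponding power-of-two address width consumed by domain_clamp_alloc_bitsize().

    /* Illustrative user-space model of the compat address-width arithmetic
     * behind the clamp (not Xen code).  fls_u32() mimics Xen's fls(); the
     * 166 MiB window size is assumed from the 166GB figure in the title. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* 1-based index of the highest set bit; 0 if x == 0. */
    static unsigned int fls_u32(uint32_t x)
    {
        unsigned int r = 0;
        while ( x )
        {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        uint32_t m2p_window = 166u << 20;          /* assumed window, bytes */

        /* Each 4-byte M2P entry maps one 4 KiB page: coverage = window << 10. */
        uint64_t reachable = (uint64_t)m2p_window << 10;

        /* Same computation as in domain_build.c: -2 for 4-byte entries,
         * +PAGE_SHIFT for bytes per page. */
        unsigned int physaddr_bitsize =
            fls_u32(m2p_window) - 1 + (PAGE_SHIFT - 2);

        printf("reachable = %llu GiB, physaddr_bitsize = %u (2^%u B = %llu GiB)\n",
               (unsigned long long)(reachable >> 30),
               physaddr_bitsize, physaddr_bitsize,
               (unsigned long long)1 << (physaddr_bitsize - 30));
        return 0;
    }

With these numbers a 32-on-64 dom0 ends up clamped to 37-bit (128GB) allocations, while the host as a whole is no longer clipped: the e820.c hunk below drops the boot-time discard of all memory above the 166GB mark, since per-domain clamping plus copy-on-grant-transfer now cover the 32-on-64 case.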

diff -r 3221dff4b460 -r cd5e1e76d0bc xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/domain.c     Thu Dec 06 13:39:19 2007 +0000
@@ -319,8 +319,6 @@ int switch_native(struct domain *d)
             release_compat_l4(d->vcpu[vcpuid]);
     }
 
-    d->arch.physaddr_bitsize = 64;
-
     return 0;
 }
 
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/domain_build.c       Thu Dec 06 13:39:19 2007 +0000
@@ -367,7 +367,7 @@ int __init construct_dom0(
 #ifdef CONFIG_COMPAT
         HYPERVISOR_COMPAT_VIRT_START(d) =
             max_t(unsigned int, m2p_compat_vstart, value);
-        d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+        d->arch.physaddr_bitsize =
             fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
             + (PAGE_SHIFT - 2);
         if ( value > (!is_pv_32on64_domain(d) ?
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/arch/x86/e820.c
--- a/xen/arch/x86/e820.c       Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/e820.c       Thu Dec 06 13:39:19 2007 +0000
@@ -370,13 +370,6 @@ static void __init machine_specific_memo
                   "can be accessed by Xen in 32-bit mode.");
 #endif
 
-#ifdef __x86_64__
-    clip_to_limit((uint64_t)(MACH2PHYS_COMPAT_VIRT_END -
-                             __HYPERVISOR_COMPAT_VIRT_START) << 10,
-                  "Only the first %u GB of the physical memory map "
-                  "can be accessed by 32-on-64 guests.");
-#endif
-
     reserve_dmi_region();
 }
 
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Thu Dec 06 13:39:19 2007 +0000
@@ -442,7 +442,7 @@ int check_descriptor(const struct domain
 
 unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
 {
-    if ( d == NULL )
+    if ( (d == NULL) || !is_pv_32on64_domain(d) )
         return bits;
     return min(d->arch.physaddr_bitsize, bits);
 }
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/common/grant_table.c
--- a/xen/common/grant_table.c  Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/grant_table.c  Thu Dec 06 13:39:19 2007 +0000
@@ -1081,9 +1081,34 @@ gnttab_transfer(
 
         if ( xsm_grant_transfer(d, e) )
         {
+        unlock_and_copyback:
             rcu_unlock_domain(e);
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
             gop.status = GNTST_permission_denied;
             goto copyback;
+        }
+
+        if ( (1UL << domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)) <= mfn )
+        {
+            struct page_info *new_page;
+            void *sp, *dp;
+
+            new_page = alloc_domheap_pages(
+                NULL, 0, 
+                MEMF_bits(domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)));
+            if ( new_page == NULL )
+                goto unlock_and_copyback;
+
+            sp = map_domain_page(mfn);
+            dp = map_domain_page(page_to_mfn(new_page));
+            memcpy(dp, sp, PAGE_SIZE);
+            unmap_domain_page(dp);
+            unmap_domain_page(sp);
+
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
+            page = new_page;
         }
 
         spin_lock(&e->page_alloc_lock);
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/common/memory.c
--- a/xen/common/memory.c       Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/memory.c       Thu Dec 06 13:39:19 2007 +0000
@@ -319,18 +319,6 @@ static long memory_exchange(XEN_GUEST_HA
         goto fail_early;
     }
 
-    if ( (exch.out.address_bits != 0) &&
-         (exch.out.address_bits <
-          (get_order_from_pages(max_page) + PAGE_SHIFT)) )
-    {
-        if ( exch.out.address_bits <= PAGE_SHIFT )
-        {
-            rc = -ENOMEM;
-            goto fail_early;
-        }
-        memflags = MEMF_bits(exch.out.address_bits);
-    }
-
     if ( exch.in.extent_order <= exch.out.extent_order )
     {
         in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
@@ -352,6 +340,9 @@ static long memory_exchange(XEN_GUEST_HA
         goto fail_early;
     }
     d = current->domain;
+
+    memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+        d, exch.out.address_bits ? : BITS_PER_LONG));
 
     cpu = select_local_cpu(d);
 
diff -r 3221dff4b460 -r cd5e1e76d0bc xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/page_alloc.c   Thu Dec 06 13:39:19 2007 +0000
@@ -786,15 +786,13 @@ struct page_info *__alloc_domheap_pages(
 
     ASSERT(!in_irq());
 
-    if ( bits )
-    {
-        bits = domain_clamp_alloc_bitsize(d, bits);
-        if ( bits <= (PAGE_SHIFT + 1) )
-            return NULL;
-        bits -= PAGE_SHIFT + 1;
-        if ( bits < zone_hi )
-            zone_hi = bits;
-    }
+    bits = domain_clamp_alloc_bitsize(d, bits ? : BITS_PER_LONG);
+    if ( bits <= (PAGE_SHIFT + 1) )
+        return NULL;
+
+    bits -= PAGE_SHIFT + 1;
+    if ( bits < zone_hi )
+        zone_hi = bits;
 
     if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
     {
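
For readers following the gnttab_transfer() change above, here is a small self-contained model of the copy-on-grant-transfer decision. Everything in it (struct fake_page, alloc_page_below(), transfer_page()) is a hypothetical stand-in written for illustration; the real code uses domain_clamp_alloc_bitsize(), alloc_domheap_pages() with MEMF_bits(), map_domain_page() and free_domheap_page() exactly as shown in the hunk.

    /* Illustrative user-space model of copy-on-grant-transfer (not Xen code).
     * If the frame being transferred lies above what the receiving 32-on-64
     * domain can address, copy its contents into a page allocated within the
     * receiver's reachable range and hand over the copy instead. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    struct fake_page {
        uint64_t mfn;                /* machine frame number             */
        uint8_t  data[PAGE_SIZE];    /* stand-in for the page contents   */
    };

    /* Hypothetical allocator honouring an address-width limit, playing the
     * role of alloc_domheap_pages(..., MEMF_bits(bits)). */
    static struct fake_page *alloc_page_below(unsigned int addr_bits)
    {
        struct fake_page *pg = calloc(1, sizeof(*pg));
        if ( pg != NULL )
            pg->mfn = (1ULL << (addr_bits - PAGE_SHIFT)) - 1; /* highest ok frame */
        return pg;
    }

    /* Returns the page the receiver ends up with, or NULL if the transfer
     * must fail because no suitable copy could be allocated. */
    static struct fake_page *transfer_page(struct fake_page *pg,
                                           unsigned int receiver_addr_bits)
    {
        /* Frames below 2^(bits - PAGE_SHIFT) are directly addressable. */
        if ( pg->mfn < (1ULL << (receiver_addr_bits - PAGE_SHIFT)) )
            return pg;

        struct fake_page *copy = alloc_page_below(receiver_addr_bits);
        if ( copy == NULL )
        {
            free(pg);            /* the patch frees the original and fails */
            return NULL;
        }

        memcpy(copy->data, pg->data, PAGE_SIZE);  /* copy contents ...         */
        free(pg);                                 /* ... and drop the original */
        return copy;
    }

    int main(void)
    {
        struct fake_page *high = calloc(1, sizeof(*high));
        if ( high == NULL )
            return 1;
        high->mfn = 1ULL << (39 - PAGE_SHIFT);    /* frame at the 512 GiB mark */
        memset(high->data, 0xAA, PAGE_SIZE);

        struct fake_page *got = transfer_page(high, 37);  /* 37-bit receiver */
        if ( got != NULL )
            printf("receiver got mfn %#llx, contents %s\n",
                   (unsigned long long)got->mfn,
                   got->data[0] == 0xAA ? "preserved" : "lost");
        free(got);
        return 0;
    }

The copy is satisfied by the ordinary domheap allocator, which is also why memory.c and page_alloc.c above now clamp unconditionally: passing address_bits (or bits) of 0 now means "no caller-imposed limit", and the domain's own limit is still applied through domain_clamp_alloc_bitsize().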

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
