To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 17/21] xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Fri, 26 Nov 2010 14:49:18 +0100
Delivery-date: Fri, 26 Nov 2010 06:20:17 -0800
Dkim-signature: v=1; a=rsa-sha1; c=relaxed/relaxed; t=1290779363; l=4820; s=domk; d=aepfle.de; h=References:Subject:To:From:Date:X-RZG-CLASS-ID:X-RZG-AUTH; bh=LXFnU2rnKoNXYzIrjwjdrbSg+vA=; b=ia0I/p2sZ2HM2QmoCMUWSbWZ0Wf6XOV+3TBUbD0uOm+0GHWN+wXYEwKY8pzfQf10oCo viTemNYMF6owNVeRSm4NPfSVKltfS/q/tqeRtcBXQFtX9tCCd4epWLKrK5tsR7HPDhkZT aR6bSzp8t7mT5lrAJ7/5xFw/kluJQDyjBBs=
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20101126134901.384130351@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: quilt/0.48-4.4

This is a quick fix to keep my guests running.
The proper fix will use the recently added waitqueue API.
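
For illustration only, here is a rough sketch of what that proper fix
might look like.  This is a hypothetical sketch, not working code: it
assumes the wait_event()/wake_up() interface of the new waitqueue API,
and paged_gfn_wq / gfn_is_paged_out() are invented names for a
per-domain waitqueue and a residency test that do not exist today.

    /* HYPOTHETICAL: __hvm_copy has already asked the pager to populate
     * the gfn before returning HVMCOPY_gfn_paged_out, so instead of
     * failing the copy, the vcpu could simply sleep until the page is
     * resident again. */
    static void wait_for_paged_gfn(struct domain *d, unsigned long gfn)
    {
        wait_event(d->paged_gfn_wq, !gfn_is_paged_out(d, gfn));
        /* xenpaging's resume path would call wake_up(&d->paged_gfn_wq). */
    }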


copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest's pagetable walk.  This has to be handled in some way.  One
hypercall that failed this way was
do_memory_op/XENMEM_decrease_reservation, which led to a BUG_ON in
balloon.c.  Since do_memory_op already has restart support for the
hypercall, the -EAGAIN result from copy_from_guest now feeds into this
existing retry code (the caller-side pattern is summarized after the
patch).  In addition, cleanup on error was added to
increase_reservation and populate_physmap.
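
For reference, the existing restart machinery in do_memory_op that the
-EAGAIN path hooks into works roughly like this (paraphrased from the
tail of do_memory_op in xen/common/memory.c; this snippet is not part
of the diff below): progress is encoded in the upper bits of cmd, so a
re-issued hypercall resumes at the first unprocessed extent.

    /* If one of the reservation ops set args.preempted, re-issue the
     * hypercall with nr_done folded into the upper bits of cmd so the
     * restarted call skips the extents already completed. */
    rc = args.nr_done;
    if ( args.preempted )
        return hypercall_create_continuation(
            __HYPERVISOR_memory_op, "lh",
            op | (rc << MEMOP_EXTENT_SHIFT), arg);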

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/hvm.c |    4 ++++
 xen/common/memory.c    |   44 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 43 insertions(+), 5 deletions(-)

--- xen-unstable.hg-4.1.22433.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22433/xen/arch/x86/hvm/hvm.c
@@ -2066,6 +2066,8 @@ unsigned long copy_to_user_hvm(void *to,
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
+    if ( rc == HVMCOPY_gfn_paged_out )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -2083,6 +2085,8 @@ unsigned long copy_from_user_hvm(void *t
 #endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
+    if ( rc == HVMCOPY_gfn_paged_out )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
--- xen-unstable.hg-4.1.22433.orig/xen/common/memory.c
+++ xen-unstable.hg-4.1.22433/xen/common/memory.c
@@ -47,6 +47,7 @@ static void increase_reservation(struct
 {
     struct page_info *page;
     unsigned long i;
+    unsigned long ctg_ret;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
 
@@ -80,8 +81,14 @@ static void increase_reservation(struct
         if ( !guest_handle_is_null(a->extent_list) )
         {
             mfn = page_to_mfn(page);
-            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+            ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+            if ( unlikely(ctg_ret) )
+            {
+                free_domheap_pages(page, a->extent_order);
+                if ( (long)ctg_ret == -EAGAIN )
+                    a->preempted = 1;
                 goto out;
+            }
         }
     }
 
@@ -93,6 +100,7 @@ static void populate_physmap(struct memo
 {
     struct page_info *page;
     unsigned long i, j;
+    unsigned long cftg_ret;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
 
@@ -111,8 +119,13 @@ static void populate_physmap(struct memo
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+        cftg_ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
+        if ( unlikely(cftg_ret) )
+        {
+            if ( (long)cftg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( a->memflags & MEMF_populate_on_demand )
         {
@@ -142,8 +155,17 @@ static void populate_physmap(struct memo
                     set_gpfn_from_mfn(mfn + j, gpfn + j);
 
                 /* Inform the domain of the new page's machine address. */ 
-                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+                cftg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+                if ( unlikely(cftg_ret) )
+                {
+                    for ( j = 0; j < (1 << a->extent_order); j++ )
+                        set_gpfn_from_mfn(mfn + j, INVALID_M2P_ENTRY);
+                    guest_physmap_remove_page(d, gpfn, mfn, a->extent_order);
+                    free_domheap_pages(page, a->extent_order);
+                    if ( (long)cftg_ret == -EAGAIN )
+                        a->preempted = 1;
                     goto out;
+                }
             }
         }
     }
@@ -212,6 +234,7 @@ int guest_remove_page(struct domain *d,
 static void decrease_reservation(struct memop_args *a)
 {
     unsigned long i, j;
+    unsigned long cfg_ret;
     xen_pfn_t gmfn;
 
     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
@@ -226,8 +249,13 @@ static void decrease_reservation(struct
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+        cfg_ret = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( tb_init_done )
         {
@@ -508,6 +536,7 @@ long do_memory_op(unsigned long cmd, XEN
     int rc, op;
     unsigned int address_bits;
     unsigned long start_extent;
+    unsigned long cfg_ret;
     struct xen_memory_reservation reservation;
     struct memop_args args;
     domid_t domid;
@@ -521,8 +550,13 @@ long do_memory_op(unsigned long cmd, XEN
     case XENMEM_populate_physmap:
         start_extent = cmd >> MEMOP_EXTENT_SHIFT;
 
-        if ( copy_from_guest(&reservation, arg, 1) )
+        cfg_ret = copy_from_guest(&reservation, arg, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
             return start_extent;
+        }
 
         /* Is size too large for us to encode a continuation? */
         if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
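
To summarize the caller-side pattern repeated above in
increase_reservation, populate_physmap and decrease_reservation: the
copy helpers normally return the number of bytes left uncopied, so the
paged-out case is signalled by smuggling -EAGAIN through the same
unsigned return value and casting it back on the caller's side.

    ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
    if ( unlikely(ret) )
    {
        if ( (long)ret == -EAGAIN )  /* gfn paged out               */
            a->preempted = 1;        /* arrange for a continuation  */
        goto out;                    /* else: a genuine copy fault  */
    }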


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel