WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 4/6] xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 4/6] xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Sun, 16 Jan 2011 17:32:33 +0100
Delivery-date: Sun, 16 Jan 2011 08:37:21 -0800
Dkim-signature: v=1; a=rsa-sha1; c=relaxed/relaxed; t=1295195551; l=4316; s=domk; d=aepfle.de; h=References:Subject:To:From:Date:X-RZG-CLASS-ID:X-RZG-AUTH; bh=EumA+rpDAnFv4C4xqR0hw1sC2z0=; b=KEURnYaH8s2hHXot1Hngj6GRX09ppxyv+aoAU8MsVSgZlIpFW/6g0msngzTNOqdkLTs rPgy6cH2qMeL6M635tcRUYPB6aduCnjltAeuEjcRSKp6J0NmrWM1+tz5kDDpvguEYYxIx iEkrAY7M6X5HNq3qJZXHb75drtSqpwiZ7WI=
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20110116163229.044047464@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: quilt/0.48-4.4
copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest's pagetable walk.  This has to be handled in some way.

For the time being, return -EAGAIN for the most common case (xen_balloon
driver crashing in the guest) until the recently added waitqueues are
put to use.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/hvm.c |    4 ++++
 xen/common/memory.c    |   39 ++++++++++++++++++++++++++++++++++-----
 2 files changed, 38 insertions(+), 5 deletions(-)

--- xen-unstable.hg-4.1.22764.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22764/xen/arch/x86/hvm/hvm.c
@@ -2163,6 +2163,8 @@ unsigned long copy_to_user_hvm(void *to,
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
+    if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -2180,6 +2182,8 @@ unsigned long copy_from_user_hvm(void *t
 #endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
+    if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
--- xen-unstable.hg-4.1.22764.orig/xen/common/memory.c
+++ xen-unstable.hg-4.1.22764/xen/common/memory.c
@@ -48,6 +48,7 @@ static void increase_reservation(struct
 {
     struct page_info *page;
     unsigned long i;
+    unsigned long ctg_ret;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
 
@@ -81,8 +82,13 @@ static void increase_reservation(struct
         if ( !guest_handle_is_null(a->extent_list) )
         {
             mfn = page_to_mfn(page);
-            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+            ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+            if ( unlikely(ctg_ret) )
+            {
+                if ( (long)ctg_ret == -EAGAIN )
+                    a->preempted = 1;
                 goto out;
+            }
         }
     }
 
@@ -94,6 +100,7 @@ static void populate_physmap(struct memo
 {
     struct page_info *page;
     unsigned long i, j;
+    unsigned long cftg_ret;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
 
@@ -112,8 +119,13 @@ static void populate_physmap(struct memo
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+        cftg_ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
+        if ( unlikely(cftg_ret) )
+        {
+            if ( (long)cftg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( a->memflags & MEMF_populate_on_demand )
         {
@@ -143,8 +155,13 @@ static void populate_physmap(struct memo
                     set_gpfn_from_mfn(mfn + j, gpfn + j);
 
                 /* Inform the domain of the new page's machine address. */ 
-                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+                cftg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+                if ( unlikely(cftg_ret) )
+                {
+                    if ( (long)cftg_ret == -EAGAIN )
+                        a->preempted = 1;
                     goto out;
+                }
             }
         }
     }
@@ -213,6 +230,7 @@ int guest_remove_page(struct domain *d,
 static void decrease_reservation(struct memop_args *a)
 {
     unsigned long i, j;
+    unsigned long cfg_ret;
     xen_pfn_t gmfn;
 
     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
@@ -227,8 +245,13 @@ static void decrease_reservation(struct
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+        cfg_ret = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( tb_init_done )
         {
@@ -509,6 +532,7 @@ long do_memory_op(unsigned long cmd, XEN
     int rc, op;
     unsigned int address_bits;
     unsigned long start_extent;
+    unsigned long cfg_ret;
     struct xen_memory_reservation reservation;
     struct memop_args args;
     domid_t domid;
@@ -522,8 +546,13 @@ long do_memory_op(unsigned long cmd, XEN
     case XENMEM_populate_physmap:
         start_extent = cmd >> MEMOP_EXTENT_SHIFT;
 
-        if ( copy_from_guest(&reservation, arg, 1) )
+        cfg_ret = copy_from_guest(&reservation, arg, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
             return start_extent;
+        }
 
         /* Is size too large for us to encode a continuation? */
         if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel