[Xen-devel] Re: how to handle paged hypercall args?

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] Re: how to handle paged hypercall args?
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Thu, 11 Nov 2010 15:33:38 +0100
In-reply-to: <20101110085817.GB4427@xxxxxxxxx>
References: <20101110085817.GB4427@xxxxxxxxx>
User-agent: Mutt/1.5.20 (2009-06-14)
On Wed, Nov 10, Olaf Hering wrote:

> I'm currently reading through the callers of __hvm_copy(). Some of them
> detect HVMCOPY_gfn_paged_out and do some sort of retry. Others just
> ignore the return codes, or turn them into generic errors. In the case
> of copy_from/to_guest, each caller needs an audit to see whether a retry
> is possible.

A first patch which avoids the BUG_ON is below.
It turned out that the guest's pagetables had just been nominated and
paged out during the preempted do_memory_op hypercall, so
copy_from_guest failed in decrease_reservation() and do_memory_op().

I have also added some error handling for the case where copy_to_user
fails. However, only the decrease_reservation() code path is
runtime-tested, and in fact the whole patch is not even compile-tested
yet. It's just a heads-up.
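
To make the mapping concrete, here is a condensed, standalone sketch of
what the hvm.c hunks are supposed to do (everything except
HVMCOPY_gfn_paged_out and EAGAIN is a made-up stand-in, not the real
Xen code):

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the __hvm_copy() return codes. */
enum hvm_copy_result { HVMCOPY_okay, HVMCOPY_bad_gfn, HVMCOPY_gfn_paged_out };

/* Pretend to be hvm_copy_from_guest_virt_nofault(): the gfn is paged
 * out on the first attempt and back in memory on the retry. */
static enum hvm_copy_result fake_hvm_copy(int attempt)
{
    return attempt == 0 ? HVMCOPY_gfn_paged_out : HVMCOPY_okay;
}

/* Mirrors the copy_from_user_hvm() change: paged-out gfns become
 * -EAGAIN, everything else keeps the "bytes not copied" convention. */
static unsigned long copy_from_user_sketch(int attempt, unsigned long len)
{
    enum hvm_copy_result rc = fake_hvm_copy(attempt);

    if ( rc == HVMCOPY_gfn_paged_out )
        return -EAGAIN;
    return rc ? len : 0;
}

int main(void)
{
    unsigned long ret = copy_from_user_sketch(0, 64);

    /* The caller casts back to long to pick out the -EAGAIN case and
     * arranges a retry (in the patch: a->preempted / a continuation). */
    if ( (long)ret == -EAGAIN )
        printf("paged out -> set a->preempted, create a continuation\n");

    ret = copy_from_user_sketch(1, 64);
    printf("retry: %lu bytes not copied\n", ret);
    return 0;
}

Note that overloading the "bytes not copied" return value with -EAGAIN
only stays unambiguous because len is always far smaller than
(unsigned long)-EAGAIN, so the (long) cast in the callers can tell the
two cases apart.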


So is that an acceptable way to deal with the HVMCOPY_gfn_paged_out
return codes from __hvm_copy?
Or should I explore some different way, like spinning there and possibly
letting other threads of execution make progress while waiting for the
gfns to come back?
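
For comparison, the spinning variant would look roughly like the sketch
below. Again everything here is a made-up stand-in (in particular
request_gfn_populate() and the use of sched_yield() for "let others
run"), not an existing Xen interface:

#include <errno.h>
#include <sched.h>

enum hvm_copy_result { HVMCOPY_okay, HVMCOPY_gfn_paged_out };

/* Hypothetical: nudge the pager to bring the gfn back in. */
static void request_gfn_populate(void) { }

/* Pretend the gfn reappears after a few attempts. */
static enum hvm_copy_result try_copy(int *attempts)
{
    return (*attempts)++ < 3 ? HVMCOPY_gfn_paged_out : HVMCOPY_okay;
}

/* Spin until the gfn is paged back in, yielding in between so that
 * other threads of execution can make progress. */
static int copy_with_spin(void)
{
    int attempts = 0;
    enum hvm_copy_result rc;

    while ( (rc = try_copy(&attempts)) == HVMCOPY_gfn_paged_out )
    {
        request_gfn_populate();
        sched_yield();  /* stand-in for yielding the vcpu inside Xen */
    }
    return rc == HVMCOPY_okay ? 0 : -EFAULT;
}

int main(void)
{
    return copy_with_spin();
}

My worry with spinning is that the pager lives in dom0 and has to get
scheduled before the gfn can come back, while the continuation approach
returns to the guest in the meantime.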


Olaf

---
 xen/arch/x86/hvm/hvm.c |    4 ++++
 xen/common/memory.c    |   43 ++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 42 insertions(+), 5 deletions(-)

--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
@@ -1853,6 +1853,8 @@ unsigned long copy_to_user_hvm(void *to,
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
+    if ( rc == HVMCOPY_gfn_paged_out )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -1869,6 +1871,8 @@ unsigned long copy_from_user_hvm(void *t
 #endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
+    if ( rc == HVMCOPY_gfn_paged_out )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
--- xen-4.0.1-testing.orig/xen/common/memory.c
+++ xen-4.0.1-testing/xen/common/memory.c
@@ -47,6 +47,7 @@ static void increase_reservation(struct
 {
     struct page_info *page;
     unsigned long i;
+    unsigned long ctg_ret;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
 
@@ -80,8 +81,14 @@ static void increase_reservation(struct
         if ( !guest_handle_is_null(a->extent_list) )
         {
             mfn = page_to_mfn(page);
-            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+            ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+            if ( unlikely(ctg_ret) )
+            {
+                free_domheap_pages(page, a->extent_order);
+                if ( (long)ctg_ret == -EAGAIN )
+                    a->preempted = 1;
                 goto out;
+            }
         }
     }
 
@@ -93,6 +100,7 @@ static void populate_physmap(struct memo
 {
     struct page_info *page;
     unsigned long i, j;
+    unsigned long ctg_ret;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
 
@@ -111,8 +119,13 @@ static void populate_physmap(struct memo
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+        j = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
+        if ( unlikely(j) )
+        {
+            if ( (long)j == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( a->memflags & MEMF_populate_on_demand )
         {
@@ -142,8 +155,17 @@ static void populate_physmap(struct memo
                     set_gpfn_from_mfn(mfn + j, gpfn + j);
 
                 /* Inform the domain of the new page's machine address. */ 
-                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+                ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+                if ( unlikely(ctg_ret) )
+                {
+                    for ( j = 0; j < (1 << a->extent_order); j++ )
+                        set_gpfn_from_mfn(mfn + j, INVALID_P2M_ENTRY);
+                    guest_physmap_remove_page(d, gpfn, mfn, a->extent_order);
+                    free_domheap_pages(page, a->extent_order);
+                    if ( (long)ctg_ret == -EAGAIN )
+                        a->preempted = 1;
                     goto out;
+                }
             }
         }
     }
@@ -226,8 +248,13 @@ static void decrease_reservation(struct
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+        j = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
+        if ( unlikely(j) )
+        {
+            if ( (long)j == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( tb_init_done )
         {
@@ -511,6 +538,7 @@ long do_memory_op(unsigned long cmd, XEN
     int rc, op;
     unsigned int address_bits;
     unsigned long start_extent;
+    unsigned long cfg_ret;
     struct xen_memory_reservation reservation;
     struct memop_args args;
     domid_t domid;
@@ -524,8 +552,13 @@ long do_memory_op(unsigned long cmd, XEN
     case XENMEM_populate_physmap:
         start_extent = cmd >> MEMOP_EXTENT_SHIFT;
 
-        if ( copy_from_guest(&reservation, arg, 1) )
+        cfg_ret = copy_from_guest(&reservation, arg, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
             return start_extent;
+        }
 
         /* Is size too large for us to encode a continuation? */
         if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel