WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

Re: [Xen-devel] Re: how to handle paged hypercall args?

To: Keir Fraser <keir@xxxxxxx>
Subject: Re: [Xen-devel] Re: how to handle paged hypercall args?
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Thu, 2 Dec 2010 11:11:22 +0100
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx, Jan Beulich <JBeulich@xxxxxxxxxx>
Delivery-date: Thu, 02 Dec 2010 02:12:24 -0800
Dkim-signature: v=1; a=rsa-sha1; c=relaxed/relaxed; t=1291284692; l=4404; s=domk; d=aepfle.de; h=In-Reply-To:Content-Type:MIME-Version:References:Subject:Cc:To:From: Date:X-RZG-CLASS-ID:X-RZG-AUTH; bh=/VsQlRUOyr2Da8ss4tb8DxWGkAs=; b=gaU9B9awikOZK4dfqvxJA0GVKL3I/9gd9Z0lW3xMKQwxr85g5rPZuiTm6XDaTvqsq5j 3Xy0PaCQ7cFaF0dPv/0DrqQ0clIs+HosZ+bLXucmj+xHzkcHiZnfVR0O4n1iSprTObbm7 t9PI5owMlb60SPws4TZpw0/YA+A/zSWg7x8=
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <C90ACD8F.A6E9%keir@xxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <C909B8B7.2817A%keir@xxxxxxx> <C90ACD8F.A6E9%keir@xxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.20 (2009-06-14)
On Thu, Nov 18, Keir Fraser wrote:

> I've done something along these lines now as xen-unstable:22402. It actually
> seems to work okay! So you can go ahead and use waitqueues in __hvm_copy()
> now.

This is my first attempt to do it.
It crashed Xen on the very first try in a spectacular way. But it
happened only once, for some reason.
See my other mail.


Olaf

--- xen-unstable.hg-4.1.22447.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22447/xen/arch/x86/hvm/hvm.c
@@ -1986,69 +1986,117 @@ static enum hvm_copy_result __hvm_copy(
 enum hvm_copy_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size)
 {
-    return __hvm_copy(buf, paddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, paddr, size,
                       HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
-                      0);
+                      0)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
-    return __hvm_copy(buf, paddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, paddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
-                      0);
+                      0)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt(
     unsigned long vaddr, void *buf, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
-                      PFEC_page_present | PFEC_write_access | pfec);
+                      PFEC_page_present | PFEC_write_access | pfec)) != 
HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_copy_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
     if ( hvm_nx_enabled(current) )
         pfec |= PFEC_insn_fetch;
-    return __hvm_copy(buf, vaddr, size,
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
     unsigned long vaddr, void *buf, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
-                      PFEC_page_present | PFEC_write_access | pfec);
+                      PFEC_page_present | PFEC_write_access | pfec)) != 
HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size,
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
+    enum hvm_copy_result res;
+    struct waitqueue_head wq;
     if ( hvm_nx_enabled(current) )
         pfec |= PFEC_insn_fetch;
-    return __hvm_copy(buf, vaddr, size,
+    init_waitqueue_head(&wq);
+
+    wait_event(wq, (
+    res = __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
+    return res;
 }
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel