WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

Re: [Xen-devel] Re: how to handle paged hypercall args?

To: "Olaf Hering" <olaf@xxxxxxxxx>
Subject: Re: [Xen-devel] Re: how to handle paged hypercall args?
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Thu, 02 Dec 2010 10:25:06 +0000
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx, Keir Fraser <keir@xxxxxxx>
Delivery-date: Thu, 02 Dec 2010 02:25:41 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20101202101122.GA30374@xxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <C909B8B7.2817A%keir@xxxxxxx> <C90ACD8F.A6E9%keir@xxxxxxx> <20101202101122.GA30374@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
>>> On 02.12.10 at 11:11, Olaf Hering <olaf@xxxxxxxxx> wrote:
> On Thu, Nov 18, Keir Fraser wrote:
> 
>> I've done something along these lines now as xen-unstable:22402. It actually
>> seems to work okay! So you can go ahead and use waitqueues in __hvm_copy()
>> now.
> 
> This is my first attempt to do it.

I didn't look in detail whether that's being done in a non-intuitive
way elsewhere, but I can't see how the event you're waiting on
would ever get signaled - wouldn't you need to pass it into
__hvm_copy() and further down from there?

Jan

> It crashed Xen on the very first try in a spectacular way. But it
> happened only once for some reason.
> See my other mail.
> 
> 
> Olaf
> 
> --- xen-unstable.hg-4.1.22447.orig/xen/arch/x86/hvm/hvm.c
> +++ xen-unstable.hg-4.1.22447/xen/arch/x86/hvm/hvm.c
> @@ -1986,69 +1986,117 @@ static enum hvm_copy_result __hvm_copy(
>  enum hvm_copy_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_to_guest_virt(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != 
> HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_fetch_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != 
> HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int 
> len)



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel