[Xen-devel] [RFC] i386 highmem assist hypercalls

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [RFC] i386 highmem assist hypercalls
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Thu, 24 Jul 2008 14:25:40 +0100
Delivery-date: Thu, 24 Jul 2008 06:25:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx

While looking at the origin of very frequently executed hypercalls (as
usual, kernel builds are what's being measured), I realized that the high
page accessor functions in Linux would be good candidates to handle in
the hypervisor: clearing or copying to/from a high page is a pretty
frequent operation, provided the machine has enough memory for highmem
to actually be in use. However, the measured results aren't quite what I
expected: while the number of hypercalls dropped by almost 20%, the
elapsed real time for the builds didn't change; only the elapsed kernel
time shrank (by about 4%). With that, I'm not certain the respective
changes are worthwhile, and hence would like to gather other people's
opinions.

The hypervisor patch is below (using temporary numbers for the newly
added sub-hypercalls); the Linux patch (against 2.6.26, so just for
reference) is attached.
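
To make the intent concrete, here is a rough sketch (not the attached
patch) of the kind of guest-side hooks I have in mind; the names
xen_clear_highpage()/xen_copy_highpage() are purely illustrative, and
the code assumes the MMUEXT_CLEAR_PAGE/MMUEXT_COPY_PAGE definitions and
the arg2.src_mfn member from the public header change below are already
reflected in the kernel's interface headers:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Sketch only: have the hypervisor clear a (possibly high) page. */
static void xen_clear_highpage(struct page *page)
{
    struct mmuext_op op = {
        .cmd      = MMUEXT_CLEAR_PAGE,
        .arg1.mfn = pfn_to_mfn(page_to_pfn(page)),
    };

    if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
        BUG();
}

/* Sketch only: have the hypervisor copy one page to another. */
static void xen_copy_highpage(struct page *to, struct page *from)
{
    struct mmuext_op op = {
        .cmd          = MMUEXT_COPY_PAGE,
        .arg1.mfn     = pfn_to_mfn(page_to_pfn(to)),
        .arg2.src_mfn = pfn_to_mfn(page_to_pfn(from)),
    };

    if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
        BUG();
}

(A real implementation would of course only want to take this route for
pages that can't be addressed directly, falling back to the plain
clear_page()/copy_page() otherwise.)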

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2008-07-21/xen/arch/x86/mm.c
===================================================================
--- 2008-07-21.orig/xen/arch/x86/mm.c   2008-07-18 16:19:34.000000000 +0200
+++ 2008-07-21/xen/arch/x86/mm.c        2008-07-21 08:36:26.000000000 +0200
@@ -2218,6 +2218,29 @@ static inline cpumask_t vcpumask_to_pcpu
     return pmask;
 }
 
+#ifdef __i386__
+static inline void *fixmap_domain_page(unsigned long mfn)
+{
+    unsigned int cpu = smp_processor_id();
+    void *ptr = (void *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu);
+
+    l1e_write(fix_pae_highmem_pl1e - cpu,
+              l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    flush_tlb_one_local(ptr);
+    return ptr;
+}
+static inline void fixunmap_domain_page(const void *ptr)
+{
+    unsigned int cpu = virt_to_fix((unsigned long)ptr) - FIX_PAE_HIGHMEM_0;
+
+    l1e_write(fix_pae_highmem_pl1e - cpu, l1e_empty());
+    this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time);
+}
+#else
+#define fixmap_domain_page(mfn) mfn_to_virt(mfn)
+#define fixunmap_domain_page(ptr) ((void)(ptr))
+#endif
+
 int do_mmuext_op(
     XEN_GUEST_HANDLE(mmuext_op_t) uops,
     unsigned int count,
@@ -2482,6 +2505,66 @@ int do_mmuext_op(
             break;
         }
 
+        case MMUEXT_CLEAR_PAGE:
+        {
+            unsigned char *ptr;
+
+            okay = get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+                                                 FOREIGNDOM);
+            if ( unlikely(!okay) )
+            {
+                MEM_LOG("Error while clearing mfn %lx", mfn);
+                break;
+            }
+
+            /* A page is dirtied when it's being cleared. */
+            paging_mark_dirty(d, mfn);
+
+            ptr = fixmap_domain_page(mfn);
+            clear_page(ptr);
+            fixunmap_domain_page(ptr);
+
+            put_page_and_type(page);
+            break;
+        }
+
+        case MMUEXT_COPY_PAGE:
+        {
+            const unsigned char *src;
+            unsigned char *dst;
+            unsigned long src_mfn;
+
+            src_mfn = gmfn_to_mfn(FOREIGNDOM, op.arg2.src_mfn);
+            okay = get_page_from_pagenr(src_mfn, FOREIGNDOM);
+            if ( unlikely(!okay) )
+            {
+                MEM_LOG("Error while copying from mfn %lx", src_mfn);
+                break;
+            }
+
+            okay = get_page_and_type_from_pagenr(mfn, PGT_writable_page,
+                                                 FOREIGNDOM);
+            if ( unlikely(!okay) )
+            {
+                put_page(mfn_to_page(src_mfn));
+                MEM_LOG("Error while copying to mfn %lx", mfn);
+                break;
+            }
+
+            /* A page is dirtied when it's being copied to. */
+            paging_mark_dirty(d, mfn);
+
+            src = map_domain_page(src_mfn);
+            dst = fixmap_domain_page(mfn);
+            copy_page(dst, src);
+            fixunmap_domain_page(dst);
+            unmap_domain_page(src);
+
+            put_page_and_type(page);
+            put_page(mfn_to_page(src_mfn));
+            break;
+        }
+
         default:
             MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
             rc = -ENOSYS;
Index: 2008-07-21/xen/arch/x86/x86_32/domain_page.c
===================================================================
--- 2008-07-21.orig/xen/arch/x86/x86_32/domain_page.c   2008-06-17 13:59:52.000000000 +0200
+++ 2008-07-21/xen/arch/x86/x86_32/domain_page.c        2008-07-21 08:36:26.000000000 +0200
@@ -114,7 +114,7 @@ void *map_domain_page(unsigned long mfn)
     return (void *)va;
 }
 
-void unmap_domain_page(void *va)
+void unmap_domain_page(const void *va)
 {
     unsigned int idx;
     struct vcpu *v;
@@ -238,7 +238,7 @@ void *map_domain_page_global(unsigned lo
     return (void *)va;
 }
 
-void unmap_domain_page_global(void *va)
+void unmap_domain_page_global(const void *va)
 {
     unsigned long __va = (unsigned long)va;
     l2_pgentry_t *pl2e;
Index: 2008-07-21/xen/arch/x86/x86_64/compat/mm.c
===================================================================
--- 2008-07-21.orig/xen/arch/x86/x86_64/compat/mm.c     2008-06-16 10:43:34.000000000 +0200
+++ 2008-07-21/xen/arch/x86/x86_64/compat/mm.c  2008-07-21 08:36:26.000000000 +0200
@@ -217,6 +217,8 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
             case MMUEXT_PIN_L4_TABLE:
             case MMUEXT_UNPIN_TABLE:
             case MMUEXT_NEW_BASEPTR:
+            case MMUEXT_CLEAR_PAGE:
+            case MMUEXT_COPY_PAGE:
                 arg1 = XLAT_mmuext_op_arg1_mfn;
                 break;
             default:
@@ -244,6 +246,9 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
             case MMUEXT_INVLPG_MULTI:
                 arg2 = XLAT_mmuext_op_arg2_vcpumask;
                 break;
+            case MMUEXT_COPY_PAGE:
+                arg2 = XLAT_mmuext_op_arg2_src_mfn;
+                break;
             default:
                 arg2 = -1;
                 break;
Index: 2008-07-21/xen/include/asm-x86/fixmap.h
===================================================================
--- 2008-07-21.orig/xen/include/asm-x86/fixmap.h        2008-06-17 13:59:52.000000000 +0200
+++ 2008-07-21/xen/include/asm-x86/fixmap.h     2008-07-21 08:36:26.000000000 +0200
@@ -29,6 +29,7 @@
  * from the end of virtual memory backwards.
  */
 enum fixed_addresses {
+    FIX_HOLE,
 #ifdef __i386__
     FIX_PAE_HIGHMEM_0,
     FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1,
Index: 2008-07-21/xen/include/public/xen.h
===================================================================
--- 2008-07-21.orig/xen/include/public/xen.h    2008-06-17 13:59:52.000000000 +0200
+++ 2008-07-21/xen/include/public/xen.h 2008-07-21 08:36:26.000000000 +0200
@@ -231,6 +231,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
  * cmd: MMUEXT_SET_LDT
  * linear_addr: Linear address of LDT base (NB. must be page-aligned).
  * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
  */
 #define MMUEXT_PIN_L1_TABLE      0
 #define MMUEXT_PIN_L2_TABLE      1
@@ -247,12 +254,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define MMUEXT_FLUSH_CACHE      12
 #define MMUEXT_SET_LDT          13
 #define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE   0x1000
+#define MMUEXT_COPY_PAGE    0x1001
 
 #ifndef __ASSEMBLY__
 struct mmuext_op {
     unsigned int cmd;
     union {
-        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+         * CLEAR_PAGE, COPY_PAGE */
         xen_pfn_t     mfn;
         /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
         unsigned long linear_addr;
@@ -266,6 +276,8 @@ struct mmuext_op {
 #else
         void *vcpumask;
 #endif
+        /* COPY_PAGE */
+        xen_pfn_t src_mfn;
     } arg2;
 };
 typedef struct mmuext_op mmuext_op_t;
Index: 2008-07-21/xen/include/xen/domain_page.h
===================================================================
--- 2008-07-21.orig/xen/include/xen/domain_page.h       2008-06-17 13:59:52.000000000 +0200
+++ 2008-07-21/xen/include/xen/domain_page.h    2008-07-21 08:36:26.000000000 +0200
@@ -24,7 +24,7 @@ void *map_domain_page(unsigned long mfn)
  * Pass a VA within a page previously mapped in the context of the
  * currently-executing VCPU via a call to map_domain_page().
  */
-void unmap_domain_page(void *va);
+void unmap_domain_page(const void *va);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
@@ -32,7 +32,7 @@ void unmap_domain_page(void *va);
  * mappings can also be unmapped from any context.
  */
 void *map_domain_page_global(unsigned long mfn);
-void unmap_domain_page_global(void *va);
+void unmap_domain_page_global(const void *va);
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -75,7 +75,7 @@ map_domain_page_with_cache(unsigned long
 }
 
 static inline void
-unmap_domain_page_with_cache(void *va, struct domain_mmap_cache *cache)
+unmap_domain_page_with_cache(const void *va, struct domain_mmap_cache *cache)
 {
     ASSERT(cache != NULL);
     cache->flags &= ~DMCACHE_ENTRY_HELD;


Attachment: xenlinux-i386-highmem-assist.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel