WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Domctls defined for all relevant memory s

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Domctls defined for all relevant memory sharing operations.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 16 Dec 2009 22:41:14 -0800
Delivery-date: Wed, 16 Dec 2009 22:43:49 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031276 0
# Node ID 68e964ec2c7bdb3b4f8997b3c027cedfaa2c7bb2
# Parent  8cf5bffd9663fc541be4d4a1b63630787739fd0d
Domctls defined for all relevant memory sharing operations.

Signed-off-by: Grzegorz Milos <Grzegorz.Milos@xxxxxxxxxx>
---
 xen/arch/x86/domain.c             |    1 
 xen/arch/x86/domctl.c             |   16 ++
 xen/arch/x86/mm.c                 |    3 
 xen/arch/x86/mm/mem_sharing.c     |  223 ++++++++++++++++++++++++++++++++++----
 xen/common/domctl.c               |    1 
 xen/include/asm-x86/hvm/domain.h  |    1 
 xen/include/asm-x86/mem_sharing.h |    3 
 xen/include/public/domctl.h       |   49 ++++++++
 xen/include/public/memory.h       |    7 +
 xen/include/xen/sched.h           |    1 
 10 files changed, 283 insertions(+), 22 deletions(-)

diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/domain.c     Thu Dec 17 06:27:56 2009 +0000
@@ -404,6 +404,7 @@ int arch_domain_create(struct domain *d,
         is_hvm_domain(d) &&
         hvm_funcs.hap_supported &&
         (domcr_flags & DOMCRF_hap);
+    d->arch.hvm_domain.mem_sharing_enabled = 0;
 
     d->arch.s3_integrity = !!(domcr_flags & DOMCRF_s3_integrity);
 
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/domctl.c     Thu Dec 17 06:27:56 2009 +0000
@@ -32,6 +32,7 @@
 #include <xen/iommu.h>
 #include <asm/mem_event.h>
 #include <public/mem_event.h>
+#include <asm/mem_sharing.h>
 
 #ifdef XEN_GDBSX_CONFIG                    
 #ifdef XEN_KDB_CONFIG
@@ -1317,6 +1318,21 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_mem_sharing_op:
+    {
+        struct domain *d;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d != NULL )
+        {
+            ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
+            rcu_unlock_domain(d);
+            copy_to_guest(u_domctl, domctl, 1);
+        } 
+    }
+    break;
+
     default:
         ret = -ENOSYS;
         break;
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm.c Thu Dec 17 06:27:56 2009 +0000
@@ -4503,6 +4503,9 @@ long arch_memory_op(int op, XEN_GUEST_HA
         return rc;
     }
 
+    case XENMEM_get_sharing_freed_pages:
+        return mem_sharing_get_nr_saved_mfns();
+
     default:
         return subarch_memory_op(op, arg);
     }
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Dec 17 06:27:56 2009 +0000
@@ -24,11 +24,18 @@
 #include <asm/string.h>
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
+#include <asm/atomic.h>
 #include <xen/domain_page.h>
 #include <xen/types.h>
 #include <xen/spinlock.h>
 #include <xen/mm.h>
 #include <xen/sched.h>
+
+
+#define hap_enabled(d) \
+    (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
+#define mem_sharing_enabled(d) \
+    (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
  
 #undef mfn_to_page
 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
@@ -38,6 +45,7 @@
 #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
 
 static shr_handle_t next_handle = 1;
+static atomic_t nr_saved_mfns = ATOMIC_INIT(0); 
 
 typedef struct shr_hash_entry 
 {
@@ -64,6 +72,17 @@ typedef struct shr_lock
     const char *locker_function; /* func that took it */
 } shr_lock_t;
 static shr_lock_t shr_lock;
+
+/* Returns true if list has only one entry. O(1) complexity. */
+static inline int list_has_one_entry(struct list_head *head)
+{
+    return (head->next != head) && (head->next->next == head);
+}
+
+static inline struct gfn_info* gfn_get_info(struct list_head *list)
+{
+    return list_entry(list->next, struct gfn_info, list);
+}
 
 #define shr_lock_init(_i)                      \
     do {                                       \
@@ -97,8 +116,6 @@ static shr_lock_t shr_lock;
         spin_unlock(&shr_lock.lock);                      \
     } while (0)
 
-
-
 static void mem_sharing_hash_init(void)
 {
     int i;
@@ -123,9 +140,21 @@ static gfn_info_t *mem_sharing_gfn_alloc
     return xmalloc(gfn_info_t); 
 }
 
-static void mem_sharing_gfn_destroy(gfn_info_t *gfn_info)
-{
-    xfree(gfn_info);
+static void mem_sharing_gfn_destroy(gfn_info_t *gfn, int was_shared)
+{
+    /* Decrement the number of pages, if the gfn was shared before */
+    if(was_shared)
+    {
+        struct domain *d = get_domain_by_id(gfn->domain);
+        /* Domain may have been destroyed by now, if we are called from
+         * p2m_teardown */
+        if(d)
+        {
+            atomic_dec(&d->shr_pages);
+            put_domain(d);
+        }
+    }
+    xfree(gfn);
 }
 
 static shr_hash_entry_t* mem_sharing_hash_lookup(shr_handle_t handle)
@@ -219,6 +248,11 @@ static struct page_info* mem_sharing_all
     return page;
 }
 
+unsigned int mem_sharing_get_nr_saved_mfns(void)
+{
+    return (unsigned int)atomic_read(&nr_saved_mfns);
+}
+
 int mem_sharing_sharing_resume(struct domain *d)
 {
     mem_event_response_t rsp;
@@ -290,6 +324,61 @@ shared_entry_header(struct grant_table *
         return &shared_entry_v2(t, ref).hdr;
 }
 
+static int mem_sharing_gref_to_gfn(struct domain *d, 
+                                   grant_ref_t ref, 
+                                   unsigned long *gfn)
+{
+    if(d->grant_table->gt_version < 1)
+        return -1;
+
+    if (d->grant_table->gt_version == 1) 
+    {
+        grant_entry_v1_t *sha1;
+        sha1 = &shared_entry_v1(d->grant_table, ref);
+        *gfn = sha1->frame;
+        return 0;
+    } 
+    else 
+    {
+        grant_entry_v2_t *sha2;
+        sha2 = &shared_entry_v2(d->grant_table, ref);
+        *gfn = sha2->full_page.frame;
+        return 0;
+    }
+ 
+    return -2;
+}
+
+/* Account for a GFN being shared/unshared.
+ * When sharing this function needs to be called _before_ gfn lists are merged
+ * together, but _after_ gfn is removed from the list when unsharing.
+ */
+static int mem_sharing_gfn_account(struct gfn_info *gfn, int sharing)
+{
+    struct domain *d;
+
+    /* A) When sharing:
+     * if the gfn being shared is in a list longer than 1, it's already been 
+     * accounted for
+     * B) When unsharing:
+     * if the list is longer than 1, we don't have to account yet. 
+     */
+    if(list_has_one_entry(&gfn->list))
+    {
+        d = get_domain_by_id(gfn->domain);
+        BUG_ON(!d);
+        if(sharing) 
+            atomic_inc(&d->shr_pages);
+        else
+            atomic_dec(&d->shr_pages);
+        put_domain(d);
+
+        return 1;
+    }
+
+    return 0;
+}
+
 int mem_sharing_debug_gref(struct domain *d, grant_ref_t ref)
 {
     grant_entry_header_t *shah;
@@ -302,21 +391,12 @@ int mem_sharing_debug_gref(struct domain
                 d->domain_id, ref);
         return -1;
     }
+    mem_sharing_gref_to_gfn(d, ref, &gfn); 
     shah = shared_entry_header(d->grant_table, ref);
     if (d->grant_table->gt_version == 1) 
-    {
-        grant_entry_v1_t *sha1;
-        sha1 = &shared_entry_v1(d->grant_table, ref);
         status = shah->flags;
-        gfn = sha1->frame;
-    } 
     else 
-    {
-        grant_entry_v2_t *sha2;
-        sha2 = &shared_entry_v2(d->grant_table, ref);
         status = status_entry(d->grant_table, ref);
-        gfn = sha2->full_page.frame;
-    }
     
     printk("==> Grant [dom=%d,ref=%d], status=%x. ", 
             d->domain_id, ref, status);
@@ -381,7 +461,7 @@ int mem_sharing_nominate_page(struct dom
          * since no-one knew that the mfn was temporarily sharable */
         ASSERT(page_make_private(d, page) == 0);
         mem_sharing_hash_destroy(hash_entry);
-        mem_sharing_gfn_destroy(gfn_info);
+        mem_sharing_gfn_destroy(gfn_info, 0);
         shr_unlock();
         goto out;
     }
@@ -415,14 +495,17 @@ int mem_sharing_share_pages(shr_handle_t
 
     shr_lock();
 
-    ret = -1;
+    ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
     se = mem_sharing_hash_lookup(sh);
     if(se == NULL) goto err_out;
-    ret = -2;
+    ret = XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID;
     ce = mem_sharing_hash_lookup(ch);
     if(ce == NULL) goto err_out;
     spage = mfn_to_page(se->mfn); 
     cpage = mfn_to_page(ce->mfn); 
+    /* gfn lists always have at least one entry => safe to call list_entry */
+    mem_sharing_gfn_account(gfn_get_info(&ce->gfns), 1);
+    mem_sharing_gfn_account(gfn_get_info(&se->gfns), 1);
     list_for_each_safe(le, te, &ce->gfns)
     {
         gfn = list_entry(le, struct gfn_info, list);
@@ -440,6 +523,7 @@ int mem_sharing_share_pages(shr_handle_t
     } 
     ASSERT(list_empty(&ce->gfns));
     mem_sharing_hash_delete(ch);
+    atomic_inc(&nr_saved_mfns);
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
@@ -491,8 +575,13 @@ gfn_found:
      * (possibly freeing the page), and exit early */
     if(flags & MEM_SHARING_DESTROY_GFN)
     {
-        mem_sharing_gfn_destroy(gfn_info);
-        if(last_gfn) mem_sharing_hash_delete(handle);
+        mem_sharing_gfn_destroy(gfn_info, !last_gfn);
+        if(last_gfn) 
+            mem_sharing_hash_delete(handle);
+        else 
+            /* Even though we don't allocate a private page, we have to account
+             * for the MFN that originally backed this PFN. */
+            atomic_dec(&nr_saved_mfns);
         shr_unlock();
         put_page_and_type(page);
         if(last_gfn && 
@@ -528,8 +617,11 @@ gfn_found:
 
 private_page_found:    
     /* We've got a private page, we can commit the gfn destruction */
-    mem_sharing_gfn_destroy(gfn_info);
-    if(last_gfn) mem_sharing_hash_delete(handle);
+    mem_sharing_gfn_destroy(gfn_info, !last_gfn);
+    if(last_gfn) 
+        mem_sharing_hash_delete(handle);
+    else
+        atomic_dec(&nr_saved_mfns);
     shr_unlock();
 
     if(p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) != 
@@ -544,6 +636,93 @@ private_page_found:
     return 0;
 }
 
+int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
+{
+    int rc;
+
+    switch(mec->op)
+    {
+        case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
+        {
+            rc = 0;
+            if(!hap_enabled(d))
+                return -EINVAL;
+            d->arch.hvm_domain.mem_sharing_enabled = mec->enable;
+            return 0; 
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN:
+        {
+            unsigned long gfn = mec->nominate.gfn;
+            shr_handle_t handle;
+            if(!mem_sharing_enabled(d))
+                return -EINVAL;
+            rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
+            mec->nominate.handle = handle;
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF:
+        {
+            grant_ref_t gref = mec->nominate.grant_ref;
+            unsigned long gfn;
+            shr_handle_t handle;
+
+            if(!mem_sharing_enabled(d))
+                return -EINVAL;
+            if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
+                return -EINVAL;
+            rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
+            mec->nominate.handle = handle;
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_SHARE:
+        {
+            shr_handle_t sh = mec->share.source_handle;
+            shr_handle_t ch = mec->share.client_handle;
+            rc = mem_sharing_share_pages(sh, ch); 
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_RESUME:
+        {
+            if(!mem_sharing_enabled(d))
+                return -EINVAL;
+            rc = mem_sharing_sharing_resume(d);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN:
+        {
+            unsigned long gfn = mec->debug.gfn;
+            rc = mem_sharing_debug_gfn(d, gfn);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN:
+        {
+            unsigned long mfn = mec->debug.mfn;
+            rc = mem_sharing_debug_mfn(mfn);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF:
+        {
+            grant_ref_t gref = mec->debug.gref;
+            rc = mem_sharing_debug_gref(d, gref);
+        }
+        break;
+
+        default:
+            rc = -ENOSYS;
+            break;
+    }
+
+    return rc;
+}
+
 void mem_sharing_init(void)
 {
     printk("Initing memory sharing.\n");
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/common/domctl.c
--- a/xen/common/domctl.c       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/common/domctl.c       Thu Dec 17 06:27:56 2009 +0000
@@ -136,6 +136,7 @@ void getdomaininfo(struct domain *d, str
 
     info->tot_pages         = d->tot_pages;
     info->max_pages         = d->max_pages;
+    info->shr_pages         = atomic_read(&d->shr_pages);
     info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
     BUG_ON(SHARED_M2P(info->shared_info_frame));
 
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/asm-x86/hvm/domain.h  Thu Dec 17 06:27:56 2009 +0000
@@ -90,6 +90,7 @@ struct hvm_domain {
     struct viridian_domain viridian;
 
     bool_t                 hap_enabled;
+    bool_t                 mem_sharing_enabled;
     bool_t                 qemu_mapcache_invalidate;
     bool_t                 is_s3_suspended;
 
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/asm-x86/mem_sharing.h Thu Dec 17 06:27:56 2009 +0000
@@ -27,6 +27,7 @@
 
 typedef uint64_t shr_handle_t; 
 
+unsigned int mem_sharing_get_nr_saved_mfns(void);
 int mem_sharing_nominate_page(struct domain *d, 
                               unsigned long gfn,
                               int expected_refcnt,
@@ -38,6 +39,8 @@ int mem_sharing_unshare_page(struct doma
                              uint16_t flags);
 int mem_sharing_sharing_resume(struct domain *d);
 int mem_sharing_cache_resize(struct domain *d, int new_size);
+int mem_sharing_domctl(struct domain *d, 
+                       xen_domctl_mem_sharing_op_t *mec);
 void mem_sharing_init(void);
 
 #endif /* __MEM_SHARING_H__ */
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/public/domctl.h       Thu Dec 17 06:27:56 2009 +0000
@@ -33,6 +33,7 @@
 #endif
 
 #include "xen.h"
+#include "grant_table.h"
 
 #define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
 
@@ -103,6 +104,7 @@ struct xen_domctl_getdomaininfo {
     uint32_t flags;              /* XEN_DOMINF_* */
     uint64_aligned_t tot_pages;
     uint64_aligned_t max_pages;
+    uint64_aligned_t shr_pages;
     uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
     uint64_aligned_t cpu_time;
     uint32_t nr_online_vcpus;    /* Number of VCPUs currently online. */
@@ -726,6 +728,52 @@ struct xen_domctl_mem_event_op {
 };
 typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
+
+/*
+ * Memory sharing operations
+ */
+#define XEN_DOMCTL_mem_sharing_op  58
+
+#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL        0
+#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN   1
+#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF  2
+#define XEN_DOMCTL_MEM_SHARING_OP_SHARE          3
+#define XEN_DOMCTL_MEM_SHARING_OP_RESUME         4
+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN      5
+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN      6
+#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF     7
+
+#define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID  (-10)
+#define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID  (-9)
+
+struct xen_domctl_mem_sharing_op {
+    uint8_t       op;            /* XEN_DOMCTL_MEM_SHARING_OP_* */
+
+    union {
+        int enable;                       /* for OP_CONTROL            */
+
+        struct mem_sharing_op_nominate {  /* for OP_NOMINATE           */
+            union {
+                unsigned long gfn;        /* IN: gfn to nominate       */
+                uint32_t      grant_ref;  /* IN: grant ref to nominate */
+            };
+            uint64_t       handle;        /* OUT: the handle           */
+        } nominate;
+        struct mem_sharing_op_share {
+            uint64_t source_handle;       /* IN: handle to the source page */
+            uint64_t client_handle;       /* IN: handle to the client page */
+        } share; 
+        struct mem_sharing_op_debug {
+            union {
+                unsigned long  gfn;        /* IN: gfn to debug          */
+                unsigned long  mfn;        /* IN: mfn to debug          */
+                grant_ref_t    gref;       /* IN: gref to debug         */
+            };
+        } debug;
+    };
+};
+typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
 
 
 struct xen_domctl {
@@ -772,6 +820,7 @@ struct xen_domctl {
         struct xen_domctl_subscribe         subscribe;
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_mem_event_op      mem_event_op;
+        struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
 #endif
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/include/public/memory.h
--- a/xen/include/public/memory.h       Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/public/memory.h       Thu Dec 17 06:27:56 2009 +0000
@@ -281,6 +281,13 @@ struct xen_pod_target {
     domid_t domid;
 };
 typedef struct xen_pod_target xen_pod_target_t;
+
+/*
+ * Get the number of MFNs saved through memory sharing.
+ * The call never fails. 
+ */
+#define XENMEM_get_sharing_freed_pages    18
+
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
 /*
diff -r 8cf5bffd9663 -r 68e964ec2c7b xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Dec 17 06:27:56 2009 +0000
+++ b/xen/include/xen/sched.h   Thu Dec 17 06:27:56 2009 +0000
@@ -201,6 +201,7 @@ struct domain
     struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
     unsigned int     tot_pages;       /* number of pages currently possesed */
     unsigned int     max_pages;       /* maximum value for tot_pages        */
+    atomic_t         shr_pages;       /* number of shared pages             */
     unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
 
     unsigned int     max_vcpus;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] Domctls defined for all relevant memory sharing operations., Xen patchbot-unstable <=