
To: Haitao Shan <maillists.shan@xxxxxxxxx>
Subject: Re: libxc: maintain a small, per-handle, cache of hypercall buffer memory (Was: Re: [Xen-devel] Xen 4.1 rc1 test report)
From: Ian Campbell <Ian.Campbell@xxxxxxxxxxxxx>
Date: Mon, 31 Jan 2011 09:32:20 +0000
Cc: "Zheng, Shaohui" <shaohui.zheng@xxxxxxxxx>, Keir Fraser <keir@xxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Delivery-date: Mon, 31 Jan 2011 01:33:15 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <AANLkTik0P=kBYN_dJBgwk43KhN9REU4T10y-5=0=Ebf9@xxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Organization: Citrix Systems, Inc.
References: <A24AE1FFE7AEC5489F83450EE98351BF2BF2EC4C9D@xxxxxxxxxxxxxxxxxxxxxxxxxxxx> <AANLkTim5QgVj82uwE8fWRZNk0EKu5iyY2tzbe3d2k4Y+@xxxxxxxxxxxxxx> <1295955798.14780.5930.camel@xxxxxxxxxxxxxxxxxxxxxx> <AANLkTiky=TUKvryg583fqPWGehdTcCMPv1hhBoCqm1J=@xxxxxxxxxxxxxx> <1296039431.14780.6753.camel@xxxxxxxxxxxxxxxxxxxxxx> <AANLkTi=+2YqkzJNEaweYsiY78XFdTsdCNonVz_T0sjyV@xxxxxxxxxxxxxx> <1296462612.20804.181.camel@xxxxxxxxxxxxxxxxxxxxx> <AANLkTik0P=kBYN_dJBgwk43KhN9REU4T10y-5=0=Ebf9@xxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
On Mon, 2011-01-31 at 08:57 +0000, Haitao Shan wrote:
> And BTW: I am using c/s 22846. 

Sorry, I hadn't noticed that a patch I was holding back for 4.2 was
sitting in my queue ahead of this one.

Version rebased onto 22846:52e928af3637 below.

Ian.

8<---------------------------------------

# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1296466278 0
# Node ID 9e6175469e6f246ee9370ef57f987bb435b00bef
# Parent  5b6663ba2bb2c54e8fa6745afa16297ebe43328d
libxc: maintain a small, per-handle, cache of hypercall buffer memory

Constantly m(un)locking memory can have significant overhead on
systems with large numbers of CPUs. This was previously fixed by
20841:fbe8f32fa257, but that fix was dropped during the transition
to hypercall buffers.

Introduce a small cache of single page hypercall buffer allocations
which can be reused to avoid this overhead.

Add some statistics tracking to the hypercall buffer allocations.

The cache size of 4 was chosen based on these statistics, which
indicated that 2 pages were sufficient to satisfy all concurrent
single page hypercall buffer allocations seen during "xl create",
"xl shutdown" and "xl destroy" of both a PV and an HVM guest; 4
pages should therefore cover the majority of important cases.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
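
For illustration, the caller-visible pattern this cache targets,
using the existing hypercall buffer macros from xenctrl.h (a sketch,
not part of the change; it assumes an already-open xc_interface *xch):

    /* A single-page hypercall buffer round trip. With this patch
     * the free puts the page into the per-handle cache and the next
     * allocation reuses it, avoiding a munlock()/mlock() pair. */
    DECLARE_HYPERCALL_BUFFER(uint8_t, buf);

    buf = xc_hypercall_buffer_alloc_pages(xch, buf, 1);
    if ( buf == NULL )
        return -1;

    /* ... pass HYPERCALL_BUFFER(buf) to a hypercall ... */

    xc_hypercall_buffer_free_pages(xch, buf, 1); /* cached for reuse */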

diff -r 5b6663ba2bb2 -r 9e6175469e6f tools/libxc/xc_hcall_buf.c
--- a/tools/libxc/xc_hcall_buf.c        Mon Jan 31 09:14:52 2011 +0000
+++ b/tools/libxc/xc_hcall_buf.c        Mon Jan 31 09:31:18 2011 +0000
@@ -18,6 +18,7 @@
 
 #include <stdlib.h>
 #include <malloc.h>
+#include <pthread.h>
 
 #include "xc_private.h"
 #include "xg_private.h"
@@ -28,31 +29,137 @@ xc_hypercall_buffer_t XC__HYPERCALL_BUFF
     HYPERCALL_BUFFER_INIT_NO_BOUNCE
 };
 
+pthread_mutex_t hypercall_buffer_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void hypercall_buffer_cache_lock(xc_interface *xch)
+{
+    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
+        return;
+    pthread_mutex_lock(&hypercall_buffer_cache_mutex);
+}
+
+static void hypercall_buffer_cache_unlock(xc_interface *xch)
+{
+    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
+        return;
+    pthread_mutex_unlock(&hypercall_buffer_cache_mutex);
+}
+
+static void *hypercall_buffer_cache_alloc(xc_interface *xch, int nr_pages)
+{
+    void *p = NULL;
+
+    hypercall_buffer_cache_lock(xch);
+
+    xch->hypercall_buffer_total_allocations++;
+    xch->hypercall_buffer_current_allocations++;
+    if ( xch->hypercall_buffer_current_allocations > xch->hypercall_buffer_maximum_allocations )
+        xch->hypercall_buffer_maximum_allocations = xch->hypercall_buffer_current_allocations;
+
+    if ( nr_pages > 1 )
+    {
+        xch->hypercall_buffer_cache_toobig++;
+    }
+    else if ( xch->hypercall_buffer_cache_nr > 0 )
+    {
+        p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr];
+        xch->hypercall_buffer_cache_hits++;
+    }
+    else
+    {
+        xch->hypercall_buffer_cache_misses++;
+    }
+
+    hypercall_buffer_cache_unlock(xch);
+
+    return p;
+}
+
+static int hypercall_buffer_cache_free(xc_interface *xch, void *p, int nr_pages)
+{
+    int rc = 0;
+
+    hypercall_buffer_cache_lock(xch);
+
+    xch->hypercall_buffer_total_releases++;
+    xch->hypercall_buffer_current_allocations--;
+
+    if ( nr_pages == 1 && xch->hypercall_buffer_cache_nr < HYPERCALL_BUFFER_CACHE_SIZE )
+    {
+        xch->hypercall_buffer_cache[xch->hypercall_buffer_cache_nr++] = p;
+        rc = 1;
+    }
+
+    hypercall_buffer_cache_unlock(xch);
+
+    return rc;
+}
+
+static void do_hypercall_buffer_free_pages(void *ptr, int nr_pages)
+{
+#ifndef __sun__
+    (void) munlock(ptr, nr_pages * PAGE_SIZE);
+#endif
+
+    free(ptr);
+}
+
+void xc__hypercall_buffer_cache_release(xc_interface *xch)
+{
+    void *p;
+
+    hypercall_buffer_cache_lock(xch);
+
+    DBGPRINTF("hypercall buffer: total allocations:%d total releases:%d",
+              xch->hypercall_buffer_total_allocations,
+              xch->hypercall_buffer_total_releases);
+    DBGPRINTF("hypercall buffer: current allocations:%d maximum 
allocations:%d",
+              xch->hypercall_buffer_current_allocations,
+              xch->hypercall_buffer_maximum_allocations);
+    DBGPRINTF("hypercall buffer: cache current size:%d",
+              xch->hypercall_buffer_cache_nr);
+    DBGPRINTF("hypercall buffer: cache hits:%d misses:%d toobig:%d",
+              xch->hypercall_buffer_cache_hits,
+              xch->hypercall_buffer_cache_misses,
+              xch->hypercall_buffer_cache_toobig);
+
+    while ( xch->hypercall_buffer_cache_nr > 0 )
+    {
+        p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr];
+        do_hypercall_buffer_free_pages(p, 1);
+    }
+
+    hypercall_buffer_cache_unlock(xch);
+}
+
 void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
 {
     size_t size = nr_pages * PAGE_SIZE;
-    void *p;
+    void *p = hypercall_buffer_cache_alloc(xch, nr_pages);
+
+    if ( !p ) {
 #if defined(_POSIX_C_SOURCE) && !defined(__sun__)
-    int ret;
-    ret = posix_memalign(&p, PAGE_SIZE, size);
-    if (ret != 0)
-        return NULL;
+        int ret;
+        ret = posix_memalign(&p, PAGE_SIZE, size);
+        if (ret != 0)
+            return NULL;
 #elif defined(__NetBSD__) || defined(__OpenBSD__)
-    p = valloc(size);
+        p = valloc(size);
 #else
-    p = memalign(PAGE_SIZE, size);
+        p = memalign(PAGE_SIZE, size);
 #endif
 
-    if (!p)
-        return NULL;
+        if (!p)
+            return NULL;
 
 #ifndef __sun__
-    if ( mlock(p, size) < 0 )
-    {
-        free(p);
-        return NULL;
+        if ( mlock(p, size) < 0 )
+        {
+            free(p);
+            return NULL;
+        }
+#endif
     }
-#endif
 
     b->hbuf = p;
 
@@ -65,11 +172,8 @@ void xc__hypercall_buffer_free_pages(xc_
     if ( b->hbuf == NULL )
         return;
 
-#ifndef __sun__
-    (void) munlock(b->hbuf, nr_pages * PAGE_SIZE);
-#endif
-
-    free(b->hbuf);
+    if ( !hypercall_buffer_cache_free(xch, b->hbuf, nr_pages) )
+        do_hypercall_buffer_free_pages(b->hbuf, nr_pages);
 }
 
 struct allocation_header {
diff -r 5b6663ba2bb2 -r 9e6175469e6f tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Mon Jan 31 09:14:52 2011 +0000
+++ b/tools/libxc/xc_private.c  Mon Jan 31 09:31:18 2011 +0000
@@ -126,6 +126,16 @@ static struct xc_interface_core *xc_inte
     xch->error_handler   = logger;           xch->error_handler_tofree   = 0;
     xch->dombuild_logger = dombuild_logger;  xch->dombuild_logger_tofree = 0;
 
+    xch->hypercall_buffer_cache_nr = 0;
+
+    xch->hypercall_buffer_total_allocations = 0;
+    xch->hypercall_buffer_total_releases = 0;
+    xch->hypercall_buffer_current_allocations = 0;
+    xch->hypercall_buffer_maximum_allocations = 0;
+    xch->hypercall_buffer_cache_hits = 0;
+    xch->hypercall_buffer_cache_misses = 0;
+    xch->hypercall_buffer_cache_toobig = 0;
+
     xch->ops_handle = XC_OSDEP_OPEN_ERROR;
     xch->ops = NULL;
 
@@ -171,6 +181,8 @@ static int xc_interface_close_common(xc_
 static int xc_interface_close_common(xc_interface *xch)
 {
     int rc = 0;
+
+    xc__hypercall_buffer_cache_release(xch);
 
     xtl_logger_destroy(xch->dombuild_logger_tofree);
     xtl_logger_destroy(xch->error_handler_tofree);
diff -r 5b6663ba2bb2 -r 9e6175469e6f tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Mon Jan 31 09:14:52 2011 +0000
+++ b/tools/libxc/xc_private.h  Mon Jan 31 09:31:18 2011 +0000
@@ -75,6 +75,28 @@ struct xc_interface_core {
     FILE *dombuild_logger_file;
     const char *currently_progress_reporting;
 
+    /*
+     * A simple cache of unused, single page, hypercall buffers
+     *
+     * Protected by a global lock.
+     */
+#define HYPERCALL_BUFFER_CACHE_SIZE 4
+    int hypercall_buffer_cache_nr;
+    void *hypercall_buffer_cache[HYPERCALL_BUFFER_CACHE_SIZE];
+
+    /*
+     * Hypercall buffer statistics. All protected by the global
+     * hypercall_buffer_cache lock.
+     */
+    int hypercall_buffer_total_allocations;
+    int hypercall_buffer_total_releases;
+    int hypercall_buffer_current_allocations;
+    int hypercall_buffer_maximum_allocations;
+    int hypercall_buffer_cache_hits;
+    int hypercall_buffer_cache_misses;
+    int hypercall_buffer_cache_toobig;
+
+    /* Low level OS interface */
     xc_osdep_info_t  osdep;
     xc_osdep_ops    *ops; /* backend operations */
     xc_osdep_handle  ops_handle; /* opaque data for xc_osdep_ops */
@@ -156,6 +178,11 @@ int xc__hypercall_bounce_pre(xc_interfac
 #define xc_hypercall_bounce_pre(_xch, _name) xc__hypercall_bounce_pre(_xch, HYPERCALL_BUFFER(_name))
 void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *bounce);
 #define xc_hypercall_bounce_post(_xch, _name) xc__hypercall_bounce_post(_xch, HYPERCALL_BUFFER(_name))
+
+/*
+ * Release hypercall buffer cache
+ */
+void xc__hypercall_buffer_cache_release(xc_interface *xch);
 
 /*
  * Hypercall interfaces.



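As the hypercall_buffer_cache_lock()/unlock() helpers in the patch
show, handles opened with XC_OPENFLAG_NON_REENTRANT skip the pthread
mutex around the cache entirely. A minimal single-threaded sketch
(error handling elided):

    #include <xenctrl.h>

    /* Non-reentrant handle: the cache lock/unlock become no-ops. */
    xc_interface *xch = xc_interface_open(NULL, NULL,
                                          XC_OPENFLAG_NON_REENTRANT);

    /* ... hypercall buffer allocations now hit the per-handle cache ... */

    /* Closing the handle drains the cache via
     * xc__hypercall_buffer_cache_release() and logs the statistics. */
    xc_interface_close(xch);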
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

<Prev in Thread] Current Thread [Next in Thread>