To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] mem_event: Clean up and remove over-sized paused_vcpus[] array.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 09 Oct 2010 14:55:16 -0700
Delivery-date: Sat, 09 Oct 2010 14:56:36 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1284561942 -3600
# Node ID cf70ef051a8216bc161f3cf5627b207f66b374c7
# Parent  ff011e0cb17c29db8a46ce046f6074a27461cfb8
mem_event: Clean up and remove over-sized paused_vcpus[] array.

This cuts the size of the domain structure by around 30kB! It is now a
little over a page in size.
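
For context on where the saving comes from: the patch drops the MAX_VIRT_CPUS-sized
paused_vcpus[] array from struct mem_event_domain and instead parks a vcpu that hits
a full ring on a new pause-flag bit, _VPF_mem_event, set via set_bit() plus
vcpu_sleep_nosync() and cleared via test_and_clear_bit() plus vcpu_wake(). The
standalone C sketch below is illustrative only -- the MAX_VIRT_CPUS value, the
toy_vcpu type and the stdatomic calls are stand-ins rather than Xen APIs -- but it
shows the shape of the change: a per-domain array replaced by one bit per vcpu.

    #include <stdio.h>
    #include <stdatomic.h>

    #define MAX_VIRT_CPUS   8192   /* illustrative value, not Xen's */
    #define _VPF_mem_event  4      /* bit index, as in the patch */

    /* Old layout: every domain carried a full-size array, used or not. */
    struct old_mem_event_domain {
        int paused_vcpus[MAX_VIRT_CPUS];
    };

    /* New idea: one flag bit per vcpu, stored in the vcpu itself. */
    struct toy_vcpu {
        atomic_ulong pause_flags;
    };

    /* Ring full: mark this vcpu as blocked on the mem_event ring. */
    static void ring_full_pause(struct toy_vcpu *v)
    {
        atomic_fetch_or(&v->pause_flags, 1UL << _VPF_mem_event);
    }

    /* Response arrived: report whether the vcpu was actually flagged. */
    static int unpause_if_flagged(struct toy_vcpu *v)
    {
        unsigned long old = atomic_fetch_and(&v->pause_flags,
                                             ~(1UL << _VPF_mem_event));
        return (old >> _VPF_mem_event) & 1;
    }

    int main(void)
    {
        struct toy_vcpu v = { 0 };

        printf("old per-domain cost: %zu bytes\n",
               sizeof(struct old_mem_event_domain));
        printf("new per-vcpu cost:   %zu bytes\n", sizeof(struct toy_vcpu));

        ring_full_pause(&v);
        printf("flag seen on first clear:  %d\n", unpause_if_flagged(&v));
        printf("flag seen on second clear: %d\n", unpause_if_flagged(&v));
        return 0;
    }

Compiled with any C11 compiler (e.g. cc -std=c11), it prints the array cost next to
the single-word per-vcpu cost and shows the test-and-clear succeeding exactly once,
which is the behaviour the real mem_event_unpause_vcpus() relies on below.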

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/mm/mem_event.c     |   50 +++++++++++++++-------------------------
 xen/include/asm-x86/mem_event.h |   25 --------------------
 xen/include/xen/sched.h         |   12 ++-------
 3 files changed, 22 insertions(+), 65 deletions(-)

diff -r ff011e0cb17c -r cf70ef051a82 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Wed Sep 15 14:03:26 2010 +0100
+++ b/xen/arch/x86/mm/mem_event.c       Wed Sep 15 15:45:42 2010 +0100
@@ -27,16 +27,18 @@
 #include <asm/mem_event.h>
 #include <asm/mem_paging.h>
 
-
+/* for public/io/ring.h macros */
 #define xen_mb()   mb()
 #define xen_rmb()  rmb()
 #define xen_wmb()  wmb()
 
+#define mem_event_ring_lock_init(_d)  spin_lock_init(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
 
 #define MEM_EVENT_RING_THRESHOLD 4
 
-
-int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
 {
     int rc;
 
@@ -64,9 +66,6 @@ int mem_event_enable(struct domain *d, m
                     PAGE_SIZE);
 
     mem_event_ring_lock_init(d);
-
-    d->mem_event.paused = 0;
-    d->mem_event.enabled = 1;
 
     return 0;
 
@@ -80,11 +79,8 @@ int mem_event_enable(struct domain *d, m
     return 1;
 }
 
-int mem_event_disable(struct domain *d)
-{
-    d->mem_event.enabled = 0;
-    d->mem_event.paused = 0;
-
+static int mem_event_disable(struct domain *d)
+{
     unmap_domain_page(d->mem_event.ring_page);
     d->mem_event.ring_page = NULL;
 
@@ -142,26 +138,14 @@ void mem_event_unpause_vcpus(struct doma
 {
     struct vcpu *v;
 
-    for_each_vcpu(d, v)
-    {
-        if ( d->mem_event.paused_vcpus[v->vcpu_id] )
-        {
-            vcpu_unpause(v);
-            d->mem_event.paused_vcpus[v->vcpu_id] = 0;
-        }
-    }
-}
-
-int mem_event_pause_vcpu(struct domain *d, struct vcpu *v)
-{
-    vcpu_pause_nosync(v);
-    d->mem_event.paused_vcpus[v->vcpu_id] = 1;
-
-    return 0;
+    for_each_vcpu ( d, v )
+        if ( test_and_clear_bit(_VPF_mem_event, &v->pause_flags) )
+            vcpu_wake(v);
 }
 
 int mem_event_check_ring(struct domain *d)
 {
+    struct vcpu *curr = current;
     int free_requests;
     int ring_full;
 
@@ -170,8 +154,11 @@ int mem_event_check_ring(struct domain *
     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
     ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
 
-    if ( (current->domain->domain_id == d->domain_id) && ring_full )
-        mem_event_pause_vcpu(d, current);
+    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
+    {
+        set_bit(_VPF_mem_event, &curr->pause_flags);
+        vcpu_sleep_nosync(curr);
+    }
 
     mem_event_ring_unlock(d);
 
@@ -198,8 +185,9 @@ int mem_event_domctl(struct domain *d, x
 
     if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
     {
-        MEM_EVENT_ERROR("Memory paging op on a domain (%u) with no vcpus\n",
-                         d->domain_id);
+        gdprintk(XENLOG_INFO,
+                 "Memory paging op on a domain (%u) with no vcpus\n",
+                 d->domain_id);
         return -EINVAL;
     }
 
diff -r ff011e0cb17c -r cf70ef051a82 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h   Wed Sep 15 14:03:26 2010 +0100
+++ b/xen/include/asm-x86/mem_event.h   Wed Sep 15 15:45:42 2010 +0100
@@ -24,31 +24,6 @@
 #ifndef __MEM_EVENT_H__
 #define __MEM_EVENT_H__
 
-
-/* Printouts */
-#define MEM_EVENT_PRINTK(_f, _a...)                                      \
-    debugtrace_printk("mem_event: %s(): " _f, __func__, ##_a)
-#define MEM_EVENT_ERROR(_f, _a...)                                       \
-    printk("mem_event error: %s(): " _f, __func__, ##_a)
-#define MEM_EVENT_DEBUG(flag, _f, _a...)                                 \
-    do {                                                                  \
-        if (MEM_EVENT_DEBUG_ ## flag)                                    \
-            debugtrace_printk("mem_event debug: %s(): " _f, __func__, ##_a); \
-    } while (0)
-
-
-#define mem_event_enabled(_d) (_d)->mem_event.enabled
-
-
-/* Ring lock */
-#define mem_event_ring_lock_init(_d)  spin_lock_init(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
-
-
-int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn);
-int mem_event_disable(struct domain *d);
-
 int mem_event_check_ring(struct domain *d);
 void mem_event_put_request(struct domain *d, mem_event_request_t *req);
 void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
diff -r ff011e0cb17c -r cf70ef051a82 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Wed Sep 15 14:03:26 2010 +0100
+++ b/xen/include/xen/sched.h   Wed Sep 15 15:45:42 2010 +0100
@@ -188,15 +188,6 @@ struct mem_event_domain
     void *ring_page;
     /* front-end ring */
     mem_event_front_ring_t front_ring;
-    /* if domain has been paused due to ring contention */
-    bool_t paused;
-    int paused_vcpus[MAX_VIRT_CPUS];
-    /* the memory event mode */
-    unsigned long mode;
-    /* domain to receive memory events */
-    struct domain *domain;
-    /* enabled? */
-    bool_t enabled;
     /* event channel port (vcpu0 only) */
     int xen_port;
 };
@@ -581,6 +572,9 @@ extern struct domain *domain_list;
  /* VCPU affinity has changed: migrating to a new CPU. */
 #define _VPF_migrating       3
 #define VPF_migrating        (1UL<<_VPF_migrating)
+ /* VCPU is blocked on memory-event ring. */
+#define _VPF_mem_event       4
+#define VPF_mem_event        (1UL<<_VPF_mem_event)
 
 static inline int vcpu_runnable(struct vcpu *v)
 {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
