To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 1 of 2] mem_event: pass mem_event_domain pointer to mem_event functions
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Wed, 07 Sep 2011 14:15:29 +0200
In-reply-to: <patchbomb.1315397728@xxxxxxxxxxxx>
References: <patchbomb.1315397728@xxxxxxxxxxxx>
User-agent: Mercurial-patchbomb/1.7.5
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1315389343 -7200
# Node ID 4e7bcc14b76a8e37bc8e2fe490ef6b3209bd8738
# Parent  0268e73809532a4a3ca18a075efcee3c62caf458
mem_event: pass mem_event_domain pointer to mem_event functions

Pass a struct mem_event_domain pointer to the various mem_event
functions.  This will be used in a subsequent patch which creates
different ring buffers for the memshare, xenpaging and memaccess
functionality.

Remove the struct domain argument from some functions.
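
(A minimal sketch of the resulting calling convention, not part of the
patch itself: callers now name the ring and pass the domain id
explicitly instead of having the helpers reach into d->mem_event
themselves.

    /* old: rc = mem_event_check_ring(d); */
    rc = mem_event_check_ring(&d->mem_event, d->domain_id);
    if ( rc == 0 )
        mem_event_put_request(d, &d->mem_event, &req);

Once separate rings exist, a caller only substitutes the matching
mem_event_domain instance; the helpers themselves stay unchanged.)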

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 0268e7380953 -r 4e7bcc14b76a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4025,7 +4025,7 @@ static int hvm_memory_event_traps(long p
     if ( (p & HVMPME_onchangeonly) && (value == old) )
         return 1;
     
-    rc = mem_event_check_ring(d);
+    rc = mem_event_check_ring(&d->mem_event, d->domain_id);
     if ( rc )
         return rc;
     
@@ -4048,7 +4048,7 @@ static int hvm_memory_event_traps(long p
         req.gla_valid = 1;
     }
     
-    mem_event_put_request(d, &req);      
+    mem_event_put_request(d, &d->mem_event, &req);
     
     return 1;
 }
diff -r 0268e7380953 -r 4e7bcc14b76a xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -33,21 +33,21 @@
 #define xen_rmb()  rmb()
 #define xen_wmb()  wmb()
 
-#define mem_event_ring_lock_init(_d)  spin_lock_init(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock_init(_med)  spin_lock_init(&(_med)->ring_lock)
+#define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
+#define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
 
-static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn)
 {
     int rc;
 
     /* Map ring and shared pages */
-    d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
-    if ( d->mem_event.ring_page == NULL )
+    med->ring_page = map_domain_page(mfn_x(ring_mfn));
+    if ( med->ring_page == NULL )
         goto err;
 
-    d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
-    if ( d->mem_event.shared_page == NULL )
+    med->shared_page = map_domain_page(mfn_x(shared_mfn));
+    if ( med->shared_page == NULL )
         goto err_ring;
 
     /* Allocate event channel */
@@ -56,15 +56,15 @@ static int mem_event_enable(struct domai
     if ( rc < 0 )
         goto err_shared;
 
-    ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
-    d->mem_event.xen_port = rc;
+    ((mem_event_shared_page_t *)med->shared_page)->port = rc;
+    med->xen_port = rc;
 
     /* Prepare ring buffer */
-    FRONT_RING_INIT(&d->mem_event.front_ring,
-                    (mem_event_sring_t *)d->mem_event.ring_page,
+    FRONT_RING_INIT(&med->front_ring,
+                    (mem_event_sring_t *)med->ring_page,
                     PAGE_SIZE);
 
-    mem_event_ring_lock_init(d);
+    mem_event_ring_lock_init(med);
 
     /* Wake any VCPUs paused for memory events */
     mem_event_unpause_vcpus(d);
@@ -72,34 +72,34 @@ static int mem_event_enable(struct domai
     return 0;
 
  err_shared:
-    unmap_domain_page(d->mem_event.shared_page);
-    d->mem_event.shared_page = NULL;
+    unmap_domain_page(med->shared_page);
+    med->shared_page = NULL;
  err_ring:
-    unmap_domain_page(d->mem_event.ring_page);
-    d->mem_event.ring_page = NULL;
+    unmap_domain_page(med->ring_page);
+    med->ring_page = NULL;
  err:
     return 1;
 }
 
-static int mem_event_disable(struct domain *d)
+static int mem_event_disable(struct mem_event_domain *med)
 {
-    unmap_domain_page(d->mem_event.ring_page);
-    d->mem_event.ring_page = NULL;
+    unmap_domain_page(med->ring_page);
+    med->ring_page = NULL;
 
-    unmap_domain_page(d->mem_event.shared_page);
-    d->mem_event.shared_page = NULL;
+    unmap_domain_page(med->shared_page);
+    med->shared_page = NULL;
 
     return 0;
 }
 
-void mem_event_put_request(struct domain *d, mem_event_request_t *req)
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX req_prod;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    front_ring = &d->mem_event.front_ring;
+    front_ring = &med->front_ring;
     req_prod = front_ring->req_prod_pvt;
 
     /* Copy request */
@@ -107,23 +107,23 @@ void mem_event_put_request(struct domain
     req_prod++;
 
     /* Update ring */
-    d->mem_event.req_producers--;
+    med->req_producers--;
     front_ring->req_prod_pvt = req_prod;
     RING_PUSH_REQUESTS(front_ring);
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 
-    notify_via_xen_event_channel(d, d->mem_event.xen_port);
+    notify_via_xen_event_channel(d, med->xen_port);
 }
 
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX rsp_cons;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    front_ring = &d->mem_event.front_ring;
+    front_ring = &med->front_ring;
     rsp_cons = front_ring->rsp_cons;
 
     /* Copy response */
@@ -134,7 +134,7 @@ void mem_event_get_response(struct domai
     front_ring->rsp_cons = rsp_cons;
     front_ring->sring->rsp_event = rsp_cons + 1;
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 }
 
 void mem_event_unpause_vcpus(struct domain *d)
@@ -152,35 +152,35 @@ void mem_event_mark_and_pause(struct vcp
     vcpu_sleep_nosync(v);
 }
 
-void mem_event_put_req_producers(struct domain *d)
+void mem_event_put_req_producers(struct mem_event_domain *med)
 {
-    mem_event_ring_lock(d);
-    d->mem_event.req_producers--;
-    mem_event_ring_unlock(d);
+    mem_event_ring_lock(med);
+    med->req_producers--;
+    mem_event_ring_unlock(med);
 }
 
-int mem_event_check_ring(struct domain *d)
+int mem_event_check_ring(struct mem_event_domain *med, domid_t dom_id)
 {
     struct vcpu *curr = current;
     int free_requests;
     int ring_full = 1;
 
-    if ( !d->mem_event.ring_page )
+    if ( !med->ring_page )
         return -1;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
-    if ( d->mem_event.req_producers < free_requests )
+    free_requests = RING_FREE_REQUESTS(&med->front_ring);
+    if ( med->req_producers < free_requests )
     {
-        d->mem_event.req_producers++;
+        med->req_producers++;
         ring_full = 0;
     }
 
-    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
+    if ( (curr->domain->domain_id == dom_id) && ring_full )
         mem_event_mark_and_pause(curr);
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 
     return ring_full;
 }
@@ -230,6 +230,7 @@ int mem_event_domctl(struct domain *d, x
         {
             struct domain *dom_mem_event = current->domain;
             struct vcpu *v = current;
+            struct mem_event_domain *med = &d->mem_event;
             unsigned long ring_addr = mec->ring_addr;
             unsigned long shared_addr = mec->shared_addr;
             l1_pgentry_t l1e;
@@ -242,7 +243,7 @@ int mem_event_domctl(struct domain *d, x
              * the cache is in an undefined state and so is the guest
              */
             rc = -EBUSY;
-            if ( d->mem_event.ring_page )
+            if ( med->ring_page )
                 break;
 
             /* Currently only EPT is supported */
@@ -270,7 +271,7 @@ int mem_event_domctl(struct domain *d, x
                 break;
 
             rc = -EINVAL;
-            if ( mem_event_enable(d, ring_mfn, shared_mfn) != 0 )
+            if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 )
                 break;
 
             rc = 0;
@@ -279,7 +280,7 @@ int mem_event_domctl(struct domain *d, x
 
         case XEN_DOMCTL_MEM_EVENT_OP_DISABLE:
         {
-            rc = mem_event_disable(d);
+            rc = mem_event_disable(&d->mem_event);
         }
         break;
 
diff -r 0268e7380953 -r 4e7bcc14b76a xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -281,12 +281,12 @@ static struct page_info* mem_sharing_all
     vcpu_pause_nosync(v);
     req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
-    if(mem_event_check_ring(d)) return page;
+    if(mem_event_check_ring(&d->mem_event, d->domain_id)) return page;
 
     req.gfn = gfn;
     req.p2mt = p2m_ram_shared;
     req.vcpu_id = v->vcpu_id;
-    mem_event_put_request(d, &req);
+    mem_event_put_request(d, &d->mem_event, &req);
 
     return page;
 }
@@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct do
     mem_event_response_t rsp;
 
     /* Get request off the ring */
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Unpause domain/vcpu */
     if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 0268e7380953 -r 4e7bcc14b76a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -755,7 +755,7 @@ void p2m_mem_paging_drop_page(struct dom
     mem_event_request_t req;
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d) == 0)
+    if ( mem_event_check_ring(&d->mem_event, d->domain_id) == 0)
     {
         /* Send release notification to pager */
         memset(&req, 0, sizeof(req));
@@ -763,7 +763,7 @@ void p2m_mem_paging_drop_page(struct dom
         req.gfn = gfn;
         req.vcpu_id = v->vcpu_id;
 
-        mem_event_put_request(d, &req);
+        mem_event_put_request(d, &d->mem_event, &req);
     }
 }
 
@@ -775,7 +775,7 @@ void p2m_mem_paging_populate(struct doma
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d) )
+    if ( mem_event_check_ring(&d->mem_event, d->domain_id) )
         return;
 
     memset(&req, 0, sizeof(req));
@@ -803,7 +803,7 @@ void p2m_mem_paging_populate(struct doma
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        mem_event_put_req_producers(d);
+        mem_event_put_req_producers(&d->mem_event);
         return;
     }
 
@@ -812,7 +812,7 @@ void p2m_mem_paging_populate(struct doma
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &req);
+    mem_event_put_request(d, &d->mem_event, &req);
 }
 
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
@@ -842,7 +842,7 @@ void p2m_mem_paging_resume(struct domain
     mfn_t mfn;
 
     /* Pull the response off the ring */
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -889,7 +889,7 @@ void p2m_mem_access_check(unsigned long 
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    res = mem_event_check_ring(d);
+    res = mem_event_check_ring(&d->mem_event, d->domain_id);
     if ( res < 0 ) 
     {
         /* No listener */
@@ -933,7 +933,7 @@ void p2m_mem_access_check(unsigned long 
     
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &req);   
+    mem_event_put_request(d, &d->mem_event, &req);
 
     /* VCPU paused, mem event request sent */
 }
@@ -943,7 +943,7 @@ void p2m_mem_access_resume(struct p2m_do
     struct domain *d = p2m->domain;
     mem_event_response_t rsp;
 
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 0268e7380953 -r 4e7bcc14b76a xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -26,10 +26,10 @@
 
 /* Pauses VCPU while marking pause flag for mem event */
 void mem_event_mark_and_pause(struct vcpu *v);
-int mem_event_check_ring(struct domain *d);
-void mem_event_put_req_producers(struct domain *d);
-void mem_event_put_request(struct domain *d, mem_event_request_t *req);
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
+int mem_event_check_ring(struct mem_event_domain *med, domid_t dom_id);
+void mem_event_put_req_producers(struct mem_event_domain *med);
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req);
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);
 
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
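
(For context, a rough sketch of where the series description points: a
subsequent patch creating per-functionality rings. The type and field
names below are hypothetical illustrations, not taken from this patch.

    /* Hypothetical: one mem_event_domain per consumer, so memshare,
     * xenpaging and memaccess each get their own ring. */
    struct mem_event_per_domain
    {
        struct mem_event_domain share;   /* memshare */
        struct mem_event_domain paging;  /* xenpaging */
        struct mem_event_domain access;  /* memaccess */
    };

Passing the mem_event_domain pointer here is what makes that split
possible without changing the helper signatures again.)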
