[Xen-devel] [PATCH V7 11/15] Introduce qemu_ram_ptr_unlock.

To: QEMU-devel <qemu-devel@xxxxxxxxxx>
Subject: [Xen-devel] [PATCH V7 11/15] Introduce qemu_ram_ptr_unlock.
From: anthony.perard@xxxxxxxxxx
Date: Tue, 23 Nov 2010 19:51:46 +0000
Cc: Anthony PERARD <anthony.perard@xxxxxxxxxx>, Xen Devel <xen-devel@xxxxxxxxxxxxxxxxxxx>, Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Delivery-date: Tue, 23 Nov 2010 12:12:41 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1290541910-11332-1-git-send-email-anthony.perard@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1290541910-11332-1-git-send-email-anthony.perard@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

This function allows unlocking a ram_ptr obtained from
qemu_get_ram_ptr. After a call to qemu_ram_ptr_unlock, the pointer may
be unmapped from QEMU when used with Xen.
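
The intended calling pattern is sketched below. This fragment is
illustrative, not part of the patch: dev_read() and its arguments are
hypothetical; only qemu_get_ram_ptr() and qemu_ram_ptr_unlock() are the
real interfaces.

    /* Hypothetical device read: map guest RAM, copy out of it, then
     * release the mapping so the Xen mapcache may unmap it later. */
    static void dev_read(ram_addr_t src, uint8_t *buf, size_t len)
    {
        void *ptr = qemu_get_ram_ptr(src);

        memcpy(buf, ptr, len);
        qemu_ram_ptr_unlock(ptr); /* no-op unless the Xen mapcache is on */
    }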

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
---
 cpu-common.h   |    1 +
 exec.c         |   10 ++++++++++
 xen-mapcache.c |   34 ++++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 0 deletions(-)

diff --git a/cpu-common.h b/cpu-common.h
index a543b5d..8ec01f4 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -46,6 +46,7 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
 void qemu_ram_free(ram_addr_t addr);
 /* This should only be used for ram local to a device.  */
 void *qemu_get_ram_ptr(ram_addr_t addr);
+void qemu_ram_ptr_unlock(void *addr);
 /* This should not be used by devices.  */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 3eb4d04..c6ed96d 100644
--- a/exec.c
+++ b/exec.c
@@ -2950,6 +2950,13 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+void qemu_ram_ptr_unlock(void *addr)
+{
+    if (xen_mapcache_enabled()) {
+        qemu_map_cache_unlock(addr);
+    }
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
@@ -3539,6 +3546,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                     cpu_physical_memory_set_dirty_flags(
                         addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
+                qemu_ram_ptr_unlock(ptr);
             }
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3569,6 +3577,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                     (addr & ~TARGET_PAGE_MASK);
                 memcpy(buf, ptr, l);
+                qemu_ram_ptr_unlock(ptr);
             }
         }
         len -= l;
@@ -3609,6 +3618,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
             /* ROM/RAM case */
             ptr = qemu_get_ram_ptr(addr1);
             memcpy(ptr, buf, l);
+            qemu_ram_ptr_unlock(ptr);
         }
         len -= l;
         buf += l;
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 3e1cca9..23a23f9 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -187,6 +187,40 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
     return mapcache->last_address_vaddr + address_offset;
 }
 
+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    /* Drop exactly one reference; never let the lock count go negative. */
+    if (entry->lock > 0) {
+        entry->lock--;
+    }
+}
+
 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
     MapCacheRev *reventry;
-- 
1.7.1
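
For readers tracing the bookkeeping in qemu_map_cache_unlock() above,
the following is a minimal standalone model of the same idea: a reverse
map from handed-out pointers to buckets, plus a per-bucket lock count.
It assumes a toy 16-bucket cache; none of the names below are QEMU's.
It builds with a plain C compiler on any system that ships <sys/queue.h>.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    #define NR_BUCKETS 16

    /* Reverse-map entry: records which bucket a handed-out pointer pins. */
    struct rev_entry {
        void *vaddr_req;
        unsigned long paddr_index;
        TAILQ_ENTRY(rev_entry) next;
    };

    static TAILQ_HEAD(, rev_entry) locked_entries =
        TAILQ_HEAD_INITIALIZER(locked_entries);
    static int bucket_lock[NR_BUCKETS]; /* outstanding pointers per bucket */

    /* What a mapcache hit does: remember the pointer, pin its bucket. */
    static void model_lock(void *ptr, unsigned long idx)
    {
        struct rev_entry *e = malloc(sizeof(*e));

        e->vaddr_req = ptr;
        e->paddr_index = idx;
        TAILQ_INSERT_HEAD(&locked_entries, e, next);
        bucket_lock[idx % NR_BUCKETS]++;
    }

    /* Mirror of qemu_map_cache_unlock(): look the pointer up in the
     * reverse map, drop that entry, then release one bucket reference. */
    static void model_unlock(void *ptr)
    {
        struct rev_entry *e;

        TAILQ_FOREACH(e, &locked_entries, next) {
            if (e->vaddr_req == ptr) {
                break;
            }
        }
        if (e == NULL) {
            return; /* pointer was never locked; nothing to do */
        }
        TAILQ_REMOVE(&locked_entries, e, next);
        if (bucket_lock[e->paddr_index % NR_BUCKETS] > 0) {
            bucket_lock[e->paddr_index % NR_BUCKETS]--;
        }
        free(e);
    }

    int main(void)
    {
        static char page[4096];

        model_lock(page, 3);
        printf("lock after map:    %d\n", bucket_lock[3]); /* prints 1 */
        model_unlock(page);
        printf("lock after unlock: %d\n", bucket_lock[3]); /* prints 0 */
        return 0;
    }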


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
