WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 2/2] xen: Introduce VGA sync dirty bitmap support

To: QEMU-devel <qemu-devel@xxxxxxxxxx>
Subject: [Xen-devel] [PATCH 2/2] xen: Introduce VGA sync dirty bitmap support
From: anthony.perard@xxxxxxxxxx
Date: Tue, 11 Jan 2011 15:37:03 +0000
Cc: Anthony PERARD <anthony.perard@xxxxxxxxxx>, Xen Devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Delivery-date: Tue, 11 Jan 2011 07:40:56 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1294760223-26151-1-git-send-email-anthony.perard@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1294760223-26151-1-git-send-email-anthony.perard@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

This patch introduces a phys memory client for Xen.

Only sync dirty_bitmap and set_memory are actually implemented.
migration_log will stay empty for the moment.

Xen can only track dirty bits for one range at a time, so only the range
passed in the first call will be synced.

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
---
 hw/vga.c   |    7 ++
 hw/xen.h   |    2 +
 xen-all.c  |  233 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 xen-stub.c |   11 +++
 4 files changed, 253 insertions(+), 0 deletions(-)

diff --git a/hw/vga.c b/hw/vga.c
index c057f4f..5f7a181 100644
--- a/hw/vga.c
+++ b/hw/vga.c
@@ -29,6 +29,7 @@
 #include "pixel_ops.h"
 #include "qemu-timer.h"
 #include "kvm.h"
+#include "xen.h"
 
 //#define DEBUG_VGA
 //#define DEBUG_VGA_MEM
@@ -1599,6 +1600,9 @@ void vga_dirty_log_start(VGACommonState *s)
 {
     if (kvm_enabled() && s->map_addr)
         kvm_log_start(s->map_addr, s->map_end - s->map_addr);
+    if (xen_enabled() && s->map_addr) {
+        xen_log_start(s->map_addr, s->map_end - s->map_addr);
+    }
 
     if (kvm_enabled() && s->lfb_vram_mapped) {
         kvm_log_start(isa_mem_base + 0xa0000, 0x8000);
@@ -1616,6 +1620,9 @@ void vga_dirty_log_stop(VGACommonState *s)
 {
     if (kvm_enabled() && s->map_addr)
        kvm_log_stop(s->map_addr, s->map_end - s->map_addr);
+    if (xen_enabled() && s->map_addr) {
+        xen_log_stop(s->map_addr, s->map_end - s->map_addr);
+    }
 
     if (kvm_enabled() && s->lfb_vram_mapped) {
        kvm_log_stop(isa_mem_base + 0xa0000, 0x8000);
diff --git a/hw/xen.h b/hw/xen.h
index 8920550..b6cb098 100644
--- a/hw/xen.h
+++ b/hw/xen.h
@@ -50,6 +50,8 @@ int xen_init(int smp_cpus);
 
 #if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
 void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size);
+int xen_log_start(target_phys_addr_t phys_addr, ram_addr_t size);
+int xen_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
 #endif
 
 #endif /* QEMU_HW_XEN_H */
diff --git a/xen-all.c b/xen-all.c
index 939d9b7..f38c803 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -63,6 +63,18 @@ typedef struct XenIOState {
     Notifier exit;
 } XenIOState;
 
+typedef struct XenPhysmap {
+    target_phys_addr_t start_addr;
+    ram_addr_t size;
+    target_phys_addr_t phys_offset;
+    int flags;
+
+    QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
+static QLIST_HEAD(, XenPhysmap) xen_physmap =
+    QLIST_HEAD_INITIALIZER(xen_physmap);
+
 /* Xen specific function for piix pci */
 
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
@@ -162,6 +174,226 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
     qemu_free(pfn_list);
 }
 
+static XenPhysmap *link_exist(target_phys_addr_t start_addr)
+{
+    XenPhysmap *physmap = NULL;
+
+    start_addr = TARGET_PAGE_ALIGN(start_addr);
+    QLIST_FOREACH(physmap, &xen_physmap, list) {
+        if (physmap->size > 0 && physmap->start_addr == start_addr) {
+            return physmap;
+        }
+    }
+    return NULL;
+}
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
+static int already_physmapped(target_phys_addr_t phys_offset)
+{
+    XenPhysmap *physmap = NULL;
+
+    phys_offset = TARGET_PAGE_ALIGN(phys_offset);
+    QLIST_FOREACH(physmap, &xen_physmap, list) {
+        if (physmap->size > 0 && physmap->phys_offset <= phys_offset &&
+            phys_offset <= (physmap->phys_offset + physmap->size)) {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+static int xen_add_to_physmap(target_phys_addr_t start_addr,
+                              ram_addr_t size,
+                              target_phys_addr_t phys_offset)
+{
+    unsigned long i = 0;
+    int rc = 0;
+    XenPhysmap *physmap = NULL;
+
+    if (already_physmapped(phys_offset)) {
+        return 0;
+    }
+
+    DPRINTF("mapping vram to %llx - %llx, from %llx\n", start_addr, start_addr + size, phys_offset);
+    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
+        unsigned long idx = (phys_offset >> TARGET_PAGE_BITS) + i;
+
+        xen_pfn_t gpfn = (start_addr >> TARGET_PAGE_BITS) + i;
+        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+        if (rc) {
+            fprintf(stderr, "xen: add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+            return -rc;
+        }
+    }
+
+    physmap = qemu_malloc(sizeof (XenPhysmap));
+
+    physmap->start_addr = start_addr;
+    physmap->size = size;
+    physmap->phys_offset = phys_offset;
+
+    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
+
+    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
+                                   start_addr >> TARGET_PAGE_BITS,
+                                   (start_addr + size) >> TARGET_PAGE_BITS,
+                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
+    return 0;
+}
+
+static int xen_remove_from_physmap(target_phys_addr_t start_addr,
+                                   ram_addr_t size)
+{
+    unsigned long i = 0;
+    int rc = 0;
+    XenPhysmap *physmap = NULL;
+    target_phys_addr_t phys_offset = 0;
+
+    physmap = link_exist(start_addr);
+    if (physmap == NULL) {
+        return -1;
+    }
+
+    phys_offset = physmap->phys_offset;
+    size = physmap->size;
+
+    DPRINTF("unmapping vram to %llx - %llx, from %llx\n", phys_offset, phys_offset + size, start_addr);
+    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
+        unsigned long idx = (start_addr >> TARGET_PAGE_BITS) + i;
+
+        xen_pfn_t gpfn = (phys_offset >> TARGET_PAGE_BITS) + i;
+        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+        if (rc) {
+            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+            return -rc;
+        }
+    }
+
+    QLIST_REMOVE(physmap, list);
+    free(physmap);
+
+    return 0;
+}
+
+#else
+static int xen_add_to_physmap(target_phys_addr_t start_addr,
+                              ram_addr_t size,
+                              target_phys_addr_t phys_offset)
+{
+    return -ENOSYS;
+}
+
+static int xen_remove_from_physmap(target_phys_addr_t start_addr,
+                                   ram_addr_t size)
+{
+    return -ENOSYS;
+}
+#endif
+
+static void xen_client_set_memory(struct CPUPhysMemoryClient *client,
+                                 target_phys_addr_t start_addr,
+                                 ram_addr_t size,
+                                 ram_addr_t phys_offset)
+{
+    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+    hvmmem_type_t mem_type;
+
+    start_addr = TARGET_PAGE_ALIGN(start_addr);
+    size = TARGET_PAGE_ALIGN(size);
+    phys_offset = TARGET_PAGE_ALIGN(phys_offset);
+
+    /* Xen does not need to know about this memory */
+    if (flags > IO_MEM_UNASSIGNED) {
+        return;
+    }
+
+    switch (flags) {
+    case IO_MEM_RAM:
+        xen_add_to_physmap(start_addr, size, phys_offset);
+        break;
+    case IO_MEM_ROM:
+        mem_type = HVMMEM_ram_ro;
+        if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
+                                start_addr >> TARGET_PAGE_BITS,
+                                size >> TARGET_PAGE_BITS)) {
+            DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
+                    start_addr);
+        }
+        break;
+    case IO_MEM_UNASSIGNED:
+        if (xen_remove_from_physmap(start_addr, size) < 0) {
+            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
+        }
+        break;
+    }
+}
+
+static int xen_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                 ram_addr_t size)
+{
+    target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
+    target_phys_addr_t vram_offset = 0;
+    const int width = sizeof(unsigned long) * 8;
+    unsigned long bitmap[(npages + width - 1) / width];
+    int rc, i, j;
+    XenPhysmap *physmap = NULL;
+
+    physmap = link_exist(start_addr);
+    if (physmap) {
+        vram_offset = physmap->phys_offset;
+    } else {
+        vram_offset = start_addr;
+    }
+    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
+                                 start_addr >> TARGET_PAGE_BITS, npages,
+                                 bitmap);
+    if (rc) {
+        return rc;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
+        unsigned long map = bitmap[i];
+        while (map != 0) {
+            j = ffsl(map) - 1;
+            map &= ~(1ul << j);
+            cpu_physical_memory_set_dirty(vram_offset + (i * width + j) * TARGET_PAGE_SIZE);
+        };
+    }
+
+    return 0;
+}
+
+int xen_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    return xen_sync_dirty_bitmap(phys_addr, size);
+}
+
+int xen_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    /* Disable dirty bit tracking */
+    return xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
+}
+
+static int xen_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
+                                       target_phys_addr_t start_addr,
+                                       target_phys_addr_t end_addr)
+{
+    return xen_sync_dirty_bitmap(start_addr, end_addr - start_addr);
+}
+
+static int xen_client_migration_log(struct CPUPhysMemoryClient *client,
+                                   int enable)
+{
+    return -1;
+}
+
+static CPUPhysMemoryClient xen_cpu_phys_memory_client = {
+    .set_memory = xen_client_set_memory,
+    .sync_dirty_bitmap = xen_client_sync_dirty_bitmap,
+    .migration_log = xen_client_migration_log,
+};
 
 /* VCPU Operations, MMIO, IO ring ... */
 
@@ -552,6 +784,7 @@ int xen_init(int smp_cpus)
     xen_ram_init(ram_size);
 
     qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);
+    cpu_register_phys_memory_client(&xen_cpu_phys_memory_client);
 
     return 0;
 }
diff --git a/xen-stub.c b/xen-stub.c
index d22f475..f6feee7 100644
--- a/xen-stub.c
+++ b/xen-stub.c
@@ -28,6 +28,17 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
 {
 }
 
+int xen_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    return -ENOSYS;
+}
+
+int xen_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
+{
+    return -ENOSYS;
+}
+
+
 void xen_set_hvm_sleep_state(void)
 {
 }
-- 
1.7.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel