[Xen-devel] [PATCH V6 13/15] xen: Initialize event channels and io rings

To: QEMU-devel <qemu-devel@xxxxxxxxxx>
Subject: [Xen-devel] [PATCH V6 13/15] xen: Initialize event channels and io rings
From: anthony.perard@xxxxxxxxxx
Date: Thu, 21 Oct 2010 18:36:25 +0100
Cc: Arun Sharma <arun.sharma@xxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Xen Devel <xen-devel@xxxxxxxxxxxxxxxxxxx>, Anthony Liguori <anthony@xxxxxxxxxxxxx>, Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Delivery-date: Thu, 21 Oct 2010 10:56:12 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1287682587-18642-1-git-send-email-anthony.perard@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1287682587-18642-1-git-send-email-anthony.perard@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
From: Arun Sharma <arun.sharma@xxxxxxxxx>

Open and bind event channels; map ioreq and buffered ioreq rings.
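
For context, the sequence this patch wires up is roughly: open an event channel handle, read the pfns of the ioreq and buffered ioreq pages from the HVM params, map those pages, and bind one interdomain event channel per vcpu. A minimal standalone sketch of that sequence follows (assuming the pre-4.1 int-handle flavour of libxenctrl and the older vcpu_iodata shared-page layout, as the compat macros in this patch allow for; setup_ioreq_server and nr_vcpus are illustrative names only, and error reporting is trimmed):

/* Sketch only: pre-4.1 libxenctrl calling convention (int handles), with
 * error reporting and the QEMU glue (timers, fd handlers) omitted. */
#include <sys/mman.h>
#include <xenctrl.h>
#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

static int setup_ioreq_server(int domid, int nr_vcpus)  /* illustrative name */
{
    int xc = xc_interface_open();        /* hypercall interface */
    int xce = xc_evtchn_open();          /* event channel interface */
    unsigned long ioreq_pfn;
    shared_iopage_t *shared_page;
    int i;

    if (xc == -1 || xce == -1) {
        return -1;
    }

    /* Ask Xen where the guest's ioreq page lives, then map it R/W. */
    xc_get_hvm_param(xc, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    shared_page = xc_map_foreign_range(xc, domid, XC_PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);
    if (shared_page == NULL) {
        return -1;
    }

    /* Bind a local port to the remote port Xen advertises for each vcpu;
     * ioreq notifications for that vcpu then arrive on the local port.
     * (Older shared-page layout; newer headers use vcpu_ioreq[i] instead,
     * cf. the compat macros in xen-all.c below.) */
    for (i = 0; i < nr_vcpus; i++) {
        int port = xc_evtchn_bind_interdomain(xce, domid,
                       shared_page->vcpu_iodata[i].vp_eport);
        if (port == -1) {
            return -1;
        }
        /* remember port; select()/poll() on xc_evtchn_fd(xce) to learn
         * when an ioreq is ready, as cpu_handle_ioreq() below does. */
    }
    return 0;
}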

Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 hw/xen_common.h |    3 +
 xen-all.c       |  407 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 410 insertions(+), 0 deletions(-)

diff --git a/hw/xen_common.h b/hw/xen_common.h
index 2773b45..b3d1fe3 100644
--- a/hw/xen_common.h
+++ b/hw/xen_common.h
@@ -26,6 +26,7 @@
 /* Xen unstable */
 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410
 typedef int qemu_xc_interface;
+#  define XC_INTERFACE_FMT "%i"
 #  define XC_HANDLER_INITIAL_VALUE               -1
 #  define xc_fd(xen_xc)                          xen_xc
 #  define xc_interface_open(l, dl, f)            xc_interface_open()
@@ -40,6 +41,7 @@ typedef int qemu_xc_interface;
     xc_map_foreign_batch(xc, domid, opts, pfns, size)
 #else
 typedef xc_interface *qemu_xc_interface;
+#  define XC_INTERFACE_FMT "%p"
 #  define XC_HANDLER_INITIAL_VALUE NULL
 /* FIXME The fd of xen_xc is now xen_xc->fd */
 /* fd is the first field, so this works */
@@ -47,5 +49,6 @@ typedef xc_interface *qemu_xc_interface;
 #endif
 
 qemu_irq *i8259_xen_init(void);
+void destroy_hvm_domain(void);
 
 #endif /* QEMU_HW_XEN_COMMON_H */
diff --git a/xen-all.c b/xen-all.c
index 3048c4d..a0d1b9b 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -8,12 +8,58 @@
 
 #include "config.h"
 
+#include <sys/mman.h>
+
 #include "hw/pci.h"
 #include "hw/xen_common.h"
 #include "hw/xen_backend.h"
 
 #include "xen-mapcache.h"
 
+#include <xen/hvm/ioreq.h>
+#include <xen/hvm/params.h>
+
+//#define DEBUG_XEN
+
+#ifdef DEBUG_XEN
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+/* Compatibility with older version */
+#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
+#  define xen_vcpu_eport(shared_page, i) \
+    (shared_page->vcpu_iodata[i].vp_eport)
+#  define xen_vcpu_ioreq(shared_page, vcpu) \
+    (shared_page->vcpu_iodata[vcpu].vp_ioreq)
+#  define FMT_ioreq_size PRIx64
+#else
+#  define xen_vcpu_eport(shared_page, i) \
+    (shared_page->vcpu_ioreq[i].vp_eport)
+#  define xen_vcpu_ioreq(shared_page, vcpu) \
+    (shared_page->vcpu_ioreq[vcpu])
+#  define FMT_ioreq_size "u"
+#endif
+
+#define BUFFER_IO_MAX_DELAY  100
+
+typedef struct XenIOState {
+    shared_iopage_t *shared_page;
+    buffered_iopage_t *buffered_io_page;
+    QEMUTimer *buffered_io_timer;
+    /* the evtchn ports for polling notifications, one per vcpu */
+    evtchn_port_t *ioreq_local_port;
+    /* the evtchn fd for polling */
+    int xce_handle;
+    /* which vcpu we are serving */
+    int send_vcpu;
+
+    Notifier exit;
+} XenIOState;
+
 /* Xen specific function for piix pci */
 
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
@@ -114,19 +160,380 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
 }
 
 
+/* VCPU Operations, MMIO, IO ring ... */
+
+/* get the ioreq packet from shared memory */
+static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
+{
+    ioreq_t *req = &xen_vcpu_ioreq(state->shared_page, vcpu);
+
+    if (req->state != STATE_IOREQ_READY) {
+        DPRINTF("I/O request not ready: "
+                "%x, ptr: %x, port: %"PRIx64", "
+                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" 
FMT_ioreq_size "\n",
+                req->state, req->data_is_ptr, req->addr,
+                req->data, req->count, req->size);
+        return NULL;
+    }
+
+    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+    req->state = STATE_IOREQ_INPROCESS;
+    return req;
+}
+
+/* use poll to get the port notification */
+/* returns the pending ioreq packet for the signalled vcpu, or NULL if none */
+static ioreq_t *cpu_get_ioreq(XenIOState *state)
+{
+    int i;
+    evtchn_port_t port;
+
+    port = xc_evtchn_pending(state->xce_handle);
+    if (port != -1) {
+        for (i = 0; i < smp_cpus; i++) {
+            if (state->ioreq_local_port[i] == port) {
+                break;
+            }
+        }
+
+        if (i == smp_cpus) {
+            hw_error("Fatal error while trying to get io event!\n");
+        }
+
+        /* unmask the wanted port again */
+        xc_evtchn_unmask(state->xce_handle, port);
+
+        /* get the io packet from shared memory */
+        state->send_vcpu = i;
+        return cpu_get_ioreq_from_shared_memory(state, i);
+    }
+
+    /* read error or read nothing */
+    return NULL;
+}
+
+static uint32_t do_inp(pio_addr_t addr, unsigned long size)
+{
+    switch (size) {
+        case 1:
+            return cpu_inb(addr);
+        case 2:
+            return cpu_inw(addr);
+        case 4:
+            return cpu_inl(addr);
+        default:
+            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
+    }
+}
+
+static void do_outp(pio_addr_t addr,
+        unsigned long size, uint32_t val)
+{
+    switch (size) {
+        case 1:
+            return cpu_outb(addr, val);
+        case 2:
+            return cpu_outw(addr, val);
+        case 4:
+            return cpu_outl(addr, val);
+        default:
+            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
+    }
+}
+
+static void cpu_ioreq_pio(ioreq_t *req)
+{
+    int i, sign;
+
+    sign = req->df ? -1 : 1;
+
+    if (req->dir == IOREQ_READ) {
+        if (!req->data_is_ptr) {
+            req->data = do_inp(req->addr, req->size);
+        } else {
+            uint32_t tmp;
+
+            for (i = 0; i < req->count; i++) {
+                tmp = do_inp(req->addr, req->size);
+                cpu_physical_memory_write(req->data + (sign * i * req->size),
+                        (uint8_t *) &tmp, req->size);
+            }
+        }
+    } else if (req->dir == IOREQ_WRITE) {
+        if (!req->data_is_ptr) {
+            do_outp(req->addr, req->size, req->data);
+        } else {
+            for (i = 0; i < req->count; i++) {
+                uint32_t tmp = 0;
+
+                cpu_physical_memory_read(req->data + (sign * i * req->size),
+                        (uint8_t*) &tmp, req->size);
+                do_outp(req->addr, req->size, tmp);
+            }
+        }
+    }
+}
+
+static void cpu_ioreq_move(ioreq_t *req)
+{
+    int i, sign;
+
+    sign = req->df ? -1 : 1;
+
+    if (!req->data_is_ptr) {
+        if (req->dir == IOREQ_READ) {
+            for (i = 0; i < req->count; i++) {
+                cpu_physical_memory_read(req->addr + (sign * i * req->size),
+                        (uint8_t *) &req->data, req->size);
+            }
+        } else if (req->dir == IOREQ_WRITE) {
+            for (i = 0; i < req->count; i++) {
+                cpu_physical_memory_write(req->addr + (sign * i * req->size),
+                        (uint8_t *) &req->data, req->size);
+            }
+        }
+    } else {
+        target_ulong tmp;
+
+        if (req->dir == IOREQ_READ) {
+            for (i = 0; i < req->count; i++) {
+                cpu_physical_memory_read(req->addr + (sign * i * req->size),
+                        (uint8_t*) &tmp, req->size);
+                cpu_physical_memory_write(req->data + (sign * i * req->size),
+                        (uint8_t*) &tmp, req->size);
+            }
+        } else if (req->dir == IOREQ_WRITE) {
+            for (i = 0; i < req->count; i++) {
+                cpu_physical_memory_read(req->data + (sign * i * req->size),
+                        (uint8_t*) &tmp, req->size);
+                cpu_physical_memory_write(req->addr + (sign * i * req->size),
+                        (uint8_t*) &tmp, req->size);
+            }
+        }
+    }
+}
+
+static void handle_ioreq(ioreq_t *req)
+{
+    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
+            (req->size < sizeof (target_ulong))) {
+        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
+    }
+
+    switch (req->type) {
+        case IOREQ_TYPE_PIO:
+            cpu_ioreq_pio(req);
+            break;
+        case IOREQ_TYPE_COPY:
+            cpu_ioreq_move(req);
+            break;
+        case IOREQ_TYPE_TIMEOFFSET:
+            break;
+        case IOREQ_TYPE_INVALIDATE:
+            qemu_invalidate_map_cache();
+            break;
+        default:
+            hw_error("Invalid ioreq type 0x%x\n", req->type);
+    }
+}
+
+static void handle_buffered_iopage(XenIOState *state)
+{
+    buf_ioreq_t *buf_req = NULL;
+    ioreq_t req;
+    int qw;
+
+    if (!state->buffered_io_page) {
+        return;
+    }
+
+    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
+        buf_req = &state->buffered_io_page->buf_ioreq[
+            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
+        req.size = 1UL << buf_req->size;
+        req.count = 1;
+        req.addr = buf_req->addr;
+        req.data = buf_req->data;
+        req.state = STATE_IOREQ_READY;
+        req.dir = buf_req->dir;
+        req.df = 1;
+        req.type = buf_req->type;
+        req.data_is_ptr = 0;
+        qw = (req.size == 8);
+        if (qw) {
+            buf_req = &state->buffered_io_page->buf_ioreq[
+                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
+            req.data |= ((uint64_t)buf_req->data) << 32;
+        }
+
+        handle_ioreq(&req);
+
+        xen_mb();
+        state->buffered_io_page->read_pointer += qw ? 2 : 1;
+    }
+}
+
+static void handle_buffered_io(void *opaque)
+{
+    XenIOState *state = opaque;
+
+    handle_buffered_iopage(state);
+    qemu_mod_timer(state->buffered_io_timer,
+                   BUFFER_IO_MAX_DELAY + qemu_get_clock(rt_clock));
+}
+
+static void cpu_handle_ioreq(void *opaque)
+{
+    XenIOState *state = opaque;
+    ioreq_t *req = cpu_get_ioreq(state);
+
+    handle_buffered_iopage(state);
+    if (req) {
+        handle_ioreq(req);
+
+        if (req->state != STATE_IOREQ_INPROCESS) {
+            fprintf(stderr, "Badness in I/O request ... not in service?!: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" 
FMT_ioreq_size "\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size);
+            destroy_hvm_domain();
+            return;
+        }
+
+        xen_wmb(); /* Update ioreq contents /then/ update state. */
+
+        /*
+         * We do this before we send the response so that the tools
+         * have the opportunity to pick up on the reset before the
+         * guest resumes and does a hlt with interrupts disabled which
+         * causes Xen to powerdown the domain.
+         */
+        if (vm_running) {
+            if (qemu_shutdown_requested_get()) {
+                destroy_hvm_domain();
+            }
+            if (qemu_reset_requested_get()) {
+                qemu_system_reset();
+            }
+        }
+
+        req->state = STATE_IORESP_READY;
+        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
+    }
+}
+
+static void xen_main_loop_prepare(XenIOState *state)
+{
+    int evtchn_fd = state->xce_handle == -1 ? -1 : xc_evtchn_fd(state->xce_handle);
+
+    state->buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
+                                       state);
+    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock(rt_clock));
+
+    if (evtchn_fd != -1) {
+        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
+    }
+}
+
+
 /* Initialise Xen */
 
+static void xen_vm_change_state_handler(void *opaque, int running, int reason)
+{
+    XenIOState *state = opaque;
+    if (running) {
+        xen_main_loop_prepare(state);
+    }
+}
+
+static void xen_exit_notifier(Notifier *n)
+{
+    XenIOState *state = container_of(n, XenIOState, exit);
+
+    xc_evtchn_close(state->xce_handle);
+}
+
 int xen_init(int smp_cpus)
 {
+    int i, rc;
+    unsigned long ioreq_pfn;
+    XenIOState *state;
+
     xen_xc = xc_interface_open(NULL, NULL, 0);
     if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
         xen_be_printf(NULL, 0, "can't open xen interface\n");
         return -1;
     }
 
+    state = qemu_mallocz(sizeof (XenIOState));
+
+    state->xce_handle = xc_evtchn_open();
+    if (state->xce_handle == -1) {
+        perror("xen: event channel open");
+        return -errno;
+    }
+
+    state->exit.notify = xen_exit_notifier;
+    qemu_add_exit_notifier(&state->exit);
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
+    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
+                                               PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (state->shared_page == NULL) {
+        hw_error("map shared IO page returned error %d handle=" 
XC_INTERFACE_FMT,
+                 errno, xen_xc);
+    }
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
+    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
+    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
+                                            PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (state->buffered_io_page == NULL) {
+        hw_error("map buffered IO page returned error %d", errno);
+    }
+
+    state->ioreq_local_port = qemu_mallocz(smp_cpus * sizeof (evtchn_port_t));
+
+    /* FIXME: how about if we overflow the page here? */
+    for (i = 0; i < smp_cpus; i++) {
+        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+                                        xen_vcpu_eport(state->shared_page, i));
+        if (rc == -1) {
+            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+            return -1;
+        }
+        state->ioreq_local_port[i] = rc;
+    }
+
     /* Init RAM management */
     qemu_map_cache_init();
     xen_ram_init(ram_size);
 
+    qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);
+
     return 0;
 }
+
+void destroy_hvm_domain(void)
+{
+    qemu_xc_interface xc_handle;
+    int sts;
+
+    xc_handle = xc_interface_open(NULL, NULL, 0);
+    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
+        fprintf(stderr, "Cannot acquire xenctrl handle\n");
+    } else {
+        sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff);
+        if (sts != 0) {
+            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
+                    "sts %d, %s\n", sts, strerror(errno));
+        } else {
+            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
+        }
+        xc_interface_close(xc_handle);
+    }
+}
-- 
1.7.1

