[Xen-devel] [PATCH 09/15] xen: Initialize event channels and io rings

From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

Open and bind event channels; map ioreq and buffered ioreq rings.

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
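[Reviewer note, not part of the patch: since the change is spread across three files, here is a condensed sketch of the initialization flow the hunks below implement, using the same libxenctrl and QEMU calls the patch itself relies on. The helper name xen_ioreq_init_sketch is made up for illustration and most error handling is trimmed; treat it as an outline of the patch, not as code to apply.

static int xen_ioreq_init_sketch(void)
{
    unsigned long ioreq_pfn;
    int i, rc;

    /* Map the guest's shared ioreq page and buffered-ioreq page. */
    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                       PROT_READ|PROT_WRITE, ioreq_pfn);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                            PROT_READ|PROT_WRITE, ioreq_pfn);
    if (shared_page == NULL || buffered_io_page == NULL)
        return -1;

    /* Open the event channel interface and bind one interdomain port per
     * vcpu; the guest-side ports are advertised in the shared ioreq page. */
    xce_handle = xc_evtchn_open();
    if (xce_handle == -1)
        return -1;

    ioreq_local_port = qemu_mallocz(smp_cpus * sizeof(evtchn_port_t));
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(xce_handle, xen_domid,
                                        shared_page->vcpu_ioreq[i].vp_eport);
        if (rc == -1)
            return -1;
        ioreq_local_port[i] = rc;
    }

    /* Hand the event channel fd to QEMU's main loop and arm the timer that
     * periodically drains the buffered-ioreq ring. */
    qemu_set_fd_handler(xc_evtchn_fd(xce_handle), cpu_handle_ioreq,
                        NULL, cpu_single_env);
    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));

    return 0;
}
]
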
 hw/xen_machine_fv.c   |   25 ++++
 target-xen/cpu.h      |    1 +
 target-xen/helper.c   |  362 +++++++++++++++++++++++++++++++++++++++++++++++++
 target-xen/qemu-xen.h |    2 +
 4 files changed, 390 insertions(+), 0 deletions(-)

diff --git a/hw/xen_machine_fv.c b/hw/xen_machine_fv.c
index a6e778a..b1bc88d 100644
--- a/hw/xen_machine_fv.c
+++ b/hw/xen_machine_fv.c
@@ -22,6 +22,9 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include "config.h"
+
+#include <sys/mman.h>
 
 #include "hw.h"
 #include "pc.h"
@@ -71,12 +74,34 @@ static void xen_init_fv(ram_addr_t ram_size,
 
     CPUState *env;
 
+    unsigned long ioreq_pfn;
+    extern void *shared_page;
+    extern void *buffered_io_page;
+
     /* Initialize backend core & drivers */
     if (xen_dm_init() != 0) {
         fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
         exit(1);
     }
 
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
+    fprintf(stderr, "shared page at pfn %lx\n", ioreq_pfn);
+    shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
+            PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (shared_page == NULL) {
+        fprintf(stderr, "map shared IO page returned error %d handle=%p\n", 
errno, xen_xc);
+        exit(-1);
+    }
+
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
+    fprintf(stderr, "buffered io page at pfn %lx\n", ioreq_pfn);
+    buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
+                                            PROT_READ|PROT_WRITE, ioreq_pfn);
+    if (buffered_io_page == NULL) {
+        fprintf(stderr, "map buffered IO page returned error %d\n", errno);
+        exit(-1);
+    }
+
     /* Initialize a dummy CPU */
     if (cpu_model == NULL) {
 #ifdef TARGET_X86_64
diff --git a/target-xen/cpu.h b/target-xen/cpu.h
index 5a45d1c..573241f 100644
--- a/target-xen/cpu.h
+++ b/target-xen/cpu.h
@@ -72,6 +72,7 @@ typedef struct CPUXenState {
 
 CPUXenState *cpu_xen_init(const char *cpu_model);
 int cpu_xen_exec(CPUXenState *s);
+void cpu_xen_close(CPUXenState *s);
 
 int cpu_get_pic_interrupt(CPUXenState *s);
 void cpu_set_ferr(CPUX86State *s);
diff --git a/target-xen/helper.c b/target-xen/helper.c
index 8cb7771..4571ac0 100644
--- a/target-xen/helper.c
+++ b/target-xen/helper.c
@@ -18,25 +18,77 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include "config.h"
+
+#include <inttypes.h>
+
+#include <xenctrl.h>
+#include <xen/hvm/ioreq.h>
+
 #include "cpu.h"
 #include "qemu-xen.h"
 #include "xenstore.h"
+#include "hw/xen_backend.h"
+
+long time_offset = 0;
+
+shared_iopage_t *shared_page = NULL;
+
+#define BUFFER_IO_MAX_DELAY  100
+buffered_iopage_t *buffered_io_page = NULL;
+QEMUTimer *buffered_io_timer;
+
+/* the evtchn fd for polling */
+int xce_handle = -1;
+
+/* which vcpu we are serving */
+int send_vcpu = 0;
+
+/* the evtchn port for polling the notification, */
+evtchn_port_t *ioreq_local_port;
 
 CPUXenState *cpu_xen_init(const char *cpu_model)
 {
     CPUXenState *env = NULL;
     static int inited;
+    int i, rc;
 
     env = qemu_mallocz(sizeof(CPUXenState));
     if (!env)
         return NULL;
     cpu_exec_init(env);
 
+    /* There is no shared_page for PV, we're done now */
+    if (shared_page == NULL)
+        return env;
+
+    ioreq_local_port =
+        (evtchn_port_t *)qemu_mallocz(smp_cpus * sizeof(evtchn_port_t));
+    if (!ioreq_local_port)
+        return NULL;
+
     /* init various static tables */
     if (!inited) {
         inited = 1;
 
         cpu_single_env = env;
+
+        xce_handle = xc_evtchn_open();
+        if (xce_handle == -1) {
+            perror("open");
+            return NULL;
+        }
+
+        /* FIXME: how about if we overflow the page here? */
+        for (i = 0; i < smp_cpus; i++) {
+            rc = xc_evtchn_bind_interdomain(
+                    xce_handle, xen_domid, shared_page->vcpu_ioreq[i].vp_eport);
+            if (rc == -1) {
+                fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+                return NULL;
+            }
+            ioreq_local_port[i] = rc;
+        }
     }
 
     return env;
@@ -70,7 +122,317 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
     return addr;
 }
 
+// Get the pending ioreq packet for the given vcpu from shared memory.
+static ioreq_t *__cpu_get_ioreq(int vcpu)
+{
+    ioreq_t *req = &shared_page->vcpu_ioreq[vcpu];
+
+    if (req->state != STATE_IOREQ_READY) {
+        fprintf(stderr, "I/O request not ready: "
+                "%x, ptr: %x, port: %"PRIx64", "
+                "data: %"PRIx64", count: %u, size: %u\n",
+                req->state, req->data_is_ptr, req->addr,
+                req->data, req->count, req->size);
+        return NULL;
+    }
+
+    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+    req->state = STATE_IOREQ_INPROCESS;
+    return req;
+}
+
+// Poll the event channel for a port notification, work out which vcpu
+// it belongs to, unmask the port and return that vcpu's pending ioreq
+// packet, or NULL if nothing is pending.
+static ioreq_t *cpu_get_ioreq(void)
+{
+    int i;
+    evtchn_port_t port;
+
+    port = xc_evtchn_pending(xce_handle);
+    if (port != -1) {
+        for ( i = 0; i < smp_cpus; i++ )
+            if ( ioreq_local_port[i] == port )
+                break;
+
+        if ( i == smp_cpus ) {
+            fprintf(stderr, "Fatal error while trying to get io event!\n");
+            exit(1);
+        }
+
+        // unmask the wanted port again
+        xc_evtchn_unmask(xce_handle, port);
+
+        // get the io packet from shared memory
+        send_vcpu = i;
+        return __cpu_get_ioreq(i);
+    }
+
+    // read error or read nothing
+    return NULL;
+}
+
+static unsigned long do_inp(CPUState *env, unsigned long addr,
+        unsigned long size)
+{
+    switch(size) {
+        case 1:
+            return cpu_inb(addr);
+        case 2:
+            return cpu_inw(addr);
+        case 4:
+            return cpu_inl(addr);
+        default:
+            fprintf(stderr, "inp: bad size: %lx %lx\n", addr, size);
+            exit(-1);
+    }
+}
+
+static void do_outp(CPUState *env, unsigned long addr,
+        unsigned long size, unsigned long val)
+{
+    switch(size) {
+        case 1:
+            return cpu_outb(addr, val);
+        case 2:
+            return cpu_outw(addr, val);
+        case 4:
+            return cpu_outl(addr, val);
+        default:
+            fprintf(stderr, "outp: bad size: %lx %lx\n", addr, size);
+            exit(-1);
+    }
+}
+
+static inline void read_physical(uint64_t addr, unsigned long size, void *val)
+{
+    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
+}
+
+static inline void write_physical(uint64_t addr, unsigned long size, void *val)
+{
+    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
+}
+
+static void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
+{
+    int i, sign;
+
+    sign = req->df ? -1 : 1;
+
+    if (req->dir == IOREQ_READ) {
+        if (!req->data_is_ptr) {
+            req->data = do_inp(env, req->addr, req->size);
+        } else {
+            unsigned long tmp;
+
+            for (i = 0; i < req->count; i++) {
+                tmp = do_inp(env, req->addr, req->size);
+                write_physical((target_phys_addr_t) req->data
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+            }
+        }
+    } else if (req->dir == IOREQ_WRITE) {
+        if (!req->data_is_ptr) {
+            do_outp(env, req->addr, req->size, req->data);
+        } else {
+            for (i = 0; i < req->count; i++) {
+                unsigned long tmp = 0;
+
+                read_physical((target_phys_addr_t) req->data
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+                do_outp(env, req->addr, req->size, tmp);
+            }
+        }
+    }
+}
+
+static void cpu_ioreq_move(CPUState *env, ioreq_t *req)
+{
+    int i, sign;
+
+    sign = req->df ? -1 : 1;
+
+    if (!req->data_is_ptr) {
+        if (req->dir == IOREQ_READ) {
+            for (i = 0; i < req->count; i++) {
+                read_physical(req->addr
+                        + (sign * i * req->size),
+                        req->size, &req->data);
+            }
+        } else if (req->dir == IOREQ_WRITE) {
+            for (i = 0; i < req->count; i++) {
+                write_physical(req->addr
+                        + (sign * i * req->size),
+                        req->size, &req->data);
+            }
+        }
+    } else {
+        target_ulong tmp;
+
+        if (req->dir == IOREQ_READ) {
+            for (i = 0; i < req->count; i++) {
+                read_physical(req->addr
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+                write_physical((target_phys_addr_t )req->data
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+            }
+        } else if (req->dir == IOREQ_WRITE) {
+            for (i = 0; i < req->count; i++) {
+                read_physical((target_phys_addr_t) req->data
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+                write_physical(req->addr
+                        + (sign * i * req->size),
+                        req->size, &tmp);
+            }
+        }
+    }
+}
+
+static void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
+{
+    char b[64];
+
+    time_offset += (unsigned long)req->data;
+
+    fprintf(stderr, "Time offset set %ld, added offset %"PRId64"\n",
+            time_offset, req->data);
+    sprintf(b, "%ld", time_offset);
+    xenstore_vm_write(xen_domid, "rtc/timeoffset", b);
+}
+
+static void __handle_ioreq(CPUState *env, ioreq_t *req)
+{
+    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
+            (req->size < sizeof(target_ulong)))
+        req->data &= ((target_ulong)1 << (8 * req->size)) - 1;
+
+    switch (req->type) {
+        case IOREQ_TYPE_PIO:
+            cpu_ioreq_pio(env, req);
+            break;
+        case IOREQ_TYPE_COPY:
+            cpu_ioreq_move(env, req);
+            break;
+        case IOREQ_TYPE_TIMEOFFSET:
+            cpu_ioreq_timeoffset(env, req);
+            break;
+        case IOREQ_TYPE_INVALIDATE:
+            qemu_invalidate_map_cache();
+            break;
+        default:
+            hw_error("Invalid ioreq type 0x%x\n", req->type);
+    }
+}
+
+static void __handle_buffered_iopage(CPUState *env)
+{
+    buf_ioreq_t *buf_req = NULL;
+    ioreq_t req;
+    int qw;
+
+    if (!buffered_io_page)
+        return;
+
+    while (buffered_io_page->read_pointer !=
+            buffered_io_page->write_pointer) {
+        buf_req = &buffered_io_page->buf_ioreq[
+            buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
+        req.size = 1UL << buf_req->size;
+        req.count = 1;
+        req.addr = buf_req->addr;
+        req.data = buf_req->data;
+        req.state = STATE_IOREQ_READY;
+        req.dir = buf_req->dir;
+        req.df = 1;
+        req.type = buf_req->type;
+        req.data_is_ptr = 0;
+        qw = (req.size == 8);
+        if (qw) {
+            buf_req = &buffered_io_page->buf_ioreq[
+                (buffered_io_page->read_pointer+1) % IOREQ_BUFFER_SLOT_NUM];
+            req.data |= ((uint64_t)buf_req->data) << 32;
+        }
+
+        __handle_ioreq(env, &req);
+
+        xen_mb();
+        buffered_io_page->read_pointer += qw ? 2 : 1;
+    }
+}
+
+static void handle_buffered_io(void *opaque)
+{
+    CPUState *env = opaque;
+
+    __handle_buffered_iopage(env);
+    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
+                   qemu_get_clock(rt_clock));
+}
+
+static void cpu_handle_ioreq(void *opaque)
+{
+    CPUState *env = opaque;
+    ioreq_t *req = cpu_get_ioreq();
+
+    __handle_buffered_iopage(env);
+    if (req) {
+        __handle_ioreq(env, req);
+
+        if (req->state != STATE_IOREQ_INPROCESS) {
+            fprintf(stderr, "Badness in I/O request ... not in service?!: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %u, size: %u\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size);
+            destroy_hvm_domain();
+            return;
+        }
+
+        xen_wmb(); /* Update ioreq contents /then/ update state. */
+
+        req->state = STATE_IORESP_READY;
+        xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
+    }
+}
+
 void xen_main_loop_prepare(void)
 {
+    CPUState *env = cpu_single_env;
+
+    int evtchn_fd = xce_handle == -1 ? -1 : xc_evtchn_fd(xce_handle);
+
+    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
+                                       cpu_single_env);
+    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
+
+    if (evtchn_fd != -1)
+        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
+
     xenstore_record_dm_state("running");
 }
+
+void destroy_hvm_domain(void)
+{
+    xc_interface *xcHandle;
+    int sts;
+
+    xcHandle = xc_interface_open(NULL, NULL, 0);
+    if (xcHandle < 0)
+        fprintf(stderr, "Cannot acquire xenctrl handle\n");
+    else {
+        sts = xc_domain_shutdown(xcHandle, xen_domid, SHUTDOWN_poweroff);
+        if (sts != 0)
+            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
+                    "sts %d, errno %d\n", sts, errno);
+        else
+            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
+        xc_interface_close(xcHandle);
+    }
+}
diff --git a/target-xen/qemu-xen.h b/target-xen/qemu-xen.h
index 091ae07..79a4638 100644
--- a/target-xen/qemu-xen.h
+++ b/target-xen/qemu-xen.h
@@ -22,12 +22,14 @@ void     qemu_invalidate_map_cache(void);
 
 /* target-xen/exec-dm.c */
 
+void destroy_hvm_domain(void);
 int cpu_register_io_memory_fixed(int io_index,
                            CPUReadMemoryFunc * const *mem_read,
                            CPUWriteMemoryFunc * const *mem_write,
                            void *opaque);
 
 /* target-xen/helper.c */
+extern int xce_handle;
 void xen_main_loop_prepare(void);
 
 #endif /*QEMU_XEN_H*/
-- 
1.7.0.4

