
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] save restore: save restore dev in HV
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 18 Jan 2007 19:05:15 -0800
Delivery-date: Thu, 18 Jan 2007 19:05:37 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1169138885 0
# Node ID 5f340f19bbb780b539672453b1203c7b5100a20e
# Parent  44668189f354a3d928e4e5a37df416ffc2992772
[HVM] save restore: save restore dev in HV

Signed-off-by: Zhai Edwin <edwin.zhai@xxxxxxxxx>

Save/restore all device state kept in the hypervisor (HV), such as the PIT, PIC and APIC.
---
 xen/arch/x86/hvm/hvm.c            |    3 
 xen/arch/x86/hvm/i8254.c          |  153 +++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/hvm/intercept.c      |   59 ++++++++++++++
 xen/arch/x86/hvm/rtc.c            |    2 
 xen/arch/x86/hvm/vioapic.c        |  128 +++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vlapic.c         |   74 ++++++++++++++++++
 xen/arch/x86/hvm/vpic.c           |   83 ++++++++++++++++++++
 xen/arch/x86/hvm/vpt.c            |    6 -
 xen/include/asm-x86/hvm/support.h |    2 
 xen/include/asm-x86/hvm/vpt.h     |    2 
 10 files changed, 504 insertions(+), 8 deletions(-)
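
For reference, every device touched by this patch follows the same save/restore
pattern: it registers a save and a load callback with hvm_register_savevm() and
serialises its state into the domain context with the hvm_put_*/hvm_get_*
helpers.  The sketch below is illustrative only and is not part of the patch;
the "foodev" device, its single register and its instance id are invented for
the example, while the helper names and call shapes are taken from the hunks
that follow.

struct foodev_state {
    uint32_t reg;                      /* hypothetical device register */
};

static void foodev_save(hvm_domain_context_t *h, void *opaque)
{
    struct foodev_state *s = opaque;
    hvm_put_32u(h, s->reg);            /* serialise state on save */
}

static int foodev_load(hvm_domain_context_t *h, void *opaque, int version_id)
{
    struct foodev_state *s = opaque;

    if (version_id != 1)               /* reject unknown layout versions */
        return -EINVAL;

    s->reg = hvm_get_32u(h);           /* read the state back on restore */
    return 0;
}

static void foodev_init(struct domain *d, struct foodev_state *s)
{
    /* domain, instance name, instance id, version,
     * save handler, load handler, opaque pointer passed to both */
    hvm_register_savevm(d, "xen_hvm_foodev", 0, 1,
                        foodev_save, foodev_load, s);
}

The PIT, PIC, IOAPIC, LAPIC and shared iopage registrations added below are
instances of this shape, each with its own version check in the load path.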

diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jan 18 16:48:05 2007 +0000
@@ -197,6 +197,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
     rtc_init(v, RTC_PORT(0), RTC_IRQ);
     pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
     hpet_init(v);
+ 
+    /* init hvm sharepage */
+    shpage_init(v->domain, get_sp(v->domain));
 
     /* Init guest TSC to start from zero. */
     hvm_set_guest_time(v, 0);
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c  Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/i8254.c  Thu Jan 18 16:48:05 2007 +0000
@@ -207,11 +207,11 @@ static inline void pit_load_count(PITCha
     switch (s->mode) {
         case 2:
             /* create periodic time */
-            create_periodic_time(&s->pt, period, 0, 0, pit_time_fired, s);
+            create_periodic_time(current, &s->pt, period, 0, 0, pit_time_fired, s);
             break;
         case 1:
             /* create one shot time */
-            create_periodic_time(&s->pt, period, 0, 1, pit_time_fired, s);
+            create_periodic_time(current, &s->pt, period, 0, 1, pit_time_fired, s);
 #ifdef DEBUG_PIT
             printk("HVM_PIT: create one shot time.\n");
 #endif
@@ -356,6 +356,154 @@ void pit_stop_channel0_irq(PITState * pi
     destroy_periodic_time(&s->pt);
 }
 
+#ifdef HVM_DEBUG_SUSPEND
+static void pit_info(PITState *pit)
+{
+    PITChannelState *s;
+    int i;
+
+    for(i = 0; i < 3; i++) {
+        printk("*****pit channel %d's state:*****\n", i);
+        s = &pit->channels[i];
+        printk("pit 0x%x.\n", s->count);
+        printk("pit 0x%x.\n", s->latched_count);
+        printk("pit 0x%x.\n", s->count_latched);
+        printk("pit 0x%x.\n", s->status_latched);
+        printk("pit 0x%x.\n", s->status);
+        printk("pit 0x%x.\n", s->read_state);
+        printk("pit 0x%x.\n", s->write_state);
+        printk("pit 0x%x.\n", s->write_latch);
+        printk("pit 0x%x.\n", s->rw_mode);
+        printk("pit 0x%x.\n", s->mode);
+        printk("pit 0x%x.\n", s->bcd);
+        printk("pit 0x%x.\n", s->gate);
+        printk("pit %"PRId64"\n", s->count_load_time);
+
+        if (s->pt) {
+            struct periodic_time *pt = s->pt;
+            printk("pit channel %d has a periodic timer:\n", i);
+            printk("pt %d.\n", pt->enabled);
+            printk("pt %d.\n", pt->one_shot);
+            printk("pt %d.\n", pt->irq);
+            printk("pt %d.\n", pt->first_injected);
+
+            printk("pt %d.\n", pt->pending_intr_nr);
+            printk("pt %d.\n", pt->period);
+            printk("pt %"PRId64"\n", pt->period_cycles);
+            printk("pt %"PRId64"\n", pt->last_plt_gtime);
+        }
+    }
+
+}
+#else
+static void pit_info(PITState *pit)
+{
+}
+#endif
+
+static void pit_save(hvm_domain_context_t *h, void *opaque)
+{
+    struct domain *d = opaque;
+    PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
+    PITChannelState *s;
+    struct periodic_time *pt;
+    int i, pti = -1;
+    
+    pit_info(pit);
+
+    for(i = 0; i < 3; i++) {
+        s = &pit->channels[i];
+        hvm_put_32u(h, s->count);
+        hvm_put_16u(h, s->latched_count);
+        hvm_put_8u(h, s->count_latched);
+        hvm_put_8u(h, s->status_latched);
+        hvm_put_8u(h, s->status);
+        hvm_put_8u(h, s->read_state);
+        hvm_put_8u(h, s->write_state);
+        hvm_put_8u(h, s->write_latch);
+        hvm_put_8u(h, s->rw_mode);
+        hvm_put_8u(h, s->mode);
+        hvm_put_8u(h, s->bcd);
+        hvm_put_8u(h, s->gate);
+        hvm_put_64u(h, s->count_load_time);
+
+        if (s->pt.enabled && pti == -1)
+            pti = i;
+    }
+
+    pt = &pit->channels[pti].pt;
+
+    /* save the vcpu for pt */
+    hvm_put_32u(h, pt->vcpu->vcpu_id);
+
+    /* save guest time */
+    hvm_put_8u(h, pti);
+    hvm_put_32u(h, pt->pending_intr_nr);
+    hvm_put_64u(h, pt->last_plt_gtime);
+
+}
+
+static int pit_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+    struct domain *d = opaque;
+    PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
+    PITChannelState *s;
+    int i, pti, vcpu_id;
+    u32 period;
+
+    if (version_id != 1)
+        return -EINVAL;
+
+    for(i = 0; i < 3; i++) {
+        s = &pit->channels[i];
+        s->count = hvm_get_32u(h);
+        s->latched_count = hvm_get_16u(h);
+        s->count_latched = hvm_get_8u(h);
+        s->status_latched = hvm_get_8u(h);
+        s->status = hvm_get_8u(h);
+        s->read_state = hvm_get_8u(h);
+        s->write_state = hvm_get_8u(h);
+        s->write_latch = hvm_get_8u(h);
+        s->rw_mode = hvm_get_8u(h);
+        s->mode = hvm_get_8u(h);
+        s->bcd = hvm_get_8u(h);
+        s->gate = hvm_get_8u(h);
+        s->count_load_time = hvm_get_64u(h);
+    }
+
+    vcpu_id = hvm_get_32u(h);
+
+    pti = hvm_get_8u(h);
+    if ( pti < 0 || pti > 2) {
+        printk("pit load get a wrong channel %d when HVM resume.\n", pti);
+        return -EINVAL;
+    }
+
+    s = &pit->channels[pti];
+    period = DIV_ROUND((s->count * 1000000000ULL), PIT_FREQ);
+
+    printk("recreate periodic timer %d in mode %d, freq=%d.\n", pti, s->mode, 
period);
+    switch (s->mode) {
+        case 2:
+            /* create periodic time */
+            create_periodic_time(d->vcpu[vcpu_id], &s->pt, period, 0, 0, pit_time_fired, s);
+            break;
+        case 1:
+            /* create one shot time */
+            create_periodic_time(d->vcpu[vcpu_id], &s->pt, period, 0, 1, pit_time_fired, s);
+            break;
+        default:
+            printk("pit mode %"PRId8" should not use periodic timer!\n", 
s->mode);
+            return -EINVAL;
+    }
+    s->pt.pending_intr_nr = hvm_get_32u(h);
+    s->pt.last_plt_gtime = hvm_get_64u(h);
+
+    pit_info(pit);
+
+    return 0;
+}
+
 static void pit_reset(void *opaque)
 {
     PITState *pit = opaque;
@@ -383,6 +531,7 @@ void pit_init(struct vcpu *v, unsigned l
     s++; s->pt.vcpu = v;
     s++; s->pt.vcpu = v;
 
+    hvm_register_savevm(v->domain, "xen_hvm_i8254", PIT_BASE, 1, pit_save, pit_load, v->domain);
     register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
     /* register the speaker port */
     register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/intercept.c      Thu Jan 18 16:48:05 2007 +0000
@@ -386,6 +386,65 @@ int arch_sethvm_ctxt(
     return hvm_load(v, c);
 }
 
+#ifdef HVM_DEBUG_SUSPEND
+static void shpage_info(shared_iopage_t *sh)
+{
+
+    vcpu_iodata_t *p = &sh->vcpu_iodata[0];
+    ioreq_t *req = &p->vp_ioreq;
+    printk("*****sharepage_info******!\n");
+    printk("vp_eport=%d\n", p->vp_eport);
+    printk("io packet: "
+                     "state:%x, pvalid: %x, dir:%x, port: %"PRIx64", "
+                     "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+                     req->state, req->data_is_ptr, req->dir, req->addr,
+                     req->data, req->count, req->size);
+}
+#else
+static void shpage_info(shared_iopage_t *sh)
+{
+}
+#endif
+
+static void shpage_save(hvm_domain_context_t *h, void *opaque)
+{
+    /* XXX: no action required for shpage save/restore, since it's in guest memory;
+     * keep it for debug purposes only */
+
+#if 0
+    struct shared_iopage *s = opaque;
+    /* XXX:smp */
+    struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;
+    
+    shpage_info(s);
+
+    hvm_put_buffer(h, (char*)req, sizeof(struct ioreq));
+#endif
+}
+
+static int shpage_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+    struct shared_iopage *s = opaque;
+#if 0
+    /* XXX:smp */
+    struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;
+
+    if (version_id != 1)
+        return -EINVAL;
+
+    hvm_get_buffer(h, (char*)req, sizeof(struct ioreq));
+
+
+#endif
+    shpage_info(s);
+    return 0;
+}
+
+void shpage_init(struct domain *d, shared_iopage_t *sp)
+{
+    hvm_register_savevm(d, "xen_hvm_shpage", 0x10, 1, shpage_save, 
shpage_load, sp);
+}
+
 int hvm_buffered_io_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/rtc.c
--- a/xen/arch/x86/hvm/rtc.c    Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/rtc.c    Thu Jan 18 16:48:05 2007 +0000
@@ -62,7 +62,7 @@ static void rtc_timer_update(RTCState *s
 #ifdef DEBUG_RTC
         printk("HVM_RTC: period = %uns\n", period);
 #endif
-        create_periodic_time(&s->pt, period, RTC_IRQ, 0, rtc_periodic_cb, s);
+        create_periodic_time(current, &s->pt, period, RTC_IRQ, 0, rtc_periodic_cb, s);
     } 
     else
         destroy_periodic_time(&s->pt);
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/vioapic.c        Thu Jan 18 16:48:05 2007 +0000
@@ -473,10 +473,138 @@ void vioapic_update_EOI(struct domain *d
     spin_unlock(&hvm_irq->lock);
 }
 
+#ifdef HVM_DEBUG_SUSPEND
+static void ioapic_info(struct vioapic *s)
+{
+    int i;
+    printk("*****ioapic state:*****\n");
+    printk("ioapic 0x%x.\n", s->ioregsel);
+    printk("ioapic 0x%x.\n", s->id);
+    printk("ioapic 0x%lx.\n", s->base_address);
+    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+        printk("ioapic redirtbl[%d]:0x%"PRIx64"\n", i, s->redirtbl[i].bits);
+    }
+
+}
+static void hvmirq_info(struct hvm_irq *hvm_irq)
+{
+    int i;
+    printk("*****hvmirq state:*****\n");
+    for (i = 0; i < BITS_TO_LONGS(32*4); i++)
+        printk("hvmirq pci_intx[%d]:0x%lx.\n", i, hvm_irq->pci_intx[i]);
+
+    for (i = 0; i < BITS_TO_LONGS(16); i++)
+        printk("hvmirq isa_irq[%d]:0x%lx.\n", i, hvm_irq->isa_irq[i]);
+
+    for (i = 0; i < BITS_TO_LONGS(1); i++)
+        printk("hvmirq callback_irq_wire[%d]:0x%lx.\n", i, 
hvm_irq->callback_irq_wire[i]);
+
+    printk("hvmirq callback_gsi:0x%x.\n", hvm_irq->callback_gsi);
+
+    for (i = 0; i < 4; i++)
+        printk("hvmirq pci_link_route[%d]:0x%"PRIx8".\n", i, 
hvm_irq->pci_link_route[i]);
+
+    for (i = 0; i < 4; i++)
+        printk("hvmirq pci_link_assert_count[%d]:0x%"PRIx8".\n", i, 
hvm_irq->pci_link_assert_count[i]);
+
+    for (i = 0; i < 4; i++)
+        printk("hvmirq gsi_assert_count[%d]:0x%"PRIx8".\n", i, 
hvm_irq->gsi_assert_count[i]);
+
+    printk("hvmirq round_robin_prev_vcpu:0x%"PRIx8".\n", 
hvm_irq->round_robin_prev_vcpu);
+}
+#else
+static void ioapic_info(struct vioapic *s)
+{
+}
+static void hvmirq_info(struct hvm_irq *hvm_irq)
+{
+}
+#endif
+
+static void ioapic_save(hvm_domain_context_t *h, void *opaque)
+{
+    int i;
+    struct domain *d = opaque;
+    struct vioapic *s = domain_vioapic(d);
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    ioapic_info(s);
+    hvmirq_info(hvm_irq);
+
+    /* save ioapic state */
+    hvm_put_32u(h, s->ioregsel);
+    hvm_put_32u(h, s->id);
+    hvm_put_64u(h, s->base_address);
+    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+        hvm_put_64u(h, s->redirtbl[i].bits);
+    }
+
+    /* save hvm irq state */
+    hvm_put_buffer(h, (char*)hvm_irq->pci_intx, 16);
+    hvm_put_buffer(h, (char*)hvm_irq->isa_irq, 2);
+    hvm_put_buffer(h, (char*)hvm_irq->callback_irq_wire, 1);
+    hvm_put_32u(h, hvm_irq->callback_gsi);
+
+    for (i = 0; i < 4; i++)
+        hvm_put_8u(h, hvm_irq->pci_link_route[i]);
+
+    for (i = 0; i < 4; i++)
+        hvm_put_8u(h, hvm_irq->pci_link_assert_count[i]);
+
+    for (i = 0; i < VIOAPIC_NUM_PINS; i++)
+        hvm_put_8u(h, hvm_irq->gsi_assert_count[i]);
+
+    hvm_put_8u(h, hvm_irq->round_robin_prev_vcpu);
+
+}
+
+static int ioapic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+    int i;
+    struct domain *d = opaque;
+    struct vioapic *s = domain_vioapic(d);
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    
+    if (version_id != 1)
+        return -EINVAL;
+
+    /* restore ioapic state */
+    s->ioregsel = hvm_get_32u(h);
+    s->id = hvm_get_32u(h);
+    s->base_address = hvm_get_64u(h);
+    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+        s->redirtbl[i].bits = hvm_get_64u(h);
+    }
+
+    /* restore irq state */
+    hvm_get_buffer(h, (char*)hvm_irq->pci_intx, 16);
+    hvm_get_buffer(h, (char*)hvm_irq->isa_irq, 2);
+    hvm_get_buffer(h, (char*)hvm_irq->callback_irq_wire, 1);
+    hvm_irq->callback_gsi = hvm_get_32u(h);
+
+    for (i = 0; i < 4; i++)
+        hvm_irq->pci_link_route[i] = hvm_get_8u(h);
+
+    for (i = 0; i < 4; i++)
+        hvm_irq->pci_link_assert_count[i] = hvm_get_8u(h);
+
+    for (i = 0; i < VIOAPIC_NUM_PINS; i++)
+        hvm_irq->gsi_assert_count[i] = hvm_get_8u(h);
+
+    hvm_irq->round_robin_prev_vcpu = hvm_get_8u(h);
+
+    ioapic_info(s);
+    hvmirq_info(hvm_irq);
+
+    return 0;
+}
+
 void vioapic_init(struct domain *d)
 {
     struct vioapic *vioapic = domain_vioapic(d);
     int i;
+
+    hvm_register_savevm(d, "xen_hvm_ioapic", 0, 1, ioapic_save, ioapic_load, 
d);
 
     memset(vioapic, 0, sizeof(*vioapic));
     for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/vlapic.c Thu Jan 18 16:48:05 2007 +0000
@@ -659,7 +659,7 @@ static void vlapic_write(struct vcpu *v,
         uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)val * vlapic->timer_divisor;
 
         vlapic_set_reg(vlapic, APIC_TMICT, val);
-        create_periodic_time(&vlapic->pt, period, vlapic->pt.irq,
+        create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
                              vlapic_lvtt_period(vlapic), NULL, vlapic);
 
         HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
@@ -795,6 +795,77 @@ static int vlapic_reset(struct vlapic *v
     return 1;
 }
 
+#ifdef HVM_DEBUG_SUSPEND
+static void lapic_info(struct vlapic *s)
+{
+    printk("*****lapic state:*****\n");
+    printk("lapic 0x%"PRIx64".\n", s->apic_base_msr);
+    printk("lapic 0x%x.\n", s->disabled);
+    printk("lapic 0x%x.\n", s->timer_divisor);
+    printk("lapic 0x%x.\n", s->timer_pending_count);
+}
+#else
+static void lapic_info(struct vlapic *s)
+{
+}
+#endif
+
+static void lapic_save(hvm_domain_context_t *h, void *opaque)
+{
+    struct vlapic *s = opaque;
+
+    lapic_info(s);
+
+    hvm_put_64u(h, s->apic_base_msr);
+    hvm_put_32u(h, s->disabled);
+    hvm_put_32u(h, s->timer_divisor);
+
+    /*XXX: need this?*/
+    hvm_put_32u(h, s->timer_pending_count);
+
+    hvm_put_buffer(h, (char*)s->regs, 0x3f0);
+
+}
+
+static int lapic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+    struct vlapic *s = opaque;
+    struct vcpu *v = vlapic_vcpu(s);
+    unsigned long tmict;
+
+    if (version_id != 1)
+        return -EINVAL;
+
+    s->apic_base_msr = hvm_get_64u(h);
+    s->disabled = hvm_get_32u(h);
+    s->timer_divisor = hvm_get_32u(h);
+
+    /*XXX: need this?*/
+    s->timer_pending_count = hvm_get_32u(h);
+
+    hvm_get_buffer(h, (char*)s->regs, 0x3f0);
+
+    /* rearm the actimer if needed */
+    tmict = vlapic_get_reg(s, APIC_TMICT);
+    if (tmict > 0) {
+        uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->timer_divisor;
+
+        create_periodic_time(v, &s->pt, period, s->pt.irq,
+                             vlapic_lvtt_period(s), NULL, s);
+
+        printk("lapic_load to rearm the actimer:"
+                    "bus cycle is %uns, "
+                    "saved tmict count %lu, period %"PRIu64"ns\n",
+                    APIC_BUS_CYCLE_NS, tmict, period);
+
+    }
+
+
+    lapic_info(s);
+
+    return 0;
+}
+
 int vlapic_init(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
@@ -813,6 +884,7 @@ int vlapic_init(struct vcpu *v)
     vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
     memset(vlapic->regs, 0, PAGE_SIZE);
 
+    hvm_register_savevm(v->domain, "xen_hvm_lapic", v->vcpu_id, 1, lapic_save, 
lapic_load, vlapic);
     vlapic_reset(vlapic);
 
     vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/vpic.c
--- a/xen/arch/x86/hvm/vpic.c   Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/vpic.c   Thu Jan 18 16:48:05 2007 +0000
@@ -378,6 +378,87 @@ static int vpic_intercept_elcr_io(ioreq_
     return 1;
 }
 
+#ifdef HVM_DEBUG_SUSPEND
+static void vpic_info(struct vpic *s)
+{
+    printk("*****pic state:*****\n");
+    printk("pic 0x%x.\n", s->irr);
+    printk("pic 0x%x.\n", s->imr);
+    printk("pic 0x%x.\n", s->isr);
+    printk("pic 0x%x.\n", s->irq_base);
+    printk("pic 0x%x.\n", s->init_state);
+    printk("pic 0x%x.\n", s->priority_add);
+    printk("pic 0x%x.\n", s->readsel_isr);
+    printk("pic 0x%x.\n", s->poll);
+    printk("pic 0x%x.\n", s->auto_eoi);
+    printk("pic 0x%x.\n", s->rotate_on_auto_eoi);
+    printk("pic 0x%x.\n", s->special_fully_nested_mode);
+    printk("pic 0x%x.\n", s->special_mask_mode);
+    printk("pic 0x%x.\n", s->elcr);
+    printk("pic 0x%x.\n", s->int_output);
+    printk("pic 0x%x.\n", s->is_master);
+}
+#else
+static void vpic_info(struct vpic *s)
+{
+}
+#endif
+
+static void vpic_save(hvm_domain_context_t *h, void *opaque)
+{
+    struct vpic *s = opaque;
+    
+    vpic_info(s);
+
+    hvm_put_8u(h, s->irr);
+    hvm_put_8u(h, s->imr);
+    hvm_put_8u(h, s->isr);
+    hvm_put_8u(h, s->irq_base);
+    hvm_put_8u(h, s->init_state);
+    hvm_put_8u(h, s->priority_add);
+    hvm_put_8u(h, s->readsel_isr);
+
+    hvm_put_8u(h, s->poll);
+    hvm_put_8u(h, s->auto_eoi);
+
+    hvm_put_8u(h, s->rotate_on_auto_eoi);
+    hvm_put_8u(h, s->special_fully_nested_mode);
+    hvm_put_8u(h, s->special_mask_mode);
+
+    hvm_put_8u(h, s->elcr);
+    hvm_put_8u(h, s->int_output);
+}
+
+static int vpic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+    struct vpic *s = opaque;
+    
+    if (version_id != 1)
+        return -EINVAL;
+
+    s->irr = hvm_get_8u(h);
+    s->imr = hvm_get_8u(h);
+    s->isr = hvm_get_8u(h);
+    s->irq_base = hvm_get_8u(h);
+    s->init_state = hvm_get_8u(h);
+    s->priority_add = hvm_get_8u(h);
+    s->readsel_isr = hvm_get_8u(h);
+
+    s->poll = hvm_get_8u(h);
+    s->auto_eoi = hvm_get_8u(h);
+
+    s->rotate_on_auto_eoi = hvm_get_8u(h);
+    s->special_fully_nested_mode = hvm_get_8u(h);
+    s->special_mask_mode = hvm_get_8u(h);
+
+    s->elcr = hvm_get_8u(h);
+    s->int_output = hvm_get_8u(h);
+
+    vpic_info(s);
+
+    return 0;
+}
+
 void vpic_init(struct domain *d)
 {
     struct vpic *vpic;
@@ -387,12 +468,14 @@ void vpic_init(struct domain *d)
     memset(vpic, 0, sizeof(*vpic));
     vpic->is_master = 1;
     vpic->elcr      = 1 << 2;
+    hvm_register_savevm(d, "xen_hvm_i8259", 0x20, 1, vpic_save, vpic_load, 
vpic);
     register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
     register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
 
     /* Slave PIC. */
     vpic++;
     memset(vpic, 0, sizeof(*vpic));
+    hvm_register_savevm(d, "xen_hvm_i8259", 0xa0, 1, vpic_save, vpic_load, 
vpic);
     register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
     register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
 }
diff -r 44668189f354 -r 5f340f19bbb7 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/arch/x86/hvm/vpt.c    Thu Jan 18 16:48:05 2007 +0000
@@ -195,7 +195,7 @@ void pt_reset(struct vcpu *v)
     }
 }
 
-void create_periodic_time(struct periodic_time *pt, uint64_t period,
+void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
                           uint8_t irq, char one_shot, time_cb *cb, void *data)
 {
     destroy_periodic_time(pt);
@@ -209,7 +209,7 @@ void create_periodic_time(struct periodi
         period = 900000; /* force to 0.9ms */
     }
     pt->period = period;
-    pt->vcpu = current;
+    pt->vcpu = v;
     pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
     pt->irq = irq;
     pt->period_cycles = (u64)period * cpu_khz / 1000000L;
@@ -218,7 +218,7 @@ void create_periodic_time(struct periodi
     pt->cb = cb;
     pt->priv = data;
 
-    list_add(&pt->list, &current->arch.hvm_vcpu.tm_list);
+    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
     set_timer(&pt->timer, pt->scheduled);
 }
 
diff -r 44668189f354 -r 5f340f19bbb7 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h Thu Jan 18 16:48:05 2007 +0000
@@ -246,6 +246,8 @@ extern int arch_sethvm_ctxt(struct vcpu 
 extern int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
 extern int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
 
+extern void shpage_init(struct domain *d, shared_iopage_t *sp);
+
 extern int hvm_enabled;
 
 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
diff -r 44668189f354 -r 5f340f19bbb7 xen/include/asm-x86/hvm/vpt.h
--- a/xen/include/asm-x86/hvm/vpt.h     Thu Jan 18 16:48:04 2007 +0000
+++ b/xen/include/asm-x86/hvm/vpt.h     Thu Jan 18 16:48:05 2007 +0000
@@ -152,7 +152,7 @@ struct periodic_time *is_pt_irq(struct v
 struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
 void pt_intr_post(struct vcpu *v, int vector, int type);
 void pt_reset(struct vcpu *v);
-void create_periodic_time(struct periodic_time *pt, uint64_t period,
+void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
                           uint8_t irq, char one_shot, time_cb *cb, void *data);
 void destroy_periodic_time(struct periodic_time *pt);
 

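A note on the create_periodic_time() change in the last two hunks: the function
previously bound the new timer to the implicit `current' vcpu, which is fine
when a guest reprograms its own timer but not on the restore path, where the
code runs outside the guest vcpu's context.  Passing the target vcpu explicitly
is what lets pit_load() and lapic_load() rebuild periodic timers for the vcpu
recorded in the saved image.  The calls below are copied from the hunks above
and lined up purely for illustration:

/* before this changeset: the timer was always attached to `current' */
create_periodic_time(&s->pt, period, 0, 0, pit_time_fired, s);

/* after this changeset: existing callers name `current' explicitly ... */
create_periodic_time(current, &s->pt, period, 0, 0, pit_time_fired, s);

/* ... while the restore path names the vcpu read from the saved image */
create_periodic_time(d->vcpu[vcpu_id], &s->pt, period, 0, 0,
                     pit_time_fired, s);
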
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
