[Xen-devel] [PATCH 4/6] x86: split struct domain

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 4/6] x86: split struct domain
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Tue, 05 Apr 2011 09:21:50 +0100
This is accomplished by converting a couple of embedded arrays (in one
case a structure containing an array) into separately allocated
pointers, and (just as for struct arch_vcpu in a prior patch) overlaying
some PV-only fields with HVM-only ones.
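
For readers who want the shape of the change before wading through the hunks,
here is a stand-alone sketch of the two techniques involved. The type and
field names below are simplified stand-ins rather than Xen's actual ones; the
real definitions are in the asm-x86/domain.h hunk further down.

#include <assert.h>
#include <stdlib.h>

struct pv_state  { unsigned long *pirq_eoi_map; };   /* PV-only fields  */
struct hvm_state { void *io_handler; };               /* HVM-only fields */

struct dom {
    int is_hvm;
    union {                      /* a domain is only ever PV or HVM,   */
        struct pv_state  pv;     /* so the two sets can share storage  */
        struct hvm_state hvm;
    };
    unsigned int *cpuids;        /* was: unsigned int cpuids[N_CPUID]; */
};

#define N_CPUID 64

static struct dom *dom_create(int is_hvm)
{
    struct dom *d = calloc(1, sizeof(*d));

    if ( d == NULL )
        return NULL;
    d->is_hvm = is_hvm;
    /* The large array lives off-struct now, keeping sizeof(struct dom) small. */
    d->cpuids = calloc(N_CPUID, sizeof(*d->cpuids));
    if ( d->cpuids == NULL )
    {
        free(d);
        return NULL;
    }
    return d;
}

static void dom_destroy(struct dom *d)
{
    free(d->cpuids);
    free(d);
}

int main(void)
{
    struct dom *d = dom_create(0 /* PV */);

    assert(d != NULL && !d->is_hvm);
    /* Union members may only be touched after checking the flavour,
     * mirroring the is_hvm_domain() guards added throughout the patch. */
    if ( !d->is_hvm )
        d->pv.pirq_eoi_map = NULL;
    dom_destroy(d);

    return 0;
}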

One particularly noteworthy change in the opposite direction is that of
PITState: so far this structure lived in the HVM-only portion, but it is
used by PV guests too, and hence had to be moved out of struct hvm_domain.
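
In condensed form, accessors simply switch from the HVM-only path to the
common one; the domain_vpit() macro from the i8254.c hunk below shows the
pattern:

/* before */
#define domain_vpit(x) (&(x)->arch.hvm_domain.pl_time.vpit)
/* after */
#define domain_vpit(x) (&(x)->arch.vpit)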

The change to XENMEM_set_memory_map (and hence to libxl__build_pre(), as
well as the movement of the E820-related pieces to struct pv_domain) is
subject to a positive response to a query sent to xen-devel regarding the
need for this to happen for HVM guests (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg01848.html).

The protection of arch.hvm_domain.irq.dpci accesses by is_hvm_domain()
is subject to confirmation that the field is used for HVM guests only (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02004.html).

In the absence of any reply to these queries, and given the early state of
4.2 development, I think it should be acceptable to take the risk of
having to undo/redo some of this later.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -72,9 +72,9 @@ int libxl__build_pre(libxl__gc *gc, uint
     libxl_ctx *ctx = libxl__gc_owner(gc);
     xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus);
     xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT);
-    xc_domain_set_memmap_limit(ctx->xch, domid, 
-            (info->hvm) ? info->max_memkb : 
-            (info->max_memkb + info->u.pv.slack_memkb));
+    if (!info->hvm)
+        xc_domain_set_memmap_limit(ctx->xch, domid,
+                (info->max_memkb + info->u.pv.slack_memkb));
     xc_domain_set_tsc_info(ctx->xch, domid, info->tsc_mode, 0, 0, 0);
     if ( info->disable_migrate )
         xc_domain_disable_migrate(ctx->xch, domid);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -187,16 +187,17 @@ struct domain *alloc_domain_struct(void)
 #ifdef __x86_64__
     bits += pfn_pdx_hole_shift;
 #endif
-    d = alloc_xenheap_pages(get_order_from_bytes(sizeof(*d)), MEMF_bits(bits));
+    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
+    d = alloc_xenheap_pages(0, MEMF_bits(bits));
     if ( d != NULL )
-        memset(d, 0, sizeof(*d));
+        clear_page(d);
     return d;
 }
 
 void free_domain_struct(struct domain *d)
 {
     lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, d);
-    free_xenheap_pages(d, get_order_from_bytes(sizeof(*d)));
+    free_xenheap_page(d);
 }
 
 struct vcpu *alloc_vcpu_struct(void)
@@ -531,6 +532,17 @@ int arch_domain_create(struct domain *d,
 
     if ( !is_idle_domain(d) )
     {
+        d->arch.cpuids = xmalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
+        rc = -ENOMEM;
+        if ( d->arch.cpuids == NULL )
+            goto fail;
+        memset(d->arch.cpuids, 0, MAX_CPUID_INPUT * sizeof(*d->arch.cpuids));
+        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
+        {
+            d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
+            d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
+        }
+
         d->arch.ioport_caps = 
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
@@ -599,13 +611,6 @@ int arch_domain_create(struct domain *d,
             (CONFIG_PAGING_LEVELS != 4);
     }
 
-    memset(d->arch.cpuids, 0, sizeof(d->arch.cpuids));
-    for ( i = 0; i < MAX_CPUID_INPUT; i++ )
-    {
-        d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
-        d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
-    }
-
     /* initialize default tsc behavior in case tools don't */
     tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
     spin_lock_init(&d->arch.vtsc_lock);
@@ -2067,11 +2072,12 @@ int domain_relinquish_resources(struct d
                 unmap_vcpu_info(v);
             }
 
-            if ( d->arch.pirq_eoi_map != NULL )
+            if ( d->arch.pv_domain.pirq_eoi_map != NULL )
             {
-                unmap_domain_page_global(d->arch.pirq_eoi_map);
-                put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
-                d->arch.pirq_eoi_map = NULL;
+                unmap_domain_page_global(d->arch.pv_domain.pirq_eoi_map);
+                put_page_and_type(
+                    mfn_to_page(d->arch.pv_domain.pirq_eoi_map_mfn));
+                d->arch.pv_domain.pirq_eoi_map = NULL;
             }
         }
 
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -900,6 +900,10 @@ long arch_do_domctl(
             break;
         bind = &(domctl->u.bind_pt_irq);
 
+        ret = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto bind_out;
+
         ret = xsm_bind_pt_irq(d, bind);
         if ( ret )
             goto bind_out;
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -237,8 +237,7 @@ static void hpet_set_timer(HPETState *h,
     {
         /* HPET specification requires PIT shouldn't generate
          * interrupts if LegacyReplacementRoute is set for timer0 */
-        PITState *pit = &vhpet_domain(h)->arch.hvm_domain.pl_time.vpit;
-        pit_stop_channel0_irq(pit);
+        pit_stop_channel0_irq(&vhpet_domain(h)->arch.vpit);
     }
 
     if ( !timer_enabled(h, tn) )
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -412,7 +412,7 @@ static int hvm_print_line(
 
     spin_lock(&hd->pbuf_lock);
     hd->pbuf[hd->pbuf_idx++] = c;
-    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
+    if ( (hd->pbuf_idx == (HVM_PBUF_SIZE - 2)) || (c == '\n') )
     {
         if ( c != '\n' )
             hd->pbuf[hd->pbuf_idx++] = '\n';
@@ -443,6 +443,19 @@ int hvm_domain_initialise(struct domain 
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
+    d->arch.hvm_domain.pbuf = xmalloc_array(char, HVM_PBUF_SIZE);
+    d->arch.hvm_domain.params = xmalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
+    rc = -ENOMEM;
+    if ( !d->arch.hvm_domain.pbuf || !d->arch.hvm_domain.params ||
+         !d->arch.hvm_domain.io_handler )
+        goto fail0;
+    memset(d->arch.hvm_domain.pbuf, 0,
+           HVM_PBUF_SIZE * sizeof(*d->arch.hvm_domain.pbuf));
+    memset(d->arch.hvm_domain.params, 0,
+           HVM_NR_PARAMS * sizeof(*d->arch.hvm_domain.params));
+    d->arch.hvm_domain.io_handler->num_slot = 0;
+
     hvm_init_guest_time(d);
 
     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
@@ -480,6 +493,10 @@ int hvm_domain_initialise(struct domain 
     vioapic_deinit(d);
  fail1:
     hvm_destroy_cacheattr_region_list(d);
+ fail0:
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
     return rc;
 }
 
@@ -500,6 +517,10 @@ void hvm_domain_relinquish_resources(str
         pmtimer_deinit(d);
         hpet_deinit(d);
     }
+
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
 }
 
 void hvm_domain_destroy(struct domain *d)
@@ -2533,10 +2554,20 @@ static long hvm_grant_table_op(
 
 static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = do_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    long rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = do_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return do_memory_op(cmd, arg);
 }
 
 static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
@@ -2613,10 +2644,20 @@ static long hvm_grant_table_op_compat32(
 
 static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = compat_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    int rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = compat_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return compat_memory_op(cmd, arg);
 }
 
 static long hvm_vcpu_op_compat32(
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -38,10 +38,9 @@
 #include <asm/hvm/vpt.h>
 #include <asm/current.h>
 
-#define domain_vpit(x) (&(x)->arch.hvm_domain.pl_time.vpit)
+#define domain_vpit(x) (&(x)->arch.vpit)
 #define vcpu_vpit(x)   (domain_vpit((x)->domain))
-#define vpit_domain(x) (container_of((x), struct domain, \
-                                     arch.hvm_domain.pl_time.vpit))
+#define vpit_domain(x) (container_of((x), struct domain, arch.vpit))
 #define vpit_vcpu(x)   (pt_global_vcpu_target(vpit_domain(x)))
 
 #define RW_STATE_LSB 1
@@ -450,14 +449,18 @@ void pit_reset(struct domain *d)
 
 void pit_init(struct vcpu *v, unsigned long cpu_khz)
 {
-    PITState *pit = vcpu_vpit(v);
+    struct domain *d = v->domain;
+    PITState *pit = domain_vpit(d);
 
     spin_lock_init(&pit->lock);
 
-    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
-    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
+    if ( is_hvm_domain(d) )
+    {
+        register_portio_handler(d, PIT_BASE, 4, handle_pit_io);
+        register_portio_handler(d, 0x61, 1, handle_speaker_io);
+    }
 
-    pit_reset(v->domain);
+    pit_reset(d);
 }
 
 void pit_deinit(struct domain *d)
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -195,8 +195,7 @@ static int process_portio_intercept(port
 int hvm_io_intercept(ioreq_t *p, int type)
 {
     struct vcpu *v = current;
-    struct hvm_io_handler *handler =
-        &v->domain->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
     int i;
     unsigned long addr, size;
 
@@ -230,7 +229,7 @@ void register_io_handler(
     struct domain *d, unsigned long addr, unsigned long size,
     void *action, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int num = handler->num_slot;
 
     BUG_ON(num >= MAX_IO_HANDLER);
@@ -246,7 +245,7 @@ void relocate_io_handler(
     struct domain *d, unsigned long old_addr, unsigned long new_addr,
     unsigned long size, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int i;
 
     for ( i = 0; i < handler->num_slot; i++ )
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -272,8 +272,7 @@ static void ioapic_inj_irq(
 
 static inline int pit_channel0_enabled(void)
 {
-    PITState *pit = &current->domain->arch.hvm_domain.pl_time.vpit;
-    return pt_active(&pit->pt0);
+    return pt_active(&current->domain->arch.vpit.pt0);
 }
 
 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -463,17 +463,20 @@ static void pt_adjust_vcpu(struct period
 
 void pt_adjust_global_vcpu_target(struct vcpu *v)
 {
+    struct PITState *vpit;
     struct pl_time *pl_time;
     int i;
 
     if ( v == NULL )
         return;
 
-    pl_time = &v->domain->arch.hvm_domain.pl_time;
+    vpit = &v->domain->arch.vpit;
+
+    spin_lock(&vpit->lock);
+    pt_adjust_vcpu(&vpit->pt0, v);
+    spin_unlock(&vpit->lock);
 
-    spin_lock(&pl_time->vpit.lock);
-    pt_adjust_vcpu(&pl_time->vpit.pt0, v);
-    spin_unlock(&pl_time->vpit.lock);
+    pl_time = &v->domain->arch.hvm_domain.pl_time;
 
     spin_lock(&pl_time->vrtc.lock);
     pt_adjust_vcpu(&pl_time->vrtc.pt, v);
@@ -507,7 +510,7 @@ void pt_may_unmask_irq(struct domain *d,
 
     if ( d )
     {
-        pt_resume(&d->arch.hvm_domain.pl_time.vpit.pt0);
+        pt_resume(&d->arch.vpit.pt0);
         pt_resume(&d->arch.hvm_domain.pl_time.vrtc.pt);
         for ( i = 0; i < HPET_TIMER_NUM; i++ )
             pt_resume(&d->arch.hvm_domain.pl_time.vhpet.pt[i]);
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -764,14 +764,14 @@ bool_t cpu_has_pending_apic_eoi(void)
 
 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        set_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        set_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }
 
 static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        clear_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        clear_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }
 
 static void _irq_guest_eoi(struct irq_desc *desc)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4710,7 +4710,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
         if ( copy_from_guest(&fmap, arg, 1) )
             return -EFAULT;
 
-        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.e820) )
+        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) )
             return -EINVAL;
 
         rc = rcu_lock_target_domain_by_id(fmap.domid, &d);
@@ -4724,9 +4724,15 @@ long arch_memory_op(int op, XEN_GUEST_HA
             return rc;
         }
 
-        rc = copy_from_guest(d->arch.e820, fmap.map.buffer,
+        if ( is_hvm_domain(d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
+
+        rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer,
                              fmap.map.nr_entries) ? -EFAULT : 0;
-        d->arch.nr_e820 = fmap.map.nr_entries;
+        d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
 
         rcu_unlock_domain(d);
         return rc;
@@ -4738,14 +4744,15 @@ long arch_memory_op(int op, XEN_GUEST_HA
         struct domain *d = current->domain;
 
         /* Backwards compatibility. */
-        if ( d->arch.nr_e820 == 0 )
+        if ( d->arch.pv_domain.nr_e820 == 0 )
             return -ENOSYS;
 
         if ( copy_from_guest(&map, arg, 1) )
             return -EFAULT;
 
-        map.nr_entries = min(map.nr_entries, d->arch.nr_e820);
-        if ( copy_to_guest(map.buffer, d->arch.e820, map.nr_entries) ||
+        map.nr_entries = min(map.nr_entries, d->arch.pv_domain.nr_e820);
+        if ( copy_to_guest(map.buffer, d->arch.pv_domain.e820,
+                           map.nr_entries) ||
              copy_to_guest(arg, &map, 1) )
             return -EFAULT;
 
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -264,7 +264,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         ret = -EINVAL;
         if ( eoi.irq >= v->domain->nr_pirqs )
             break;
-        if ( v->domain->arch.pirq_eoi_map )
+        if ( !is_hvm_domain(v->domain) &&
+             v->domain->arch.pv_domain.pirq_eoi_map )
             evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
         if ( !is_hvm_domain(v->domain) ||
              domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
@@ -289,17 +290,18 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
                                 PGT_writable_page) )
             break;
 
-        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
+        if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
+                     0, mfn) != 0 )
         {
             put_page_and_type(mfn_to_page(mfn));
             ret = -EBUSY;
             break;
         }
 
-        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
-        if ( v->domain->arch.pirq_eoi_map == NULL )
+        v->domain->arch.pv_domain.pirq_eoi_map = map_domain_page_global(mfn);
+        if ( v->domain->arch.pv_domain.pirq_eoi_map == NULL )
         {
-            v->domain->arch.pirq_eoi_map_mfn = 0;
+            v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
             put_page_and_type(mfn_to_page(mfn));
             ret = -ENOSPC;
             break;
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -85,6 +85,14 @@ static void pt_irq_time_out(void *data)
     }
 }
 
+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
+{
+    if ( !d || !is_hvm_domain(d) )
+        return NULL;
+
+    return d->arch.hvm_domain.irq.dpci;
+}
+
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
 {
     xfree(dpci->mirq);
@@ -150,12 +158,7 @@ int pt_irq_create_bind_vtd(
         for ( int i = 0; i < NR_HVM_IRQS; i++ )
             INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
 
-        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
-        {
-            spin_unlock(&d->event_lock);
-            free_hvm_irq_dpci(hvm_irq_dpci);
-            return -EINVAL;
-        }
+        d->arch.hvm_domain.irq.dpci = hvm_irq_dpci;
     }
 
     if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
@@ -392,8 +395,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
 
     ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
-    if ( !iommu_enabled || (d == dom0) || !dpci ||
-         !test_bit(mirq, dpci->mapping))
+    if ( !iommu_enabled || !dpci || !test_bit(mirq, dpci->mapping))
         return 0;
 
     set_bit(mirq, dpci->dirq_mask);
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c
@@ -70,23 +70,6 @@ void *__init map_to_nocache_virt(int nr_
   return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
 }
 
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     /* dummy */
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -68,23 +68,6 @@ void *__init map_to_nocache_virt(int nr_
     return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
 }
 
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -231,6 +231,17 @@ struct time_scale {
     u32 mul_frac;
 };
 
+struct pv_domain
+{
+    /* Shared page for notifying that explicit PIRQ EOI is required. */
+    unsigned long *pirq_eoi_map;
+    unsigned long pirq_eoi_map_mfn;
+
+    /* Pseudophysical e820 map (XENMEM_memory_map).  */
+    struct e820entry e820[3];
+    unsigned int nr_e820;
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_X86_64
@@ -253,7 +264,11 @@ struct arch_domain
     uint32_t pci_cf8;
 
     struct list_head pdev_list;
-    struct hvm_domain hvm_domain;
+
+    union {
+        struct pv_domain pv_domain;
+        struct hvm_domain hvm_domain;
+    };
 
     struct paging_domain paging;
     struct p2m_domain *p2m;
@@ -265,14 +280,6 @@ struct arch_domain
     int *emuirq_pirq;
     int *pirq_emuirq;
 
-    /* Shared page for notifying that explicit PIRQ EOI is required. */
-    unsigned long *pirq_eoi_map;
-    unsigned long pirq_eoi_map_mfn;
-
-    /* Pseudophysical e820 map (XENMEM_memory_map).  */
-    struct e820entry e820[3];
-    unsigned int nr_e820;
-
     /* Maximum physical-address bitwidth supported by this guest. */
     unsigned int physaddr_bitsize;
 
@@ -294,7 +301,9 @@ struct arch_domain
     } relmem;
     struct page_list_head relmem_list;
 
-    cpuid_input_t cpuids[MAX_CPUID_INPUT];
+    cpuid_input_t *cpuids;
+
+    struct PITState vpit;
 
     /* For Guest vMCA handling */
     struct domain_mca_msrs *vmca_msrs;
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -47,7 +47,7 @@ struct hvm_domain {
 
     struct pl_time         pl_time;
 
-    struct hvm_io_handler  io_handler;
+    struct hvm_io_handler *io_handler;
 
     /* Lock protects access to irq, vpic and vioapic. */
     spinlock_t             irq_lock;
@@ -60,11 +60,12 @@ struct hvm_domain {
     struct vcpu           *i8259_target;
 
     /* hvm_print_line() logging. */
-    char                   pbuf[80];
+#define HVM_PBUF_SIZE 80
+    char                  *pbuf;
     int                    pbuf_idx;
     spinlock_t             pbuf_lock;
 
-    uint64_t               params[HVM_NR_PARAMS];
+    uint64_t              *params;
 
     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -124,7 +124,6 @@ typedef struct PMTState {
 } PMTState;
 
 struct pl_time {    /* platform time */
-    struct PITState  vpit;
     struct RTCState  vrtc;
     struct HPETState vhpet;
     struct PMTState  vpmt;
@@ -143,7 +142,9 @@ void pt_migrate(struct vcpu *v);
 
 void pt_adjust_global_vcpu_target(struct vcpu *v);
 #define pt_global_vcpu_target(d) \
-    ((d)->arch.hvm_domain.i8259_target ? : (d)->vcpu ? (d)->vcpu[0] : NULL)
+    (is_hvm_domain(d) && (d)->arch.hvm_domain.i8259_target ? \
+     (d)->arch.hvm_domain.i8259_target : \
+     (d)->vcpu ? (d)->vcpu[0] : NULL)
 
 void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt);
 
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -106,8 +106,7 @@ struct qi_ctrl *iommu_qi_ctrl(struct iom
 struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu);
 struct iommu_flush *iommu_get_flush(struct iommu *iommu);
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain);
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci);
+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
 bool_t pt_irq_need_timer(uint32_t flags);
 


Attachment: x86-domain-struct-split.patch
Description: Text document
