[Xen-changelog] [xen-unstable] [IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] replace MAX_VCPUS with d->max_vcpus where necessary.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 29 Jun 2009 03:20:16 -0700
Delivery-date: Mon, 29 Jun 2009 03:21:08 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1246242365 -32400
# Node ID 5839491bbf201b7470b506882b4cb4d382772057
# Parent  772e809e58cefcc67b644e13784b83e2134c4ad9
[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VIRT_CPUS; use domain::max_vcpus (d->max_vcpus) instead.
Changeset 2f9e1348aa98 introduced d->max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
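
For readers skimming the diff, the recurring pattern the hunks converge on is: validate a vcpu id against the per-domain d->max_vcpus before indexing d->vcpu[]. Below is a minimal standalone sketch with simplified stand-in types and a hypothetical helper name (lookup_vcpu), not the real Xen declarations. Note that the comparison must be '>=', since an id equal to the array size is already out of bounds; several hunks in the patch fix exactly that off-by-one by replacing 'vcpuid > MAX_VIRT_CPUS' with 'vcpuid >= d->max_vcpus'.

    /* Minimal sketch -- simplified types, not the real Xen headers. */
    #include <stddef.h>

    struct vcpu;                      /* opaque here */

    struct domain {
        unsigned int max_vcpus;       /* per-domain limit, set at domain creation */
        struct vcpu **vcpu;           /* array of max_vcpus pointers; entries may be NULL */
    };

    /* Hypothetical helper: return the vcpu for 'vcpuid', or NULL if the id is
     * out of range or the vcpu was never allocated. */
    static struct vcpu *lookup_vcpu(const struct domain *d, unsigned int vcpuid)
    {
        if (vcpuid >= d->max_vcpus)   /* '>=': vcpuid == max_vcpus is invalid */
            return NULL;
        return d->vcpu[vcpuid];
    }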
---
 xen/arch/ia64/vmx/viosapic.c      |    2 +-
 xen/arch/ia64/vmx/vlsapic.c       |   10 +++++-----
 xen/arch/ia64/vmx/vmx_init.c      |    2 +-
 xen/arch/ia64/vmx/vmx_vcpu_save.c |    4 ++--
 xen/arch/ia64/xen/dom0_ops.c      |    3 ++-
 xen/arch/ia64/xen/dom_fw_dom0.c   |    2 +-
 xen/arch/ia64/xen/domain.c        |    3 ++-
 xen/arch/ia64/xen/hypercall.c     |    2 +-
 xen/arch/ia64/xen/vhpt.c          |    4 ++--
 xen/include/asm-ia64/vcpumask.h   |    6 +++---
 10 files changed, 20 insertions(+), 18 deletions(-)

diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/vmx/viosapic.c
--- a/xen/arch/ia64/vmx/viosapic.c      Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/vmx/viosapic.c      Mon Jun 29 11:26:05 2009 +0900
@@ -378,7 +378,7 @@ static int viosapic_load(struct domain *
         return -EINVAL;
 
     lowest_vcpu = NULL;
-    if (viosapic_load.lowest_vcpu_id < MAX_VIRT_CPUS)
+    if (viosapic_load.lowest_vcpu_id < d->max_vcpus)
         lowest_vcpu = d->vcpu[viosapic_load.lowest_vcpu_id];
     else if (viosapic_load.lowest_vcpu_id != VIOSAPIC_INVALID_VCPU_ID)
         return -EINVAL;
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/vmx/vlsapic.c       Mon Jun 29 11:26:05 2009 +0900
@@ -153,7 +153,7 @@ static void vtm_reset(VCPU *vcpu)
 
     if (vcpu->vcpu_id == 0) {
         vtm_offset = 0UL - ia64_get_itc();
-        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+        for (i = d->max_vcpus - 1; i >= 0; i--) {
             if ((v = d->vcpu[i]) != NULL) {
                 VMX(v, vtm).vtm_offset = vtm_offset;
                 VMX(v, vtm).last_itc = 0;
@@ -227,7 +227,7 @@ void vtm_set_itc(VCPU *vcpu, uint64_t ne
     vtm = &VMX(vcpu, vtm);
     if (vcpu->vcpu_id == 0) {
         vtm_offset = new_itc - ia64_get_itc();
-        for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+        for (i = d->max_vcpus - 1; i >= 0; i--) {
             if ((v = d->vcpu[i]) != NULL) {
                 VMX(v, vtm).vtm_offset = vtm_offset;
                 VMX(v, vtm).last_itc = 0;
@@ -606,7 +606,7 @@ struct vcpu *lid_to_vcpu(struct domain *
     int id = dest >> 8;
 
     /* Fast look: assume EID=0 ID=vcpu_id.  */
-    if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS)
+    if ((dest & 0xff) == 0 && id < d->max_vcpus)
         return d->vcpu[id];
     return NULL;
 }
@@ -875,7 +875,7 @@ static int vlsapic_load(struct domain *d
     int i;
 
     vcpuid = hvm_load_instance(h);
-    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
         gdprintk(XENLOG_ERR,
                  "%s: domain has no vlsapic %u\n", __func__, vcpuid);
         return -EINVAL;
@@ -934,7 +934,7 @@ static int vtime_load(struct domain *d, 
     vtime_t *vtm;
 
     vcpuid = hvm_load_instance(h);
-    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
         gdprintk(XENLOG_ERR,
                  "%s: domain has no vtime %u\n", __func__, vcpuid);
         return -EINVAL;
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/vmx/vmx_init.c      Mon Jun 29 11:26:05 2009 +0900
@@ -623,7 +623,7 @@ int vmx_setup_platform(struct domain *d)
 
        if (d->arch.is_sioemu) {
                int i;
-               for (i = 1; i < MAX_VIRT_CPUS; i++)
+               for (i = 1; i < XEN_LEGACY_MAX_VCPUS; i++)
                        d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
        }
 
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/vmx/vmx_vcpu_save.c
--- a/xen/arch/ia64/vmx/vmx_vcpu_save.c Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/vmx/vmx_vcpu_save.c Mon Jun 29 11:26:05 2009 +0900
@@ -228,7 +228,7 @@ static int vmx_cpu_load(struct domain *d
     struct pt_regs *regs;
 
     vcpuid = hvm_load_instance(h);
-    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
         gdprintk(XENLOG_ERR,
                  "%s: domain has no vcpu %u\n", __func__, vcpuid);
         rc = -EINVAL;
@@ -278,7 +278,7 @@ static int vmx_vpd_load(struct domain *d
     int i;
 
     vcpuid = hvm_load_instance(h);
-    if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
         gdprintk(XENLOG_ERR,
                  "%s: domain has no vcpu %u\n", __func__, vcpuid);
         rc = -EINVAL;
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/xen/dom0_ops.c      Mon Jun 29 11:26:05 2009 +0900
@@ -233,7 +233,8 @@ long arch_do_domctl(xen_domctl_t *op, XE
             goto sendtrigger_out;
 
         ret = -ESRCH;
-        if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
+        if ( op->u.sendtrigger.vcpu >= d->max_vcpus ||
+             (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
             goto sendtrigger_out;
 
         ret = 0;
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/xen/dom_fw_dom0.c
--- a/xen/arch/ia64/xen/dom_fw_dom0.c   Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/xen/dom_fw_dom0.c   Mon Jun 29 11:26:05 2009 +0900
@@ -60,7 +60,7 @@ acpi_update_lsapic(struct acpi_subtable_
        if (!lsapic)
                return -EINVAL;
 
-       if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
+       if (lsapic_nbr < dom0->max_vcpus && dom0->vcpu[lsapic_nbr] != NULL)
                enable = 1;
        else
                enable = 0;
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/xen/domain.c        Mon Jun 29 11:26:05 2009 +0900
@@ -2118,6 +2118,7 @@ int __init construct_dom0(struct domain 
 
        /* Sanity! */
        BUG_ON(d != dom0);
+       BUG_ON(d->vcpu == NULL);
        BUG_ON(d->vcpu[0] == NULL);
        BUG_ON(v->is_initialised);
 
@@ -2222,7 +2223,7 @@ int __init construct_dom0(struct domain 
        //  (we should be able to deal with this... later)
 
        /* Mask all upcalls... */
-       for ( i = 1; i < MAX_VIRT_CPUS; i++ )
+       for ( i = 1; i < XEN_LEGACY_MAX_VCPUS; i++ )
            d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
 
        printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/xen/hypercall.c     Mon Jun 29 11:26:05 2009 +0900
@@ -84,7 +84,7 @@ fw_hypercall_ipi (struct pt_regs *regs)
        struct domain *d = current->domain;
 
        /* Be sure the target exists.  */
-       if (cpu > MAX_VIRT_CPUS)
+       if (cpu >= d->max_vcpus)
                return;
        targ = d->vcpu[cpu];
        if (targ == NULL)
diff -r 772e809e58ce -r 5839491bbf20 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/arch/ia64/xen/vhpt.c  Mon Jun 29 11:26:05 2009 +0900
@@ -231,7 +231,7 @@ domain_purge_swtc_entries_vcpu_dirty_mas
 {
        int vcpu;
 
-       for_each_vcpu_mask(vcpu, vcpu_dirty_mask) {
+       for_each_vcpu_mask(d, vcpu, vcpu_dirty_mask) {
                struct vcpu* v = d->vcpu[vcpu];
                if (!v->is_initialised)
                        continue;
@@ -445,7 +445,7 @@ __domain_flush_vtlb_track_entry(struct d
        }
     
        if (HAS_PERVCPU_VHPT(d)) {
-               for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) {
+               for_each_vcpu_mask(d, vcpu, entry->vcpu_dirty_mask) {
                        v = d->vcpu[vcpu];
                        if (!v->is_initialised)
                                continue;
diff -r 772e809e58ce -r 5839491bbf20 xen/include/asm-ia64/vcpumask.h
--- a/xen/include/asm-ia64/vcpumask.h   Mon Jun 29 11:23:53 2009 +0900
+++ b/xen/include/asm-ia64/vcpumask.h   Mon Jun 29 11:26:05 2009 +0900
@@ -31,12 +31,12 @@ static inline int __next_vcpu(int n, con
 }
 
 #if MAX_VIRT_CPUS > 1
-#define for_each_vcpu_mask(vcpu, mask)          \
+#define for_each_vcpu_mask(d, vcpu, mask)       \
     for ((vcpu) = first_vcpu(mask);             \
-         (vcpu) < MAX_VIRT_CPUS;                \
+         (vcpu) < d->max_vcpus;                 \
          (vcpu) = next_vcpu((vcpu), (mask)))
 #else /* NR_CPUS == 1 */
-#define for_each_vcpu_mask(vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++)
+#define for_each_vcpu_mask(d, vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++)
 #endif /* NR_CPUS */
 
 #define vcpumask_scnprintf(buf, len, src) \

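A closing note on the vcpumask.h hunk: for_each_vcpu_mask() now takes the domain so the loop bound comes from d->max_vcpus at run time rather than the compile-time MAX_VIRT_CPUS, which is why the vhpt.c call sites gain the extra 'd' argument. A rough standalone illustration of the same shape, using a plain unsigned long bitmask and made-up names rather than Xen's vcpumask_t API:

    #include <stdio.h>

    struct domain {
        unsigned int max_vcpus;
    };

    /* Illustrative stand-in for for_each_vcpu_mask(d, vcpu, mask):
     * visit set bits, but never index past the domain's own vcpu count. */
    #define for_each_set_vcpu(d, vcpu, mask)                        \
        for ((vcpu) = 0; (vcpu) < (d)->max_vcpus; (vcpu)++)         \
            if ((mask) & (1UL << (vcpu)))

    int main(void)
    {
        struct domain d = { .max_vcpus = 4 };
        unsigned long dirty = 0x2AUL;   /* bits 1, 3, 5 set; bit 5 exceeds max_vcpus */
        unsigned int vcpu;

        for_each_set_vcpu(&d, vcpu, dirty)
            printf("flush vcpu %u\n", vcpu);   /* prints vcpu 1 and vcpu 3 only */
        return 0;
    }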
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
