
[Xen-ia64-devel] [PATCH 2/2][IA64] configure VHPT size per domain: ia64 part



This is the IA64 part of the series, which makes the VHPT size configurable per domain.

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
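
The size is a log2 value in bytes and can come from two places: VTi domains
read it from the HVM_PARAM_VHPT_SIZE parameter in init_domain_vhpt() (0 keeps
the build-time default), while per-vcpu VHPTs take it from a new
vhpt_size_log2 field in XEN_DOMCTL_arch_setup (-1 selects the global VHPT
instead). Either way the value is clamped by canonicalize_vhpt_size(), which
moves from vmmu.c into asm-ia64/vhpt.h so both paths can share it.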

# HG changeset patch
# User Kouya Shimura <kouya@xxxxxxxxxxxxxx>
# Date 1192770850 -32400
# Node ID 630a8cf763c0a00766ea2c5528ad148740edad8a
# Parent  b4278beaf3549f410a5a6086dbd8af93c495aeac
Configure VHPT size per domain: IA64 part.

diff -r b4278beaf354 -r 630a8cf763c0 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Oct 17 13:12:03 2007 +0100
+++ b/xen/arch/ia64/vmx/vmmu.c  Fri Oct 19 14:14:10 2007 +0900
@@ -22,6 +22,7 @@
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_pal_vsa.h>
 #include <xen/sched-if.h>
+#include <asm/vhpt.h>
 
 static int default_vtlb_sz = DEFAULT_VTLB_SZ;
 static int default_vhpt_sz = DEFAULT_VHPT_SZ;
@@ -38,17 +39,6 @@ static void __init parse_vtlb_size(char 
     }
 }
 
-static int canonicalize_vhpt_size(int sz)
-{
-    /* minimum 32KB */
-    if (sz < 15)
-        return 15;
-    /* maximum 8MB (since purging TR is hard coded) */
-    if (sz > IA64_GRANULE_SHIFT - 1)
-        return IA64_GRANULE_SHIFT - 1;
-    return sz;
-}
-
 static void __init parse_vhpt_size(char *s)
 {
     int sz = parse_size_and_unit(s, NULL);
@@ -96,8 +86,14 @@ static int init_domain_vhpt(struct vcpu 
 static int init_domain_vhpt(struct vcpu *v)
 {
     int rc;
-
-    rc = thash_alloc(&(v->arch.vhpt), default_vhpt_sz, "vhpt");
+    u64 size = v->domain->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];
+
+    if (size == 0)
+        size = default_vhpt_sz;
+    else
+        size = canonicalize_vhpt_size(size);
+
+    rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
     v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
     return rc;
 }
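
For context, here is a minimal sketch of how a toolstack might set the new
parameter. The helper name and signature follow the libxc of this era and
should be treated as an assumption; HVM_PARAM_VHPT_SIZE itself comes from the
common part of the series (patch 1/2):

    #include <xenctrl.h>

    /* Request a 1MB VHPT (2^20 bytes) for a VTi domain.  Passing 0
     * keeps the hypervisor default; any other value is clamped to
     * [15, 23] by canonicalize_vhpt_size() when the vcpu's VHPT is
     * allocated. */
    xc_set_hvm_param(xc_handle, domid, HVM_PARAM_VHPT_SIZE, 20);
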
diff -r b4278beaf354 -r 630a8cf763c0 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Wed Oct 17 13:12:03 2007 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Fri Oct 19 14:14:10 2007 +0900
@@ -93,6 +93,9 @@ long arch_do_domctl(xen_domctl_t *op, XE
             ds->maxmem = d->arch.convmem_end;
             ds->xsi_va = d->arch.shared_info_va;
             ds->hypercall_imm = d->arch.breakimm;
+#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
+            ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
+#endif
             /* Copy back.  */
             if ( copy_to_guest(u_domctl, op, 1) )
                 ret = -EFAULT;
@@ -116,6 +119,20 @@ long arch_do_domctl(xen_domctl_t *op, XE
                     for_each_vcpu (d, v)
                         v->arch.breakimm = d->arch.breakimm;
                 }
+#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
+                if (ds->vhpt_size_log2 == -1) {
+                    d->arch.has_pervcpu_vhpt = 0;
+                    ds->vhpt_size_log2 = -1;
+                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
+                           "domain %d VHPT is global.\n", d->domain_id);
+                } else {
+                    d->arch.has_pervcpu_vhpt = 1;
+                    d->arch.vhpt_size_log2 = ds->vhpt_size_log2;
+                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
+                           "domain %d VHPT is per vcpu. size=2**%d\n",
+                           d->domain_id, ds->vhpt_size_log2);
+                }
+#endif
                 if (ds->xsi_va)
                     d->arch.shared_info_va = ds->xsi_va;
                 ret = dom_fw_setup(d, ds->bp, ds->maxmem);
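
On the domctl side, the domain builder would fill the new field before
issuing XEN_DOMCTL_arch_setup. A hedged sketch: the union member name
(u.arch_setup) is inferred from the surrounding code, and do_domctl() stands
in for whatever toolstack wrapper performs the hypercall (and sets
interface_version):

    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd    = XEN_DOMCTL_arch_setup;
    domctl.domain = domid;
    /* -1 keeps the global VHPT; any other value requests a per-vcpu
     * VHPT of 2^n bytes. */
    domctl.u.arch_setup.vhpt_size_log2 = 18;   /* 256KB per vcpu */
    do_domctl(xc_handle, &domctl);             /* hypothetical wrapper */
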
diff -r b4278beaf354 -r 630a8cf763c0 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Wed Oct 17 13:12:03 2007 +0100
+++ b/xen/arch/ia64/xen/vhpt.c  Fri Oct 19 14:14:10 2007 +0900
@@ -28,12 +28,13 @@ DEFINE_PER_CPU(volatile u32, vhpt_tlbflu
 #endif
 
 static void
-__vhpt_flush(unsigned long vhpt_maddr)
+__vhpt_flush(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
 {
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
+       unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
        int i;
 
-       for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
+       for (i = 0; i < num_entries; i++, v++)
                v->ti_tag = INVALID_TI_TAG;
 }
 
@@ -42,7 +43,7 @@ local_vhpt_flush(void)
 {
        /* increment flush clock before flush */
        u32 flush_time = tlbflush_clock_inc_and_return();
-       __vhpt_flush(__ia64_per_cpu_var(vhpt_paddr));
+       __vhpt_flush(__ia64_per_cpu_var(vhpt_paddr), VHPT_SIZE_LOG2);
        /* this must be after flush */
        tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
                             flush_time);
@@ -52,17 +53,23 @@ void
 void
 vcpu_vhpt_flush(struct vcpu* v)
 {
-       __vhpt_flush(vcpu_vhpt_maddr(v));
+       unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;
+#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
+       if (HAS_PERVCPU_VHPT(v->domain))
+               vhpt_size_log2 = v->arch.pta.size;
+#endif
+       __vhpt_flush(vcpu_vhpt_maddr(v), vhpt_size_log2);
        perfc_incr(vcpu_vhpt_flush);
 }
 
 static void
-vhpt_erase(unsigned long vhpt_maddr)
+vhpt_erase(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
 {
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
+       unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
        int i;
 
-       for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
+       for (i = 0; i < num_entries; i++, v++) {
                v->itir = 0;
                v->CChain = 0;
                v->page_flags = 0;
@@ -140,7 +147,7 @@ void __init vhpt_init(void)
        __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
        printk(XENLOG_DEBUG "vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
               paddr, __get_cpu_var(vhpt_pend));
-       vhpt_erase(paddr);
+       vhpt_erase(paddr, VHPT_SIZE_LOG2);
        // we don't enable VHPT here.
        // context_switch() or schedule_tail() does it.
 }
@@ -151,6 +158,11 @@ pervcpu_vhpt_alloc(struct vcpu *v)
 {
        unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;
 
+       if (v->domain->arch.vhpt_size_log2 > 0)
+           vhpt_size_log2 =
+               canonicalize_vhpt_size(v->domain->arch.vhpt_size_log2);
+       printk(XENLOG_DEBUG "%s vhpt_size_log2=%ld\n",
+              __func__, vhpt_size_log2);
        v->arch.vhpt_entries =
                (1UL << vhpt_size_log2) / sizeof(struct vhpt_lf_entry);
        v->arch.vhpt_page =
@@ -164,11 +176,11 @@ pervcpu_vhpt_alloc(struct vcpu *v)
 
        v->arch.pta.val = 0; // to zero reserved bits
        v->arch.pta.ve = 1; // enable vhpt
-       v->arch.pta.size = VHPT_SIZE_LOG2;
+       v->arch.pta.size = vhpt_size_log2;
        v->arch.pta.vf = 1; // long format
        v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
 
-       vhpt_erase(v->arch.vhpt_maddr);
+       vhpt_erase(v->arch.vhpt_maddr, vhpt_size_log2);
        smp_mb(); // per vcpu vhpt may be used by another physical cpu.
        return 0;
 }
@@ -178,7 +190,7 @@ pervcpu_vhpt_free(struct vcpu *v)
 {
        if (likely(v->arch.vhpt_page != NULL))
                free_domheap_pages(v->arch.vhpt_page,
-                                  VHPT_SIZE_LOG2 - PAGE_SHIFT);
+                                  v->arch.pta.size - PAGE_SHIFT);
 }
 #endif
 
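Two details in the vhpt.c changes deserve a note. First, a long-format VHPT
entry (struct vhpt_lf_entry: itir, ti_tag, page_flags and collision chain,
8 bytes each) is 32 = 2^5 bytes, which is where the "- 5" in __vhpt_flush()
and vhpt_erase() comes from. Second, pages must be freed with the same order
they were allocated with, so pervcpu_vhpt_free() now derives the order from
v->arch.pta.size instead of the fixed VHPT_SIZE_LOG2. A worked sketch;
PAGE_SHIFT = 14 (16KB Xen/ia64 pages) is an assumption here:

    /* 2^vhpt_size_log2 bytes / 32 bytes per entry: */
    num_entries = 1 << (vhpt_size_log2 - 5);
    /* e.g. the 32KB minimum (log2 = 15) gives 1 << 10 = 1024 entries */

    /* free order mirrors the allocation order: */
    order = v->arch.pta.size - PAGE_SHIFT;
    /* e.g. a 256KB per-vcpu VHPT (pta.size = 18) is order 18 - 14 = 4 */
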
diff -r b4278beaf354 -r 630a8cf763c0 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Wed Oct 17 13:12:03 2007 +0100
+++ b/xen/include/asm-ia64/domain.h     Fri Oct 19 14:14:10 2007 +0900
@@ -123,6 +123,7 @@ struct arch_domain {
             unsigned int is_vti : 1;
 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
             unsigned int has_pervcpu_vhpt : 1;
+            unsigned int vhpt_size_log2 : 6;
 #endif
         };
     };
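
A note on the width: 6 bits hold values up to 63, which comfortably covers
everything canonicalize_vhpt_size() can produce (15..23). The -1 sentinel
from the domctl never reaches this bitfield, since dom0_ops.c only stores
ds->vhpt_size_log2 in the non-(-1) branch; a stored value of 0 means "not
configured" and makes pervcpu_vhpt_alloc() fall back to VHPT_SIZE_LOG2.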
diff -r b4278beaf354 -r 630a8cf763c0 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Wed Oct 17 13:12:03 2007 +0100
+++ b/xen/include/asm-ia64/vhpt.h       Fri Oct 19 14:14:10 2007 +0900
@@ -84,5 +84,18 @@ vcpu_pta(struct vcpu* v)
         (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
 }
 
+static inline int
+canonicalize_vhpt_size(int sz)
+{
+    /* minimum 32KB */
+    if (sz < 15)
+        return 15;
+    /* maximum 8MB (since purging TR is hard coded) */
+    if (sz > IA64_GRANULE_SHIFT - 1)
+        return IA64_GRANULE_SHIFT - 1;
+    return sz;
+}
+
+
 #endif /* !__ASSEMBLY */
 #endif
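
For reference, canonicalize_vhpt_size() moves here from vmmu.c so the VTi and
per-vcpu paths share one clamp. Assuming the usual IA64_GRANULE_SHIFT of 24
(a 16MB granule, matching the 8MB comment in the code), it behaves like:

    canonicalize_vhpt_size(10);   /* -> 15: clamped up, 32KB minimum   */
    canonicalize_vhpt_size(18);   /* -> 18: in range, 256KB            */
    canonicalize_vhpt_size(30);   /* -> 23: clamped down, 8MB maximum
                                   *        (TR purging is hard-coded) */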
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel