To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] SVM patch to add a host save area per core for the hypervisor and also
From: Xen patchbot-3.0-testing <patchbot-3.0-testing@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 18 May 2006 14:48:23 +0000
Delivery-date: Thu, 18 May 2006 07:49:53 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5be39845d0183833dadc137e49e0d0c51bfe7674
# Parent  42eee0575ab746baf967d4a15ad583a2a4cba83c
SVM patch to add a host save area per core for the hypervisor and also
for the microcode.  The microcode area is not guaranteed to be
compatible with the VMCB layout, and therefore requires its own
"scratch pad".  Consolidate the per-core areas into a single structure.
Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
xen-unstable changeset:   10016:1d2e4a87300359d1f82a5a8f546798391c0d9afa
xen-unstable date:        Thu May 18 00:03:13 2006 +0100
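
In outline, the per-core setup this adds to start_svm() (a condensed
sketch of the patch below; names are as in the patch):

    int cpu = smp_processor_id();
    u64 phys_hsa;

    /* HSA handed to the processor/microcode via the VM_HSAVE_PA MSR. */
    svm_globals[cpu].hsa = alloc_host_save_area();
    phys_hsa = (u64)virt_to_maddr(svm_globals[cpu].hsa);
    wrmsr(MSR_K8_VM_HSAVE_PA, (u32)phys_hsa, (u32)(phys_hsa >> 32));
    svm_globals[cpu].hsa_pa = phys_hsa;

    /* Separate hypervisor "scratch pad"; the microcode area need not
     * follow the VMCB layout, so it cannot double as one. */
    svm_globals[cpu].scratch_hsa    = alloc_host_save_area();
    svm_globals[cpu].scratch_hsa_pa = (u64)virt_to_maddr(svm_globals[cpu].scratch_hsa);

set_hsa_to_guest(), now called from svm_asid(), then points
arch_svm->host_save_pa at the scratch area of the current core.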

Fix register corruption caused by c/s 9922.
From: Tom Woller <thomas.woller@xxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
xen-unstable changeset:   10015:0fbec683690550d6f4c9ee8a39cc0e98301af871
xen-unstable date:        Thu May 18 00:01:59 2006 +0100

SVM patch to clean up the guest event injection logic, removing the
unnecessary injecting_event variable.
Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
xen-unstable changeset:   10014:b4361ae1aabc2a6bbc65c9d9bdc9843915b2eb09
xen-unstable date:        Wed May 17 23:53:01 2006 +0100
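
With the flag gone, svm_intr_assist() simply re-queues whatever pending
event the hardware recorded in EXITINTINFO (condensed from the patch
below):

    /* Previous interrupt delivery caused this intercept? */
    if (vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0)) {
        v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector;
        vmcb->exitintinfo.bytes = 0;
        re_injecting = 1;
    }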

SVM patch to reverse the logic of the general1 intercepts for easier
reading; also add the INVD intercept, which only prints a message and
advances the eip.
Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
Signed-off-by: Mats Petersson <mats.petersson@xxxxxxx>
xen-unstable changeset:   10013:3d85f350a66a006fd5df2c228cfd8b75e3240984
xen-unstable date:        Wed May 17 23:51:39 2006 +0100
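
The INVD handler deliberately performs no invalidation: cache snooping
makes a real WBINVD unnecessary, so the intercept just logs and skips
the instruction (condensed from svm_vmexit_do_invd() in the patch below):

    printk("INVD instruction intercepted - ignored\n");
    inst_len = __get_instruction_length(vmcb, INSTR_INVD, NULL);
    __update_guest_eip(vmcb, inst_len);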

SVM patch to clean up IOIO handling: do not use "real" mode but rather
the correct "bitness".
Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
Signed-off-by: Mats Petersson <mats.petersson@xxxxxxx>
xen-unstable changeset:   10012:632ad28f2fd7a6602b08a9d054dc1b44efaf93f3
xen-unstable date:        Wed May 17 23:50:23 2006 +0100
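
The "bitness" is derived from the guest's CS attributes rather than from
a real-mode check (condensed from svm_get_io_address() in the patch
below; an 0x67 address-size prefix later toggles asize between 16 and 32):

    /* CS.L together with EFER.LMA selects 64-bit; otherwise CS.D
     * selects between 32- and 16-bit operation. */
    long_mode = vmcb->cs.attributes.fields.l && (vmcb->efer & EFER_LMA);
    asize = long_mode ? 64 : (vmcb->cs.attributes.fields.db ? 32 : 16);
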
---
 xen/arch/x86/hvm/svm/intr.c        |   17 -
 xen/arch/x86/hvm/svm/svm.c         |  337 ++++++++++++++++++++++---------------
 xen/arch/x86/hvm/svm/vmcb.c        |   25 --
 xen/include/asm-x86/hvm/svm/svm.h  |    8 
 xen/include/asm-x86/hvm/svm/vmcb.h |    1 
 5 files changed, 223 insertions(+), 165 deletions(-)

diff -r 42eee0575ab7 -r 5be39845d018 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Tue May 16 19:52:53 2006 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c       Thu May 18 00:09:13 2006 +0100
@@ -131,17 +131,13 @@ asmlinkage void svm_intr_assist(void)
     ASSERT(vmcb);
 
     /* Check if an Injection is active */
-    if (v->arch.hvm_svm.injecting_event) {
        /* Previous Interrupt delivery caused this Intercept? */
        if (vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0)) {
            v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector;
 //           printk("Injecting PF#: saving IRQ from ExitInfo\n");
            vmcb->exitintinfo.bytes = 0;
-
-           /* bail out, we won't be injecting an interrupt this time */
-           return;
+           re_injecting = 1;
        }
-    }
 
     /* Guest's interrupts masked? */
     rflags = vmcb->rflags;
@@ -150,16 +146,9 @@ asmlinkage void svm_intr_assist(void)
        /* bail out, we won't be injecting an interrupt this time */
        return;
     }
-
-    /* Interrupt delivery caused an Intercept? */
-    if (vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0)) {
-//        printk("Re-injecting IRQ from ExitInfo\n");
-        intr_vector = vmcb->exitintinfo.fields.vector;
-        vmcb->exitintinfo.bytes = 0;
-        re_injecting = 1;
-    }
+  
     /* Previous interrupt still pending? */
-    else if (vmcb->vintr.fields.irq) {
+    if (vmcb->vintr.fields.irq) {
 //        printk("Re-injecting IRQ from Vintr\n");
         intr_vector = vmcb->vintr.fields.vector;
         vmcb->vintr.bytes = 0;
diff -r 42eee0575ab7 -r 5be39845d018 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue May 16 19:52:53 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu May 18 00:09:13 2006 +0100
@@ -82,9 +82,11 @@ void svm_dump_regs(const char *from, str
 
 static void svm_relinquish_guest_resources(struct domain *d);
 
-/* Host save area */
-struct host_save_area *host_save_area[ NR_CPUS ] = {0};
-static struct asid_pool ASIDpool[NR_CPUS];
+
+extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
+
+/* Host save area and ASID global data */
+struct svm_percore_globals svm_globals[NR_CPUS];
 
 /*
  * Initializes the POOL of ASID used by the guests per core.
@@ -92,15 +94,15 @@ void asidpool_init( int core )
 void asidpool_init( int core )
 {
     int i;
-    ASIDpool[core].asid_lock = SPIN_LOCK_UNLOCKED;
-    spin_lock(&ASIDpool[core].asid_lock);
+    svm_globals[core].ASIDpool.asid_lock = SPIN_LOCK_UNLOCKED;
+    spin_lock(&svm_globals[core].ASIDpool.asid_lock);
     /* Host ASID is always in use */
-    ASIDpool[core].asid[INITIAL_ASID] = ASID_INUSE;
+    svm_globals[core].ASIDpool.asid[INITIAL_ASID] = ASID_INUSE;
     for( i=1; i<ASID_MAX; i++ )
     {
-       ASIDpool[core].asid[i] = ASID_AVAILABLE;
-    }
-    spin_unlock(&ASIDpool[core].asid_lock);
+       svm_globals[core].ASIDpool.asid[i] = ASID_AVAILABLE;
+    }
+    spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
 }
 
 
@@ -110,10 +112,10 @@ static int asidpool_fetch_next( struct v
     int i;   
     for( i = 1; i < ASID_MAX; i++ )
     {
-        if( ASIDpool[core].asid[i] == ASID_AVAILABLE )
+        if( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
         {
             vmcb->guest_asid = i;
-            ASIDpool[core].asid[i] = ASID_INUSE;
+            svm_globals[core].ASIDpool.asid[i] = ASID_INUSE;
             return i;
         }
     }
@@ -138,42 +140,42 @@ int asidpool_assign_next( struct vmcb_st
     int res = 1;
     static unsigned long cnt=0;
 
-    spin_lock(&ASIDpool[oldcore].asid_lock);
+    spin_lock(&svm_globals[oldcore].ASIDpool.asid_lock);
     if( retire_current && vmcb->guest_asid ) {
-       ASIDpool[oldcore].asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
-    }
-    spin_unlock(&ASIDpool[oldcore].asid_lock);
-    spin_lock(&ASIDpool[newcore].asid_lock);
+       svm_globals[oldcore].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
+    }
+    spin_unlock(&svm_globals[oldcore].ASIDpool.asid_lock);
+    spin_lock(&svm_globals[newcore].ASIDpool.asid_lock);
     if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
         if (svm_dbg_on)
             printk( "SVM: tlb(%ld)\n", cnt++ );
         /* FLUSH the TLB and all retired slots are made available */ 
         vmcb->tlb_control = 1;
         for( i = 1; i < ASID_MAX; i++ ) {
-            if( ASIDpool[newcore].asid[i] == ASID_RETIRED ) {
-                ASIDpool[newcore].asid[i] = ASID_AVAILABLE;
+            if( svm_globals[newcore].ASIDpool.asid[i] == ASID_RETIRED ) {
+                svm_globals[newcore].ASIDpool.asid[i] = ASID_AVAILABLE;
             }
         }
         /* Get the First slot available */ 
         res = asidpool_fetch_next( vmcb, newcore ) > 0;
     }
-    spin_unlock(&ASIDpool[newcore].asid_lock);
+    spin_unlock(&svm_globals[newcore].ASIDpool.asid_lock);
     return res;
 }
 
 void asidpool_retire( struct vmcb_struct *vmcb, int core )
 {
-   spin_lock(&ASIDpool[core].asid_lock);
+   spin_lock(&svm_globals[core].ASIDpool.asid_lock);
    if( vmcb->guest_asid ) {
-       ASIDpool[core].asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
+       svm_globals[core].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    }
-   spin_unlock(&ASIDpool[core].asid_lock);
-}
-
-static inline void svm_inject_exception(struct vmcb_struct *vmcb, 
-                                        int trap, int ev, int error_code)
+   spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
+}
+
+static inline void svm_inject_exception(struct vcpu *v, int trap, int ev, int error_code)
 {
     eventinj_t event;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     event.bytes = 0;            
     event.fields.v = 1;
@@ -198,8 +200,13 @@ void stop_svm(void)
     wrmsr(MSR_EFER, eax, edx);
  
     /* release the HSA */
-    free_host_save_area( host_save_area[ cpu ] );
-    host_save_area[ cpu ] = NULL;
+    free_host_save_area( svm_globals[cpu].hsa );
+    free_host_save_area( svm_globals[cpu].scratch_hsa );
+    svm_globals[cpu].hsa    = NULL;
+    svm_globals[cpu].hsa_pa = 0;
+    svm_globals[cpu].scratch_hsa    = NULL;
+    svm_globals[cpu].scratch_hsa_pa = 0;
+    wrmsr(MSR_K8_VM_HSAVE_PA, 0, 0 );
 
     printk("AMD SVM Extension is disabled.\n");
 }
@@ -329,7 +336,7 @@ static inline int long_mode_do_msr_write
         if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
         {
             printk("trying to set reserved bit in EFER\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
             return 0;
         }
 
@@ -343,7 +350,7 @@ static inline int long_mode_do_msr_write
             {
                 printk("trying to set LME bit when "
                        "in paging mode or PAE bit is not set\n");
-                svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+                svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
                 return 0;
             }
             set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
@@ -367,7 +374,7 @@ static inline int long_mode_do_msr_write
         if (!IS_CANO_ADDRESS(msr_content))
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
         }
 
         if (regs->ecx == MSR_FS_BASE)
@@ -455,16 +462,20 @@ int start_svm(void)
     rdmsr(MSR_EFER, eax, edx);
     eax |= EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
-    asidpool_init(smp_processor_id());    
+    asidpool_init( cpu );    
     printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
 
     /* Initialize the HSA for this core */
-    host_save_area[ cpu ] = alloc_host_save_area();
-    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
+    svm_globals[cpu].hsa = alloc_host_save_area();
+    phys_hsa = (u64) virt_to_maddr( svm_globals[cpu].hsa ); 
     phys_hsa_lo = (u32) phys_hsa;
     phys_hsa_hi = (u32) (phys_hsa >> 32);    
     wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
-    
+    svm_globals[cpu].hsa_pa = phys_hsa;
+  
+    svm_globals[cpu].scratch_hsa    = alloc_host_save_area();
+    svm_globals[cpu].scratch_hsa_pa = (u64)virt_to_maddr( svm_globals[cpu].scratch_hsa );
+
     /* Setup HVM interfaces */
     hvm_funcs.disable = stop_svm;
 
@@ -546,7 +557,6 @@ static inline int svm_do_debugout(unsign
     return 1;
 }
 
-
 void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -577,8 +587,6 @@ void svm_store_cpu_user_regs(struct cpu_
     regs->ds     = vmcb->ds.sel;
     regs->es     = vmcb->es.sel;
     regs->ss     = vmcb->ss.sel;
-    regs->fs     = vmcb->fs.sel;
-    regs->gs     = vmcb->gs.sel;
 }
 
 /* XXX Use svm_load_cpu_guest_regs instead */
@@ -594,12 +602,6 @@ void svm_load_cpu_user_regs(struct vcpu 
     vmcb->rflags   = regs->eflags;
     vmcb->cs.sel   = regs->cs;
     vmcb->rip      = regs->eip;
-
-    vmcb->ds.sel   = regs->ds;
-    vmcb->es.sel   = regs->es;
-    vmcb->fs.sel   = regs->fs;
-    vmcb->gs.sel   = regs->gs;
-
     if (regs->eflags & EF_TF)
         *intercepts |= EXCEPTION_BITMAP_DB;
     else
@@ -896,10 +898,9 @@ static void svm_do_general_protection_fa
             (unsigned long)regs->eax, (unsigned long)regs->ebx,
             (unsigned long)regs->ecx, (unsigned long)regs->edx,
             (unsigned long)regs->esi, (unsigned long)regs->edi);
-
-    
+      
     /* Reflect it back into the guest */
-    svm_inject_exception(vmcb, TRAP_gp_fault, 1, error_code);
+    svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
 }
 
 /* Reserved bits: [31:14], [12:1] */
@@ -1117,19 +1118,17 @@ static void svm_dr_access (struct vcpu *
 }
 
 
-static unsigned int check_for_null_selector(struct vmcb_struct *vmcb, 
-        unsigned int dir, unsigned long *base, unsigned int real)
-
+static void svm_get_prefix_info(struct vmcb_struct *vmcb, 
+               unsigned int dir, segment_selector_t **seg, unsigned int *asize)
 {
     unsigned char inst[MAX_INST_LEN];
-    segment_selector_t seg;
     int i;
 
     memset(inst, 0, MAX_INST_LEN);
     if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst)) 
             != MAX_INST_LEN) 
     {
-        printk("check_for_null_selector: get guest instruction failed\n");
+        printk("%s: get guest instruction failed\n", __func__);
         domain_crash_synchronous();
     }
 
@@ -1141,7 +1140,6 @@ static unsigned int check_for_null_selec
         case 0xf2: /* REPNZ */
         case 0xf0: /* LOCK */
         case 0x66: /* data32 */
-        case 0x67: /* addr32 */
 #if __x86_64__
             /* REX prefixes */
         case 0x40:
@@ -1163,89 +1161,134 @@ static unsigned int check_for_null_selec
         case 0x4f:
 #endif
             continue;
+        case 0x67: /* addr32 */
+            *asize ^= 48;        /* Switch 16/32 bits */
+            continue;
         case 0x2e: /* CS */
-            seg = vmcb->cs;
+            *seg = &vmcb->cs;
+            continue;
+        case 0x36: /* SS */
+            *seg = &vmcb->ss;
+            continue;
+        case 0x26: /* ES */
+            *seg = &vmcb->es;
+            continue;
+        case 0x64: /* FS */
+            *seg = &vmcb->fs;
+            continue;
+        case 0x65: /* GS */
+            *seg = &vmcb->gs;
+            continue;
+        case 0x3e: /* DS */
+            *seg = &vmcb->ds;
+            continue;
+        default:
             break;
-        case 0x36: /* SS */
-            seg = vmcb->ss;
-            break;
-        case 0x26: /* ES */
-            seg = vmcb->es;
-            break;
-        case 0x64: /* FS */
-            seg = vmcb->fs;
-            break;
-        case 0x65: /* GS */
-            seg = vmcb->gs;
-            break;
-        case 0x3e: /* DS */
-            /* FALLTHROUGH */
-            seg = vmcb->ds;
-            break;
-        default:
-            if (dir == IOREQ_READ) /* IN/INS instruction? */
-                seg = vmcb->es;
-            else
-                seg = vmcb->ds;
-        }
-        
-        if (base)
-            *base = seg.base;
-
-        return seg.attributes.fields.p;
-    }
-
-    ASSERT(0);
-    return 0;
+        }
+        return;
+    }
 }
 
 
 /* Get the address of INS/OUTS instruction */
-static inline unsigned long svm_get_io_address(struct vmcb_struct *vmcb, 
-        struct cpu_user_regs *regs, unsigned int dir, unsigned int real)
-{
-    unsigned long addr = 0;
-    unsigned long base = 0;
-
-    check_for_null_selector(vmcb, dir, &base, real);
+static inline int svm_get_io_address(struct vcpu *v, 
+               struct cpu_user_regs *regs, unsigned int dir, 
+        unsigned long *count, unsigned long *addr)
+{
+    unsigned long        reg;
+    unsigned int         asize = 0;
+    unsigned int         isize;
+    int                  long_mode;
+    ioio_info_t          info;
+    segment_selector_t  *seg = NULL;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    info.bytes = vmcb->exitinfo1;
+
+    /* If we're in long mode, we shouldn't check the segment presence and limit */
+    long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
+
+    /* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit. 
+     * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit.
+     */
+    asize = (long_mode)?64:((vmcb->cs.attributes.fields.db)?32:16);
+
+
+    /* The ins/outs instructions are single byte, so if we have got more 
+     * than one byte (+ maybe rep-prefix), we have some prefix so we need 
+     * to figure out what it is...
+     */
+    isize = vmcb->exitinfo2 - vmcb->rip;
+
+    if (info.fields.rep)
+        isize --;
+
+    if (isize > 1) 
+    {
+        svm_get_prefix_info(vmcb, dir, &seg, &asize);
+    }
+
+    ASSERT(dir == IOREQ_READ || dir == IOREQ_WRITE);
 
     if (dir == IOREQ_WRITE)
     {
-        if (real)
-            addr = (regs->esi & 0xFFFF) + base;
-        else
-            addr = regs->esi + base;
+        reg = regs->esi;
+        if (!seg)               /* If no prefix, use DS. */
+            seg = &vmcb->ds;
     }
     else
     {
-        if (real)
-            addr = (regs->edi & 0xFFFF) + base;
-        else
-            addr = regs->edi + base;
-    }
-
-    return addr;
+        reg = regs->edi;
+        seg = &vmcb->es;        /* Note: This is ALWAYS ES. */
+    }
+
+    /* If the segment isn't present, give GP fault! */
+    if (!long_mode && !seg->attributes.fields.p) 
+    {
+        svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
+        return 0;
+    }
+
+    if (asize == 16) 
+    {
+        *addr = (reg & 0xFFFF);
+        *count = regs->ecx & 0xffff;
+    }
+    else
+    {
+        *addr = reg;
+        *count = regs->ecx;
+    }
+
+    if (!long_mode) {
+        if (*addr > seg->limit) 
+        {
+            svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
+            return 0;
+        } 
+        else 
+        {
+            *addr += seg->base;
+        }
+    }
+    
+
+    return 1;
 }
 
 
 static void svm_io_instruction(struct vcpu *v, struct cpu_user_regs *regs) 
 {
     struct mmio_op *mmio_opp;
-    unsigned long eip, cs, eflags, cr0;
-    unsigned long port;
-    unsigned int real, size, dir;
+    unsigned int port;
+    unsigned int size, dir;
     ioio_info_t info;
-
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     ASSERT(vmcb);
     mmio_opp = &current->arch.hvm_vcpu.mmio_op;
     mmio_opp->instr = INSTR_PIO;
     mmio_opp->flags = 0;
-
-    eip = vmcb->rip;
-    cs =  vmcb->cs.sel;
-    eflags = vmcb->rflags;
 
     info.bytes = vmcb->exitinfo1;
 
@@ -1258,27 +1301,33 @@ static void svm_io_instruction(struct vc
     else 
         size = 1;
 
-    cr0 = vmcb->cr0;
-    real = (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
-
     HVM_DBG_LOG(DBG_LEVEL_IO, 
-                "svm_io_instruction: port 0x%lx real %d, eip=%lx:%lx, "
+                "svm_io_instruction: port 0x%x eip=%lx:%lx, "
                 "exit_qualification = %lx",
-                (unsigned long) port, real, cs, eip, (unsigned long)info.bytes);
+                port, vmcb->cs.sel, vmcb->rip, (unsigned long)info.bytes);
     /* string instruction */
     if (info.fields.str)
     { 
-        unsigned long addr, count = 1;
+        unsigned long addr, count;
         int sign = regs->eflags & EF_DF ? -1 : 1;
 
-        /* Need the original rip, here. */
-        addr = svm_get_io_address(vmcb, regs, dir, real);
+        if (!svm_get_io_address(v, regs, dir, &count, &addr)) 
+        {
+            /* We failed to get a valid address, so don't do the IO operation -
+             * it would just get worse if we do! Hopefully the guest is handling
+             * gp-faults... 
+             */
+            return;
+        }
 
         /* "rep" prefix */
         if (info.fields.rep) 
         {
             mmio_opp->flags |= REPZ;
-            count = real ? regs->ecx & 0xFFFF : regs->ecx;
+        }
+        else 
+        {
+            count = 1;
         }
 
         /*
@@ -1367,7 +1416,7 @@ static int svm_set_cr0(unsigned long val
                     &v->arch.hvm_svm.cpu_state))
         {
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
         }
 
         if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
@@ -1441,7 +1490,7 @@ static int svm_set_cr0(unsigned long val
      */
     if ((value & X86_CR0_PE) == 0) {
        if (value & X86_CR0_PG) {
-            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             return 0;
         }
 
@@ -1688,7 +1737,7 @@ static int mov_to_cr(int gpreg, int cr, 
         } else {
             if (test_bit(SVM_CPU_STATE_LMA_ENABLED,
                          &v->arch.hvm_svm.cpu_state)) {
-                svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
+                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             }
             clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
         }
@@ -1804,7 +1853,8 @@ static int svm_cr_access(struct vcpu *v,
         break;
 
     case INSTR_SMSW:
-        svm_dump_inst(svm_rip2pointer(vmcb));
+        if (svm_dbg_on)
+            svm_dump_inst(svm_rip2pointer(vmcb));
         value = v->arch.hvm_svm.cpu_shadow_cr0;
         gpreg = decode_src_reg(prefix, buffer[index+2]);
         set_reg(gpreg, value, regs, vmcb);
@@ -1941,9 +1991,25 @@ static inline void svm_vmexit_do_hlt(str
 }
 
 
-static inline void svm_vmexit_do_mwait(void)
-{
-}
+static void svm_vmexit_do_invd(struct vmcb_struct *vmcb)
+{
+    int  inst_len;
+    
+    /* Invalidate the cache - we can't really do that safely - maybe we should 
+     * WBINVD, but I think it's just fine to completely ignore it - we should 
+     * have cache-snooping that solves it anyways. -- Mats P. 
+     */
+
+    /* Tell the user that we did this - just in case someone runs some really weird
+     * operating system and wants to know why it's not working as it should...
+     */
+    printk("INVD instruction intercepted - ignored\n");
+    
+    inst_len = __get_instruction_length(vmcb, INSTR_INVD, NULL);
+    __update_guest_eip(vmcb, inst_len);
+}    
+        
+
 
 
 #ifdef XEN_DEBUGGER
@@ -2005,7 +2071,7 @@ void svm_handle_invlpg(const short invlp
         __update_guest_eip(vmcb, inst_len);
 
         /* 
-         * The address is implicit on this instruction At the moment, we don't
+         * The address is implicit on this instruction. At the moment, we don't
          * use ecx (ASID) to identify individual guests pages 
          */
         g_vaddr = regs->eax;
@@ -2439,7 +2505,6 @@ asmlinkage void svm_vmexit_handler(struc
 
     exit_reason = vmcb->exitcode;
     save_svm_cpu_user_regs(v, &regs);
-    v->arch.hvm_svm.injecting_event = 0;
 
     vmcb->tlb_control = 1;
 
@@ -2603,7 +2668,7 @@ asmlinkage void svm_vmexit_handler(struc
         if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
             domain_pause_for_debugger();
         else 
-            svm_inject_exception(vmcb, TRAP_int3, 0, 0);
+            svm_inject_exception(v, TRAP_int3, 0, 0);
 #endif
         break;
 
@@ -2614,7 +2679,6 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_EXCEPTION_GP:
         /* This should probably not be trapped in the future */
         regs.error_code = vmcb->exitinfo1;
-        v->arch.hvm_svm.injecting_event = 1;
         svm_do_general_protection_fault(v, &regs);
         break;  
 
@@ -2634,9 +2698,8 @@ asmlinkage void svm_vmexit_handler(struc
 //printk("PF1\n");
         if (!(error = svm_do_page_fault(va, &regs))) 
         {
-            v->arch.hvm_svm.injecting_event = 1;
             /* Inject #PF using Interruption-Information Fields */
-            svm_inject_exception(vmcb, TRAP_page_fault, 1, regs.error_code);
+            svm_inject_exception(v, TRAP_page_fault, 1, regs.error_code);
 
             v->arch.hvm_svm.cpu_cr2 = va;
             vmcb->cr2 = va;
@@ -2653,6 +2716,11 @@ asmlinkage void svm_vmexit_handler(struc
 
     case VMEXIT_INTR:
         raise_softirq(SCHEDULE_SOFTIRQ);
+        break;
+
+
+    case VMEXIT_INVD:
+        svm_vmexit_do_invd(vmcb);
         break;
 
     case VMEXIT_GDTR_WRITE:
@@ -2848,6 +2916,9 @@ asmlinkage void svm_asid(void)
         v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
         clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
     }
+
+    /* make sure the HSA is set for the current core */
+    set_hsa_to_guest( &v->arch.hvm_svm );
 }
 
 /*
diff -r 42eee0575ab7 -r 5be39845d018 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue May 16 19:52:53 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu May 18 00:09:13 2006 +0100
@@ -36,7 +36,7 @@
 #include <xen/kernel.h>
 #include <xen/domain_page.h>
 
-extern struct host_save_area *host_save_area[];
+extern struct svm_percore_globals svm_globals[];
 extern int svm_dbg_on;
 extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
                                   int oldcore, int newcore);
@@ -117,16 +117,12 @@ static int construct_vmcb_controls(struc
 
     /* mask off all general 1 intercepts except those listed here */
     vmcb->general1_intercepts = 
-        ~(GENERAL1_INTERCEPT_CR0_SEL_WRITE | GENERAL1_INTERCEPT_VINTR      | 
-          GENERAL1_INTERCEPT_IDTR_READ     | GENERAL1_INTERCEPT_IDTR_WRITE | 
-          GENERAL1_INTERCEPT_GDTR_READ     | GENERAL1_INTERCEPT_GDTR_WRITE |
-          GENERAL1_INTERCEPT_LDTR_READ     | GENERAL1_INTERCEPT_LDTR_WRITE | 
-          GENERAL1_INTERCEPT_TR_READ       | GENERAL1_INTERCEPT_TR_WRITE   |
-          GENERAL1_INTERCEPT_RDTSC         | GENERAL1_INTERCEPT_PUSHF      |
-          GENERAL1_INTERCEPT_SWINT         | GENERAL1_INTERCEPT_POPF       | 
-          GENERAL1_INTERCEPT_IRET          | GENERAL1_INTERCEPT_PAUSE      |
-          GENERAL1_INTERCEPT_TASK_SWITCH
-        );
+        GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
+        GENERAL1_INTERCEPT_SMI          | GENERAL1_INTERCEPT_INIT        |
+        GENERAL1_INTERCEPT_CPUID        | GENERAL1_INTERCEPT_INVD        |
+        GENERAL1_INTERCEPT_HLT          | GENERAL1_INTERCEPT_INVLPG      | 
+        GENERAL1_INTERCEPT_INVLPGA      | GENERAL1_INTERCEPT_IOIO_PROT   |
+        GENERAL1_INTERCEPT_MSR_PROT     | GENERAL1_INTERCEPT_SHUTDOWN_EVT;
 
     /* turn on the general 2 intercepts */
     vmcb->general2_intercepts = 
@@ -421,7 +417,6 @@ void svm_do_launch(struct vcpu *v)
 
     v->arch.schedule_tail = arch_svm_do_resume;
 
-    v->arch.hvm_svm.injecting_event  = 0;
     v->arch.hvm_svm.saved_irq_vector = -1;
 
     svm_set_guest_time(v, 0);
@@ -435,8 +430,7 @@ void svm_do_launch(struct vcpu *v)
 
 void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
 {
-    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
-    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
+  arch_svm->host_save_pa = svm_globals[ smp_processor_id() ].scratch_hsa_pa;
 }
 
 /* 
@@ -449,9 +443,6 @@ void svm_do_resume(struct vcpu *v)
 
     svm_stts(v);
 
-    /* make sure the HSA is set for the current core */
-    set_hsa_to_guest( &v->arch.hvm_svm );
-    
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
     if ( vpit->first_injected ) {
         if ( v->domain->arch.hvm_domain.guest_time ) {
diff -r 42eee0575ab7 -r 5be39845d018 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Tue May 16 19:52:53 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Thu May 18 00:09:13 2006 +0100
@@ -70,6 +70,14 @@ struct asid_pool {
     u32 asid[ASID_MAX];
 };
 
+struct svm_percore_globals {
+  void *hsa;
+  u64  hsa_pa;
+  void *scratch_hsa;
+  u64  scratch_hsa_pa;
+  struct asid_pool ASIDpool;
+};
+
 #define SVM_REG_EAX (0) 
 #define SVM_REG_ECX (1) 
 #define SVM_REG_EDX (2) 
diff -r 42eee0575ab7 -r 5be39845d018 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue May 16 19:52:53 2006 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Thu May 18 00:09:13 2006 +0100
@@ -440,7 +440,6 @@ struct arch_svm_struct {
     u32                 *iopm;
     u32                 *msrpm;
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
-    int                 injecting_event;
     int                 saved_irq_vector;
     u32                 launch_core;
     u32                 asid_core;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
