[Xen-changelog] [xen-unstable] x86_emulate: Check I/O port accesses.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86_emulate: Check I/O port accesses.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 27 Mar 2008 18:40:08 -0700
Delivery-date: Thu, 27 Mar 2008 18:40:07 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1206638722 0
# Node ID e7abfeee280832606426baed80190a9e337ccf4e
# Parent  892a20f824a7aa5c5ed57ce80fc61f1dffd4b4d2
x86_emulate: Check I/O port accesses.
Implements both CPL/IOPL and TSS-bitmap checks.
Requires changes to read/write callback hooks to disable user-access
checks when walking pagetables on behalf of GDT/LDT/TSS accesses.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        |   62 +++++++++++------
 xen/arch/x86/hvm/hvm.c            |  133 +++++++++++++++++---------------------
 xen/arch/x86/hvm/svm/emulate.c    |    4 -
 xen/arch/x86/mm/shadow/common.c   |    8 +-
 xen/arch/x86/x86_emulate.c        |   65 ++++++++++++++++--
 xen/include/asm-x86/hvm/support.h |   18 +++--
 6 files changed, 177 insertions(+), 113 deletions(-)
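
The recurring change across the callers below is that the page-fault
error code (PFEC) for guest page-table walks is now derived once, up
front, from the access type and the saved SS.DPL, and the user-mode bit
is suppressed for implicit supervisor accesses (seg == x86_seg_none,
i.e. GDT/LDT/TSS reads). A minimal sketch of that derivation, with
illustrative names not taken from the patch:

    /* Hedged sketch of the PFEC derivation the hunks below repeat;
     * 'derive_pfec', 'ss_dpl' and 'is_write' are illustrative. */
    static uint32_t derive_pfec(enum x86_segment seg,
                                unsigned int ss_dpl, int is_write)
    {
        uint32_t pfec = PFEC_page_present;
        if ( is_write )
            pfec |= PFEC_write_access;
        /* Implicit GDT/LDT/TSS accesses are supervisor accesses even
         * when the guest executes at CPL 3. */
        if ( (seg != x86_seg_none) && (ss_dpl == 3) )
            pfec |= PFEC_user_mode;
        return pfec;
    }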

diff -r 892a20f824a7 -r e7abfeee2808 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Thu Mar 27 17:25:22 2008 +0000
@@ -94,19 +94,18 @@ static int hvmemul_do_mmio(
  * Convert addr from linear to physical form, valid over the range
  * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
  * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
+ * @pfec indicates the access checks to be performed during page-table walks.
  */
 static int hvmemul_linear_to_phys(
     unsigned long addr,
     paddr_t *paddr,
     unsigned int bytes_per_rep,
     unsigned long *reps,
-    enum hvm_access_type access_type,
+    uint32_t pfec,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
     unsigned long pfn, npfn, done, todo, i;
-    struct segment_register *sreg;
-    uint32_t pfec;
 
     /* Clip repetitions to a sensible maximum. */
     *reps = min_t(unsigned long, *reps, 4096);
@@ -119,14 +118,6 @@ static int hvmemul_linear_to_phys(
     }
 
     *paddr = addr & ~PAGE_MASK;
-
-    /* Gather access-type information for the page walks. */
-    sreg = hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
-    pfec = PFEC_page_present;
-    if ( sreg->attr.fields.dpl == 3 )
-        pfec |= PFEC_user_mode;
-    if ( access_type == hvm_access_write )
-        pfec |= PFEC_write_access;
 
     /* Get the first PFN in the range. */
     if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
@@ -216,6 +207,7 @@ static int __hvmemul_read(
 {
     struct vcpu *curr = current;
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present;
     paddr_t gpa;
     int rc;
 
@@ -237,9 +229,13 @@ static int __hvmemul_read(
             return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
     }
 
+    if ( (seg != x86_seg_none) &&
+         (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
+        pfec |= PFEC_user_mode;
+
     rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_virt(val, addr, bytes) :
-          hvm_copy_from_guest_virt(val, addr, bytes));
+          hvm_fetch_from_guest_virt(val, addr, bytes, pfec) :
+          hvm_copy_from_guest_virt(val, addr, bytes, pfec));
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
 
@@ -251,7 +247,7 @@ static int __hvmemul_read(
             return X86EMUL_UNHANDLEABLE;
 
         rc = hvmemul_linear_to_phys(
-            addr, &gpa, bytes, &reps, access_type, hvmemul_ctxt);
+            addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
             return rc;
 
@@ -307,6 +303,7 @@ static int hvmemul_write(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
     paddr_t gpa;
     int rc;
 
@@ -325,7 +322,11 @@ static int hvmemul_write(
                                    0, 0, NULL);
     }
 
-    rc = hvm_copy_to_guest_virt(addr, &val, bytes);
+    if ( (seg != x86_seg_none) &&
+         (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
+        pfec |= PFEC_user_mode;
+
+    rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
 
@@ -334,7 +335,7 @@ static int hvmemul_write(
         unsigned long reps = 1;
 
         rc = hvmemul_linear_to_phys(
-            addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
+            addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
             return rc;
 
@@ -367,6 +368,7 @@ static int hvmemul_rep_ins(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
     paddr_t gpa;
     int rc;
 
@@ -376,8 +378,11 @@ static int hvmemul_rep_ins(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        addr, &gpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt);
+        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -396,6 +401,7 @@ static int hvmemul_rep_outs(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present;
     paddr_t gpa;
     int rc;
 
@@ -405,8 +411,11 @@ static int hvmemul_rep_outs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        addr, &gpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt);
+        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -427,6 +436,7 @@ static int hvmemul_rep_movs(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long saddr, daddr;
     paddr_t sgpa, dgpa;
+    uint32_t pfec = PFEC_page_present;
     p2m_type_t p2mt;
     int rc;
 
@@ -442,13 +452,17 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        saddr, &sgpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt);
+        saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
     rc = hvmemul_linear_to_phys(
-        daddr, &dgpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt);
+        daddr, &dgpa, bytes_per_rep, reps,
+        pfec | PFEC_write_access, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -696,7 +710,7 @@ int hvm_emulate_one(
 {
     struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
-    uint32_t new_intr_shadow;
+    uint32_t new_intr_shadow, pfec = PFEC_page_present;
     unsigned long addr;
     int rc;
 
@@ -712,6 +726,9 @@ int hvm_emulate_one(
         hvmemul_ctxt->ctxt.sp_size =
             hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
     }
+
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = regs->eip;
     hvmemul_ctxt->insn_buf_bytes =
@@ -720,7 +737,8 @@ int hvm_emulate_one(
             regs->eip, sizeof(hvmemul_ctxt->insn_buf),
             hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
          !hvm_fetch_from_guest_virt_nofault(
-             hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf)))
+             hvmemul_ctxt->insn_buf, addr,
+             sizeof(hvmemul_ctxt->insn_buf), pfec))
         ? sizeof(hvmemul_ctxt->insn_buf) : 0;
 
     hvmemul_ctxt->exn_pending = 0;
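
With the hunks above, hvmemul_linear_to_phys() no longer reconstructs
the access type itself; callers hand it a finished PFEC. A hedged usage
sketch (the repetition count and operand size are illustrative):

    /* Translate a 16-rep, 4-bytes-per-rep user-mode write. On
     * X86EMUL_OKAY, *reps may have been clipped to the contiguous
     * translatable prefix of the range, but stays greater than zero. */
    unsigned long reps = 16;
    paddr_t gpa;
    int rc = hvmemul_linear_to_phys(
        addr, &gpa, 4, &reps,
        PFEC_page_present | PFEC_write_access | PFEC_user_mode,
        hvmemul_ctxt);
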
diff -r 892a20f824a7 -r e7abfeee2808 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Mar 27 17:25:22 2008 +0000
@@ -1302,7 +1302,7 @@ void hvm_task_switch(
         goto out;
     }
 
-    if ( !tr.attr.fields.g && (tr.limit < (sizeof(tss)-1)) )
+    if ( tr.limit < (sizeof(tss)-1) )
     {
         hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
         goto out;
@@ -1410,7 +1410,7 @@ void hvm_task_switch(
         if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
                                         4, hvm_access_write, 32,
                                         &linear_addr) )
-            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4);
+            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0);
     }
 
  out:
@@ -1418,60 +1418,31 @@ void hvm_task_switch(
     hvm_unmap(nptss_desc);
 }
 
-/*
- * __hvm_copy():
- *  @buf  = hypervisor buffer
- *  @addr = guest address to copy to/from
- *  @size = number of bytes to copy
- *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
- *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
- *  @fetch = copy is an instruction fetch?
- * Returns number of bytes failed to copy (0 == complete success).
- */
+#define HVMCOPY_from_guest (0u<<0)
+#define HVMCOPY_to_guest   (1u<<0)
+#define HVMCOPY_no_fault   (0u<<1)
+#define HVMCOPY_fault      (1u<<1)
+#define HVMCOPY_phys       (0u<<2)
+#define HVMCOPY_virt       (1u<<2)
 static enum hvm_copy_result __hvm_copy(
-    void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
+    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
 {
     struct vcpu *curr = current;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
-    int count, todo;
-    uint32_t pfec = PFEC_page_present;
-
-    /*
-     * We cannot use hvm_get_segment_register() while executing in
-     * vmx_realmode() as segment register state is cached. Furthermore,
-     * VMREADs on every data access hurts emulation performance.
-     * Hence we do not gather extra PFEC flags if CR0.PG == 0.
-     */
-    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG) )
-        virt = 0;
-
-    if ( virt )
-    {
-        struct segment_register sreg;
-        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-        if ( sreg.attr.fields.dpl == 3 )
-            pfec |= PFEC_user_mode;
-
-        if ( dir ) 
-            pfec |= PFEC_write_access;
-
-        if ( fetch ) 
-            pfec |= PFEC_insn_fetch;
-    }
-
-    todo = size;
+    int count, todo = size;
+
     while ( todo > 0 )
     {
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
 
-        if ( virt )
+        if ( flags & HVMCOPY_virt )
         {
             gfn = paging_gva_to_gfn(curr, addr, &pfec);
             if ( gfn == INVALID_GFN )
             {
-                if ( virt == 2 ) /* 2 means generate a fault */
+                if ( flags & HVMCOPY_fault )
                     hvm_inject_exception(TRAP_page_fault, pfec, addr);
                 return HVMCOPY_bad_gva_to_gfn;
             }
@@ -1489,16 +1460,18 @@ static enum hvm_copy_result __hvm_copy(
 
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
 
-        if ( dir )
+        if ( flags & HVMCOPY_to_guest )
         {
-            memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
+            memcpy(p, buf, count);
             paging_mark_dirty(curr->domain, mfn);
         }
         else
-            memcpy(buf, p, count); /* dir == FALSE: *from guest */
+        {
+            memcpy(buf, p, count);
+        }
 
         unmap_domain_page(p);
-        
+
         addr += count;
         buf  += count;
         todo -= count;
@@ -1510,56 +1483,73 @@ enum hvm_copy_result hvm_copy_to_guest_p
 enum hvm_copy_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size)
 {
-    return __hvm_copy(buf, paddr, size, 1, 0, 0);
+    return __hvm_copy(buf, paddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
+                      0);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
-    return __hvm_copy(buf, paddr, size, 0, 0, 0);
+    return __hvm_copy(buf, paddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
+                      0);
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 1, 2, 0);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec)
+{
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | PFEC_write_access | pfec);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 0, 2, 0);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
+{
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }
 
 enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current));
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
+{
+    if ( hvm_nx_enabled(current) )
+        pfec |= PFEC_insn_fetch;
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
-    unsigned long vaddr, void *buf, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 1, 1, 0);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec)
+{
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | PFEC_write_access | pfec);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 0, 1, 0);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
+{
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }
 
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current));
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
+{
+    if ( hvm_nx_enabled(current) )
+        pfec |= PFEC_insn_fetch;
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }
 
 DEFINE_PER_CPU(int, guest_handles_in_xen_space);
 
-/* Note that copy_{to,from}_user_hvm require the PTE to be writable even
-   when they're only trying to read from it.  The guest is expected to
-   deal with this. */
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
 {
     int rc;
@@ -1570,7 +1560,8 @@ unsigned long copy_to_user_hvm(void *to,
         return 0;
     }
 
-    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len);
+    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
+                                        len, 0);
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -1584,7 +1575,7 @@ unsigned long copy_from_user_hvm(void *t
         return 0;
     }
 
-    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len);
+    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
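
The public helpers above now OR the baseline bits (PFEC_page_present,
plus PFEC_write_access in the to-guest direction) into whatever the
caller supplies, so a caller only adds what it knows about the access.
A hedged usage sketch ('vaddr' and the value type are illustrative):

    /* Read guest memory on behalf of a CPL-3 access: the helper adds
     * PFEC_page_present itself; the caller contributes only
     * PFEC_user_mode. */
    uint32_t val;
    enum hvm_copy_result res =
        hvm_copy_from_guest_virt(&val, vaddr, sizeof(val),
                                 PFEC_user_mode);
    /* res == HVMCOPY_bad_gva_to_gfn means this fault variant has
     * already queued a #PF for injection into the vcpu. */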
 
diff -r 892a20f824a7 -r e7abfeee2808 xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/svm/emulate.c    Thu Mar 27 17:25:22 2008 +0000
@@ -32,9 +32,11 @@ static int inst_copy_from_guest(
 static int inst_copy_from_guest(
     unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
+    struct vmcb_struct *vmcb = current->arch.hvm_svm.vmcb;
+    uint32_t pfec = (vmcb->cpl == 3) ? PFEC_user_mode : 0;
     if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
         return 0;
-    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) )
+    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len, pfec) )
         return 0;
     return inst_len;
 }
diff -r 892a20f824a7 -r e7abfeee2808 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Mar 27 17:25:22 2008 +0000
@@ -152,9 +152,9 @@ hvm_read(enum x86_segment seg,
     *val = 0;
 
     if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_virt(val, addr, bytes);
+        rc = hvm_fetch_from_guest_virt(val, addr, bytes, 0);
     else
-        rc = hvm_copy_from_guest_virt(val, addr, bytes);
+        rc = hvm_copy_from_guest_virt(val, addr, bytes, 0);
 
     switch ( rc )
     {
@@ -416,7 +416,7 @@ struct x86_emulate_ops *shadow_init_emul
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
          !hvm_fetch_from_guest_virt_nofault(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -444,7 +444,7 @@ void shadow_continue_emulation(struct sh
                     x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
                     hvm_access_insn_fetch, sh_ctxt, &addr) &&
                  !hvm_fetch_from_guest_virt_nofault(
-                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0))
                 ? sizeof(sh_ctxt->insn_buf) : 0;
             sh_ctxt->insn_buf_eip = regs->eip;
         }
diff -r 892a20f824a7 -r e7abfeee2808 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c        Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/x86_emulate.c        Thu Mar 27 17:25:22 2008 +0000
@@ -787,7 +787,7 @@ _mode_iopl(
     int cpl = get_cpl(ctxt, ops);
     if ( cpl == -1 )
         return -1;
-    return ((cpl >= 0) && (cpl <= ((ctxt->regs->eflags >> 12) & 3)));
+    return (cpl <= ((ctxt->regs->eflags >> 12) & 3));
 }
 
 #define mode_ring0() ({                         \
@@ -800,6 +800,50 @@ _mode_iopl(
     fail_if(_iopl < 0);                         \
     _iopl;                                      \
 })
+
+static int ioport_access_check(
+    unsigned int first_port,
+    unsigned int bytes,
+    struct x86_emulate_ctxt *ctxt,
+    struct x86_emulate_ops *ops)
+{
+    unsigned long iobmp;
+    struct segment_register tr;
+    int rc = X86EMUL_OKAY;
+
+    if ( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() )
+        return X86EMUL_OKAY;
+
+    fail_if(ops->read_segment == NULL);
+    if ( (rc = ops->read_segment(x86_seg_tr, &tr, ctxt)) != 0 )
+        return rc;
+
+    /* Ensure that the TSS is valid and has an io-bitmap-offset field. */
+    if ( !tr.attr.fields.p ||
+         ((tr.attr.fields.type & 0xd) != 0x9) ||
+         (tr.limit < 0x67) )
+        goto raise_exception;
+
+    if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
+        return rc;
+
+    /* Ensure TSS includes two bytes including byte containing first port. */
+    iobmp += first_port / 8;
+    if ( tr.limit <= iobmp )
+        goto raise_exception;
+
+    if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
+        return rc;
+    if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
+        goto raise_exception;
+
+ done:
+    return rc;
+
+ raise_exception:
+    fail_if(ops->inject_hw_exception == NULL);
+    return ops->inject_hw_exception(EXC_GP, 0, ctxt) ? : X86EMUL_EXCEPTION;
+}
 
 static int
 in_realmode(
@@ -2265,12 +2309,14 @@ x86_emulate(
 
     case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
         unsigned long nr_reps = get_rep_prefix();
+        unsigned int port = (uint16_t)_regs.edx;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
         dst.mem.seg = x86_seg_es;
         dst.mem.off = truncate_ea(_regs.edi);
+        if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( (nr_reps > 1) && (ops->rep_ins != NULL) &&
-             ((rc = ops->rep_ins((uint16_t)_regs.edx, dst.mem.seg,
-                                 dst.mem.off, dst.bytes,
+             ((rc = ops->rep_ins(port, dst.mem.seg, dst.mem.off, dst.bytes,
                                  &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
         {
             if ( rc != 0 )
@@ -2279,8 +2325,7 @@ x86_emulate(
         else
         {
             fail_if(ops->read_io == NULL);
-            if ( (rc = ops->read_io((uint16_t)_regs.edx, dst.bytes,
-                                    &dst.val, ctxt)) != 0 )
+            if ( (rc = ops->read_io(port, dst.bytes, &dst.val, ctxt)) != 0 )
                 goto done;
             dst.type = OP_MEM;
             nr_reps = 1;
@@ -2294,10 +2339,13 @@ x86_emulate(
 
     case 0x6e ... 0x6f: /* outs %esi,%dx */ {
         unsigned long nr_reps = get_rep_prefix();
+        unsigned int port = (uint16_t)_regs.edx;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
+        if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
              ((rc = ops->rep_outs(ea.mem.seg, truncate_ea(_regs.esi),
-                                  (uint16_t)_regs.edx, dst.bytes,
+                                  port, dst.bytes,
                                   &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
         {
             if ( rc != 0 )
@@ -2309,8 +2357,7 @@ x86_emulate(
                                  &dst.val, dst.bytes, ctxt)) != 0 )
                 goto done;
             fail_if(ops->write_io == NULL);
-            if ( (rc = ops->write_io((uint16_t)_regs.edx, dst.bytes,
-                                     dst.val, ctxt)) != 0 )
+            if ( (rc = ops->write_io(port, dst.bytes, dst.val, ctxt)) != 0 )
                 goto done;
             nr_reps = 1;
         }
@@ -2831,6 +2878,8 @@ x86_emulate(
                              ? insn_fetch_type(uint8_t)
                              : (uint16_t)_regs.edx);
         op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
+        if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( b & 2 )
         {
             /* out */
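
The ioport_access_check() introduced above follows the architectural
rules: IOPL admits the access outright when CPL <= IOPL (except in
virtual-8086 mode, where the bitmap path is always taken, hence the
EFLG_VM test), otherwise the TSS I/O-permission bitmap decides, and two
bitmap bytes must lie within the TSS limit because an access can
straddle a byte boundary. A standalone sketch of the same test, with
illustrative names rather than the hypervisor's:

    /* cpl/iopl: current privilege level and EFLAGS.IOPL; tss: a mapped
     * copy of the TSS; tss_limit: its segment limit. Returns nonzero
     * iff access to ports [port, port+bytes) is permitted. */
    static int io_permitted(unsigned int port, unsigned int bytes,
                            unsigned int cpl, unsigned int iopl,
                            const uint8_t *tss, unsigned int tss_limit)
    {
        unsigned int off;
        uint16_t bits;

        if ( cpl <= iopl )
            return 1;                  /* IOPL grants access directly */
        if ( tss_limit < 0x67 )
            return 0;                  /* no io-bitmap-offset field */
        off  = tss[0x66] | (tss[0x67] << 8); /* I/O map base, TSS+0x66 */
        off += port / 8;
        if ( tss_limit <= off )        /* need bytes off and off+1 */
            return 0;
        bits = tss[off] | (tss[off + 1] << 8);
        return !(bits & (((1u << bytes) - 1) << (port & 7)));
    }

For example, with an I/O map base of 0x88 and a one-byte access to port
0x72, the byte offset is 0x88 + 0x72/8 = 0x96 and the bit tested within
the 16-bit window is 0x72 & 7 = 2.
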
diff -r 892a20f824a7 -r e7abfeee2808 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/include/asm-x86/hvm/support.h Thu Mar 27 17:25:22 2008 +0000
@@ -99,7 +99,11 @@ enum hvm_copy_result hvm_copy_from_guest
     void *buf, paddr_t paddr, int size);
 
 /*
- * Copy to/from a guest virtual address.
+ * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
+ * if emulating a user-mode access (CPL=3). All other flags in @pfec are
+ * managed by the called function: it is therefore optional for the caller
+ * to set them.
+ * 
  * Returns:
  *  HVMCOPY_okay: Copy was entirely successful.
  *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
@@ -110,22 +114,22 @@ enum hvm_copy_result hvm_copy_from_guest
  *                          for injection into the current HVM VCPU.
  */
 enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec);
 enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 
 /*
  * As above (copy to/from a guest virtual address), but no fault is generated
  * when HVMCOPY_bad_gva_to_gfn is returned.
  */
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
-    unsigned long vaddr, void *buf, int size);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec);
 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 
 void hvm_print_line(struct vcpu *v, const char c);
 void hlt_timer_fn(void *data);
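
Finally, the _nofault variants keep their role as opportunistic
accessors: the HVM and shadow emulators above use them to pre-fill an
instruction buffer, falling back to a zero-length buffer on failure. A
hedged sketch of that pattern (the buffer size is illustrative):

    /* Prime the emulator's instruction buffer; on failure leave it
     * empty so the emulator falls back to its fetch callback. */
    uint8_t insn_buf[16];
    unsigned int insn_buf_bytes =
        !hvm_fetch_from_guest_virt_nofault(insn_buf, addr,
                                           sizeof(insn_buf), pfec)
        ? sizeof(insn_buf) : 0;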

