[Xen-devel] [PATCH,RFC 8/17] 32-on-64 emulation

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH,RFC 8/17] 32-on-64 emulation
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Wed, 04 Oct 2006 17:36:52 +0200
Delivery-date: Wed, 04 Oct 2006 08:35:56 -0700
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx

This intentionally enhances the decoder of the privileged-op emulation further
than strictly required to get 32-on-64 to work - there were already missing
pieces before.
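
For reference, the new read_descriptor() helper added to traps.c below
reassembles a segment's base and limit from the two 32-bit words of a GDT/LDT
entry. Here is a minimal stand-alone sketch of that decoding - purely
illustrative, not part of the patch; the descriptor value used in main() is an
example flat 4GiB read/write data segment:

#include <stdint.h>
#include <stdio.h>

struct desc { uint32_t a, b; };   /* a = low dword, b = high dword */

static void decode(const struct desc *d,
                   unsigned long *base, unsigned long *limit)
{
    /* Base: bits 31..16 of 'a', bits 7..0 and 31..24 of 'b'. */
    *base = (d->a >> 16) | ((d->b & 0xff) << 16) | (d->b & 0xff000000);

    /* Limit: bits 15..0 of 'a', bits 19..16 of 'b'. */
    *limit = (d->a & 0xffff) | (d->b & 0x000f0000);

    /* Granularity bit set => limit counts 4kB units. */
    if ( d->b & (1u << 23) )
        *limit = ((*limit + 1) << 12) - 1;
}

int main(void)
{
    struct desc flat = { 0x0000ffff, 0x00cf9200 };  /* base 0, limit 4GiB-1 */
    unsigned long base, limit;

    decode(&flat, &base, &limit);
    printf("base=%#lx limit=%#lx\n", base, limit);
    return 0;
}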

Index: 2006-10-04/xen/arch/x86/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/mm.c   2006-10-04 15:16:05.000000000 +0200
+++ 2006-10-04/xen/arch/x86/mm.c        2006-10-04 15:18:45.000000000 +0200
@@ -1741,6 +1741,31 @@ int new_guest_cr3(unsigned long mfn)
     if ( hvm_guest(v) && !hvm_paging_enabled(v) )
         domain_crash_synchronous();
 
+#ifdef CONFIG_COMPAT
+    if ( IS_COMPAT(d) )
+    {
+        l4_pgentry_t l4e = l4e_from_pfn(mfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY);
+
+        if ( shadow_mode_refcounts(d) )
+        {
+            DPRINTK("CR3 update on shadow-refcounted domain!");//todo
+            return 0;
+        }
+
+        okay = mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)),
+                            l4e, mfn);
+        if ( unlikely(!okay) )
+        {
+            MEM_LOG("Error while installing new compat baseptr %lx", mfn);
+            return 0;
+        }
+
+        invalidate_shadow_ldt(v);
+        write_ptbase(v);
+
+        return 1;
+    }
+#endif
     if ( shadow_mode_refcounts(d) )
     {
         okay = get_page_from_pagenr(mfn, d);
@@ -3253,7 +3278,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
 
     emul_ctxt.regs = guest_cpu_user_regs();
     emul_ctxt.cr2  = addr;
-    emul_ctxt.mode = X86EMUL_MODE_HOST;
+    emul_ctxt.mode = !IS_COMPAT(d) ? X86EMUL_MODE_HOST : X86EMUL_MODE_PROT32;
     if ( x86_emulate_memop(&emul_ctxt, &ptwr_emulate_ops) )
         goto bail;
 
Index: 2006-10-04/xen/arch/x86/traps.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/traps.c        2006-10-04 15:11:03.000000000 +0200
+++ 2006-10-04/xen/arch/x86/traps.c     2006-10-04 15:18:45.000000000 +0200
@@ -973,6 +973,64 @@ long do_fpu_taskswitch(int set)
     return 0;
 }
 
+static int read_descriptor(unsigned int sel,
+                           const struct vcpu *v,
+                           const struct cpu_user_regs * regs,
+                           unsigned long *base,
+                           unsigned long *limit,
+                           unsigned int *ar,
+                           unsigned int vm86attr)
+{
+    struct desc_struct desc;
+
+    if ( !vm86_mode(regs) )
+    {
+        if ( sel < 4)
+            desc.b = desc.a = 0;
+        else if ( __get_user(desc,
+                        (const struct desc_struct *)(!(sel & 4)
+                                                     ? GDT_VIRT_START(v)
+                                                     : LDT_VIRT_START(v))
+                        + (sel >> 3)) )
+            return 0;
+        if ( !(vm86attr & _SEGMENT_CODE) )
+            desc.b &= ~_SEGMENT_L;
+    }
+    else
+    {
+        desc.a = (sel << 20) | 0xffff;
+        desc.b = vm86attr | (sel >> 12);
+    }
+
+    *ar = desc.b & 0x00f0ff00;
+    if ( !(desc.b & _SEGMENT_L) )
+    {
+        *base = (desc.a >> 16) + ((desc.b & 0xff) << 16) + (desc.b & 0xff000000);
+        *limit = (desc.a & 0xffff) | (desc.b & 0x000f0000);
+        if ( desc.b & _SEGMENT_G )
+            *limit = ((*limit + 1) << 12) - 1;
+#ifndef NDEBUG
+        if ( !vm86_mode(regs) && sel > 3 )
+        {
+            unsigned int a, l;
+            unsigned char valid;
+
+            __asm__("larl %2, %0\n\tsetz %1" : "=r" (a), "=rm" (valid) : "rm" 
(sel));
+            BUG_ON(valid && (a & 0x00f0ff00) != *ar);
+            __asm__("lsll %2, %0\n\tsetz %1" : "=r" (l), "=rm" (valid) : "rm" 
(sel));
+            BUG_ON(valid && l != *limit);
+        }
+#endif
+    }
+    else
+    {
+        *base = 0UL;
+        *limit = ~0UL;
+    }
+
+    return 1;
+}
+
 /* Has the guest requested sufficient permission for this I/O access? */
 static inline int guest_io_okay(
     unsigned int port, unsigned int bytes,
@@ -1090,79 +1148,182 @@ static inline void outl_user(
 }
 
 /* Instruction fetch with error handling. */
-#define insn_fetch(_type, _size, cs, eip)                                   \
-({  unsigned long _rc, _x, _ptr = eip;                                      \
-    if ( vm86_mode(regs) )                                                  \
-        _ptr += cs << 4;                                                    \
-    if ( (_rc = copy_from_user(&_x, (_type *)_ptr, sizeof(_type))) != 0 )   \
+#define insn_fetch(type, base, eip, limit)                                  \
+({  unsigned long _rc, _ptr = (base) + (eip);                               \
+    type _x;                                                                \
+    if ( (limit) < sizeof(_x) - 1 || (eip) > (limit) - (sizeof(_x) - 1) )   \
+        goto fail;                                                          \
+    if ( (_rc = copy_from_user(&_x, (type *)_ptr, sizeof(_x))) != 0 )       \
     {                                                                       \
-        propagate_page_fault(eip + sizeof(_type) - _rc, 0);                 \
+        propagate_page_fault(_ptr + sizeof(_x) - _rc, 0);                   \
         return EXCRET_fault_fixed;                                          \
     }                                                                       \
-    eip += _size; (_type)_x; })
+    (eip) += sizeof(_x); _x; })
+
+#if defined(CONFIG_X86_32)
+# define read_sreg(regs, sr) ((regs)->sr)
+#elif defined(CONFIG_X86_64)
+# define read_sreg(regs, sr) read_segment_register(sr)
+#endif
 
 static int emulate_privileged_op(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
-    unsigned long *reg, eip = regs->eip, cs = regs->cs, res;
-    u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0;
-    unsigned int port, i, op_bytes = 4, data, rc;
+    unsigned long *reg, eip = regs->eip, res;
+    u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, rex = 0;
+    enum { lm_seg_none, lm_seg_fs, lm_seg_gs } lm_ovr = lm_seg_none;
+    unsigned int port, i, data_sel, ar, data, rc;
+    unsigned int op_bytes, op_default, ad_bytes, ad_default;
+#define rd_ad(reg) (ad_bytes >= sizeof(regs->reg) \
+                    ? regs->reg \
+                    : ad_bytes == 4 \
+                      ? (u32)regs->reg \
+                      : (u16)regs->reg)
+#define wr_ad(reg, val) (ad_bytes >= sizeof(regs->reg) \
+                         ? regs->reg = (val) \
+                         : ad_bytes == 4 \
+                           ? (*(u32 *)&regs->reg = (val)) \
+                           : (*(u16 *)&regs->reg = (val)))
+    unsigned long code_base, code_limit;
     u32 l, h;
     io_emul_stub_t *stub;
     char *insn;
 
+    if ( !read_descriptor(regs->cs, v, regs,
+                          &code_base, &code_limit, &ar,
+                          _SEGMENT_CODE|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) )
+        goto fail;
+    op_default = op_bytes = (ar & (_SEGMENT_L|_SEGMENT_DB)) ? 4 : 2;
+    ad_default = ad_bytes = (ar & _SEGMENT_L) ? 8 : op_default;
+    if ( !(ar & (_SEGMENT_CODE|_SEGMENT_S|_SEGMENT_P)) )
+        goto fail;
+
+    /* emulating only opcodes not allowing SS to be default */
+    data_sel = read_sreg(regs, ds);
+
     /* Legacy prefixes. */
-    for ( i = 0; i < 8; i++ )
+    for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) )
     {
-        switch ( opcode = insn_fetch(u8, 1, cs, eip) )
+        switch ( opcode = insn_fetch(u8, code_base, eip, code_limit) )
         {
         case 0x66: /* operand-size override */
-            op_bytes ^= 6; /* switch between 2/4 bytes */
-            break;
+            op_bytes = op_default ^ 6; /* switch between 2/4 bytes */
+            continue;
         case 0x67: /* address-size override */
+            ad_bytes = ad_default != 4 ? 4 : 2; /* switch to 2/4 bytes */
+            continue;
         case 0x2e: /* CS override */
+            data_sel = regs->cs;
+            continue;
         case 0x3e: /* DS override */
+            data_sel = read_sreg(regs, ds);
+            continue;
         case 0x26: /* ES override */
+            data_sel = read_sreg(regs, es);
+            continue;
         case 0x64: /* FS override */
+            data_sel = read_sreg(regs, fs);
+            lm_ovr = lm_seg_fs;
+            continue;
         case 0x65: /* GS override */
+            data_sel = read_sreg(regs, gs);
+            lm_ovr = lm_seg_gs;
+            continue;
         case 0x36: /* SS override */
+            data_sel = regs->ss;
+            continue;
         case 0xf0: /* LOCK */
+            continue;
         case 0xf2: /* REPNE/REPNZ */
-            break;
         case 0xf3: /* REP/REPE/REPZ */
             rep_prefix = 1;
-            break;
+            continue;
         default:
-            goto done_prefixes;
+            if ( (ar & _SEGMENT_L) && (opcode & 0xf0) == 0x40 )
+            {
+                rex = opcode;
+                continue;
+            }
+            break;
         }
+        break;
     }
- done_prefixes:
 
-#ifdef __x86_64__
     /* REX prefix. */
-    if ( (opcode & 0xf0) == 0x40 )
+    if ( rex )
     {
+        if ( opcode & 8 ) /* REX.W */
+            op_bytes = 4; /* emulating only opcodes not supporting 64-bit operands */
         modrm_reg = (opcode & 4) << 1;  /* REX.R */
+        /* REX.X does not need to be decoded. */
         modrm_rm  = (opcode & 1) << 3;  /* REX.B */
-
-        /* REX.W and REX.X do not need to be decoded. */
-        opcode = insn_fetch(u8, 1, cs, eip);
     }
-#endif
-    
+
     /* Input/Output String instructions. */
     if ( (opcode >= 0x6c) && (opcode <= 0x6f) )
     {
-        if ( rep_prefix && (regs->ecx == 0) )
+        unsigned long data_base, data_limit;
+
+        if ( rep_prefix && (rd_ad(ecx) == 0) )
             goto done;
 
+        if ( !(opcode & 2) )
+        {
+            data_sel = read_sreg(regs, es);
+            lm_ovr = lm_seg_none;
+        }
+
+        if ( !(ar & _SEGMENT_L) )
+        {
+            if ( !read_descriptor(data_sel, v, regs,
+                                  &data_base, &data_limit, &ar,
+                                  _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P) )
+                goto fail;
+            if ( !(ar & (_SEGMENT_S|_SEGMENT_P)) ||
+                 (opcode & 2 ?
+                  (ar & _SEGMENT_CODE) && !(ar & _SEGMENT_WR) :
+                  (ar & _SEGMENT_CODE) || !(ar & _SEGMENT_WR)) )
+                goto fail;
+        }
+#ifdef CONFIG_X86_64
+        else
+        {
+            if ( lm_ovr == lm_seg_none || data_sel < 4 )
+            {
+                switch ( lm_ovr )
+                {
+                case lm_seg_none:
+                    data_base = 0UL;
+                    break;
+                case lm_seg_fs:
+                    data_base = v->arch.guest_context.fs_base;
+                    break;
+                case lm_seg_gs:
+                    if ( guest_kernel_mode(v, regs) )
+                        data_base = v->arch.guest_context.gs_base_kernel;
+                    else
+                        data_base = v->arch.guest_context.gs_base_user;
+                    break;
+                }
+            }
+            else
+                read_descriptor(data_sel, v, regs,
+                                &data_base, &data_limit, &ar,
+                                0);
+            data_limit = ~0UL;
+            ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P;
+        }
+#endif
+
     continue_io_string:
         switch ( opcode )
         {
         case 0x6c: /* INSB */
             op_bytes = 1;
         case 0x6d: /* INSW/INSL */
-            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
+            if ( data_limit < op_bytes - 1 ||
+                 rd_ad(edi) > data_limit - (op_bytes - 1) ||
+                 !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
                 goto fail;
             switch ( op_bytes )
             {
@@ -1176,24 +1337,26 @@ static int emulate_privileged_op(struct 
                 data = (u32)inl_user((u16)regs->edx, NULL, v, regs);
                 break;
             }
-            if ( (rc = copy_to_user((void *)regs->edi, &data, op_bytes)) != 0 )
+            if ( (rc = copy_to_user((void *)data_base + rd_ad(edi), &data, op_bytes)) != 0 )
             {
-                propagate_page_fault(regs->edi + op_bytes - rc,
+                propagate_page_fault(data_base + rd_ad(edi) + op_bytes - rc,
                                      PFEC_write_access);
                 return EXCRET_fault_fixed;
             }
-            regs->edi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes);
+            wr_ad(edi, regs->edi + (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes));
             break;
 
         case 0x6e: /* OUTSB */
             op_bytes = 1;
         case 0x6f: /* OUTSW/OUTSL */
-            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
+            if ( data_limit < op_bytes - 1 ||
+                 rd_ad(esi) > data_limit - (op_bytes - 1) ||
+                 !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
                 goto fail;
-            rc = copy_from_user(&data, (void *)regs->esi, op_bytes);
+            rc = copy_from_user(&data, (void *)data_base + rd_ad(esi), op_bytes);
             if ( rc != 0 )
             {
-                propagate_page_fault(regs->esi + op_bytes - rc, 0);
+                propagate_page_fault(data_base + rd_ad(esi) + op_bytes - rc, 0);
                 return EXCRET_fault_fixed;
             }
             switch ( op_bytes )
@@ -1208,11 +1371,11 @@ static int emulate_privileged_op(struct 
                 outl_user((u32)data, (u16)regs->edx, NULL, v, regs);
                 break;
             }
-            regs->esi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes);
+            wr_ad(esi, regs->esi + (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes));
             break;
         }
 
-        if ( rep_prefix && (--regs->ecx != 0) )
+        if ( rep_prefix && (wr_ad(ecx, regs->ecx - 1) != 0) )
         {
             if ( !hypercall_preempt_check() )
                 goto continue_io_string;
@@ -1233,7 +1396,7 @@ static int emulate_privileged_op(struct 
     case 0xe4: /* IN imm8,%al */
         op_bytes = 1;
     case 0xe5: /* IN imm8,%eax */
-        port = insn_fetch(u8, 1, cs, eip);
+        port = insn_fetch(u8, code_base, eip, code_limit);
         *insn = port;
     exec_in:
         if ( !guest_io_okay(port, op_bytes, v, regs) )
@@ -1264,7 +1427,7 @@ static int emulate_privileged_op(struct 
     case 0xe6: /* OUT %al,imm8 */
         op_bytes = 1;
     case 0xe7: /* OUT %eax,imm8 */
-        port = insn_fetch(u8, 1, cs, eip);
+        port = insn_fetch(u8, code_base, eip, code_limit);
         *insn = port;
     exec_out:
         if ( !guest_io_okay(port, op_bytes, v, regs) )
@@ -1315,7 +1478,7 @@ static int emulate_privileged_op(struct 
         goto fail;
 
     /* Privileged (ring 0) instructions. */
-    opcode = insn_fetch(u8, 1, cs, eip);
+    opcode = insn_fetch(u8, code_base, eip, code_limit);
     switch ( opcode )
     {
     case 0x06: /* CLTS */
@@ -1333,7 +1496,7 @@ static int emulate_privileged_op(struct 
         break;
 
     case 0x20: /* MOV CR?,<reg> */
-        opcode = insn_fetch(u8, 1, cs, eip);
+        opcode = insn_fetch(u8, code_base, eip, code_limit);
         modrm_reg |= (opcode >> 3) & 7;
         modrm_rm  |= (opcode >> 0) & 7;
         reg = decode_register(modrm_rm, regs, 0);
@@ -1349,8 +1512,14 @@ static int emulate_privileged_op(struct 
             break;
             
         case 3: /* Read CR3 */
-            *reg = xen_pfn_to_cr3(mfn_to_gmfn(
-                v->domain, pagetable_get_pfn(v->arch.guest_table)));
+            if ( !IS_COMPAT(v->domain) )
+                *reg = xen_pfn_to_cr3(mfn_to_gmfn(
+                    v->domain, pagetable_get_pfn(v->arch.guest_table)));
+#ifdef CONFIG_COMPAT
+            else
+                *reg = compat_pfn_to_cr3(mfn_to_gmfn(
+                    v->domain, l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)))));
+#endif
             break;
 
         case 4: /* Read CR4 */
@@ -1367,7 +1536,7 @@ static int emulate_privileged_op(struct 
         break;
 
     case 0x21: /* MOV DR?,<reg> */
-        opcode = insn_fetch(u8, 1, cs, eip);
+        opcode = insn_fetch(u8, code_base, eip, code_limit);
         modrm_reg |= (opcode >> 3) & 7;
         modrm_rm  |= (opcode >> 0) & 7;
         reg = decode_register(modrm_rm, regs, 0);
@@ -1377,7 +1546,7 @@ static int emulate_privileged_op(struct 
         break;
 
     case 0x22: /* MOV <reg>,CR? */
-        opcode = insn_fetch(u8, 1, cs, eip);
+        opcode = insn_fetch(u8, code_base, eip, code_limit);
         modrm_reg |= (opcode >> 3) & 7;
         modrm_rm  |= (opcode >> 0) & 7;
         reg = decode_register(modrm_rm, regs, 0);
@@ -1399,7 +1568,12 @@ static int emulate_privileged_op(struct 
 
         case 3: /* Write CR3 */
             LOCK_BIGLOCK(v->domain);
-            (void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
+            if ( !IS_COMPAT(v->domain) )
+                new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
+#ifdef CONFIG_COMPAT
+            else
+                new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
+#endif
             UNLOCK_BIGLOCK(v->domain);
             break;
 
@@ -1417,7 +1591,7 @@ static int emulate_privileged_op(struct 
         break;
 
     case 0x23: /* MOV <reg>,DR? */
-        opcode = insn_fetch(u8, 1, cs, eip);
+        opcode = insn_fetch(u8, code_base, eip, code_limit);
         modrm_reg |= (opcode >> 3) & 7;
         modrm_rm  |= (opcode >> 0) & 7;
         reg = decode_register(modrm_rm, regs, 0);
@@ -1430,18 +1604,24 @@ static int emulate_privileged_op(struct 
         {
 #ifdef CONFIG_X86_64
         case MSR_FS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
                 goto fail;
             v->arch.guest_context.fs_base =
                 ((u64)regs->edx << 32) | regs->eax;
             break;
         case MSR_GS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
                 goto fail;
             v->arch.guest_context.gs_base_kernel =
                 ((u64)regs->edx << 32) | regs->eax;
             break;
         case MSR_SHADOW_GS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
                 goto fail;
             v->arch.guest_context.gs_base_user =
@@ -1466,14 +1646,20 @@ static int emulate_privileged_op(struct 
         {
 #ifdef CONFIG_X86_64
         case MSR_FS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             regs->eax = v->arch.guest_context.fs_base & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.fs_base >> 32;
             break;
         case MSR_GS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             regs->eax = v->arch.guest_context.gs_base_kernel & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.gs_base_kernel >> 32;
             break;
         case MSR_SHADOW_GS_BASE:
+            if ( IS_COMPAT(v->domain) )
+                goto fail;
             regs->eax = v->arch.guest_context.gs_base_user & 0xFFFFFFFFUL;
             regs->edx = v->arch.guest_context.gs_base_user >> 32;
             break;
@@ -1501,6 +1687,9 @@ static int emulate_privileged_op(struct 
         goto fail;
     }
 
+#undef wr_ad
+#undef rd_ad
+
  done:
     regs->eip = eip;
     return EXCRET_fault_fixed;
Index: 2006-10-04/xen/arch/x86/x86_64/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/mm.c    2006-10-04 15:16:05.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/mm.c 2006-10-04 15:18:45.000000000 +0200
@@ -352,7 +352,11 @@ int check_descriptor(const struct domain
 
     /* All code and data segments are okay. No base/limit checking. */
     if ( (b & _SEGMENT_S) )
-        goto good;
+    {
+        if ( !IS_COMPAT(dom) || !(b & _SEGMENT_L) )
+            goto good;
+        goto bad;
+    }
 
     /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
     if ( (b & _SEGMENT_TYPE) == 0x000 )
Index: 2006-10-04/xen/include/asm-x86/desc.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/desc.h  2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/desc.h       2006-10-04 15:18:45.000000000 +0200
@@ -113,12 +113,19 @@
 
 /* These are bitmasks for the high 32 bits of a descriptor table entry. */
 #define _SEGMENT_TYPE    (15<< 8)
+#define _SEGMENT_WR      ( 1<< 9) /* Writeable (data) or Readable (code)
+                                     segment */
 #define _SEGMENT_EC      ( 1<<10) /* Expand-down or Conforming segment */
 #define _SEGMENT_CODE    ( 1<<11) /* Code (vs data) segment for non-system
                                      segments */
 #define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
 #define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
 #define _SEGMENT_P       ( 1<<15) /* Segment Present */
+#ifdef __x86_64__
+#define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
+#else
+#define _SEGMENT_L       0
+#endif
 #define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
 #define _SEGMENT_G       ( 1<<23) /* Granularity */
 
Index: 2006-10-04/xen/include/asm-x86/mm.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/mm.h    2006-10-04 15:16:05.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/mm.h 2006-10-04 15:18:45.000000000 +0200
@@ -344,6 +344,11 @@ static inline unsigned long get_mfn_from
     return INVALID_MFN;
 }
 
+#ifdef CONFIG_COMPAT
+#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
+#endif
+
 #ifdef MEMORY_GUARD
 void memguard_init(void);
 void memguard_guard_range(void *p, unsigned long l);
Index: 2006-10-04/xen/include/asm-x86/x86_32/uaccess.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/x86_32/uaccess.h        2005-11-17 15:51:06.000000000 +0100
+++ 2006-10-04/xen/include/asm-x86/x86_32/uaccess.h     2006-10-04 15:18:45.000000000 +0200
@@ -83,7 +83,7 @@ do {                                                                \
        case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
        case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break;  \
        case 8: __get_user_u64(x,ptr,retval,errret);break;              \
-       default: (x) = __get_user_bad();                                \
+       default: __get_user_bad();                                      \
        }                                                               \
 } while (0)
 
Index: 2006-10-04/xen/include/asm-x86/x86_64/uaccess.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/x86_64/uaccess.h        2006-10-04 15:09:52.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/x86_64/uaccess.h     2006-10-04 15:18:45.000000000 +0200
@@ -48,7 +48,7 @@ do {                                                                \
        case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
        case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break; \
        case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break; \
-       default: (x) = __get_user_bad();                                \
+       default: __get_user_bad();                                      \
        }                                                               \
 } while (0)
 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
