WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

[Xen-changelog] Merged.

# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 8a5dca8c1e8f18440f6b02c8fe53a8ab6708ce90
# Parent  ad6a208992ccd0ab8b2a9def9de44d442eef1e96
# Parent  8f83f7ccf185dea197f8cb21bf0bc095bb612c8e
Merged.
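
This merge changes how the faulting address reaches a paravirtualised guest. Xen no longer pushes %cr2 onto the guest frame through the trap bounce (TBF_EXCEPTION_CR2, trap_bounce.cr2 and the special page_fault entry code are removed); instead it writes the address into a new per-VCPU arch.cr2 field of the shared info page, and the guest's do_page_fault() fetches it from there. The shadow log-dirty changes below are a related cleanup that sizes the dirty bitmap from shared_info->arch.max_pfn and clamps sc->pages to it. As a rough guest-side sketch only, using the names visible in the diff (HYPERVISOR_shared_info, vcpu_data, arch.cr2) and omitting the rest of the fault path:

/*
 * Sketch: recovering the faulting address after this change.  Xen saves
 * %cr2 into the current VCPU's arch.cr2 slot of the shared info page
 * before bouncing the fault, so no third argument is passed any more.
 */
fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        unsigned long address;

        /* Read the saved %cr2 for this VCPU from shared memory. */
        address = HYPERVISOR_shared_info->vcpu_data[
                smp_processor_id()].arch.cr2;

        /* ... normal fault handling continues, using 'address' ... */
}
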

diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S Thu Nov 10 10:43:26 2005
@@ -81,7 +81,7 @@
 #define evtchn_upcall_pending          /* 0 */
 #define evtchn_upcall_mask             1
 
-#define sizeof_vcpu_shift              3
+#define sizeof_vcpu_shift              4
 
 #ifdef CONFIG_SMP
 #define preempt_disable(reg)   incl TI_preempt_count(reg)
@@ -813,35 +813,9 @@
        pushl $do_alignment_check
        jmp error_code
 
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-# fastcall register usage:  %eax = pt_regs, %edx = error code,
-#                          %ecx = fault address
 ENTRY(page_fault)
-       pushl %ds
-       pushl %eax
-       xorl %eax, %eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       decl %eax                       /* eax = -1 */
-       pushl %ecx
-       pushl %ebx
-       cld
-       pushl %es
-#      UNWIND_ESPFIX_STACK
-       popl %edi
-       movl ES(%esp), %ecx             /* get the faulting address */
-       movl ORIG_EAX(%esp), %edx       /* get the error code */
-       movl %eax, ORIG_EAX(%esp)
-       movl %edi, ES(%esp)
-       movl $(__KERNEL_DS),%eax
-       movl %eax, %ds
-       movl %eax, %es
-       movl %esp,%eax                  /* pt_regs pointer */
-       call do_page_fault
-       jmp ret_from_exception
+       pushl $do_page_fault
+       jmp error_code
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Thu Nov 10 10:43:26 2005
@@ -972,7 +972,7 @@
 
 
 /*
- * NB. All these are "trap gates" (i.e. events_mask isn't cleared) except
+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
  * for those that specify <dpl>|4 in the second field.
  */
 static trap_info_t trap_table[] = {
@@ -988,7 +988,7 @@
        { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present        },
        { 12, 0, __KERNEL_CS, (unsigned long)stack_segment              },
        { 13, 0, __KERNEL_CS, (unsigned long)general_protection         },
-       { 14, 0, __KERNEL_CS, (unsigned long)page_fault                 },
+       { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault               },
        { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment          },
        { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error          },
        { 17, 0, __KERNEL_CS, (unsigned long)alignment_check            },
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c     Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c     Thu Nov 10 10:43:26 2005
@@ -279,14 +279,17 @@
  *     bit 1 == 0 means read, 1 means write
  *     bit 2 == 0 means kernel, 1 means user-mode
  */
-fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code,
-                             unsigned long address)
+fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
+       unsigned long address;
        int write;
        siginfo_t info;
+
+       address = HYPERVISOR_shared_info->vcpu_data[
+               smp_processor_id()].arch.cr2;
 
        /* Set the "privileged fault" bit to something sane. */
        error_code &= ~4;
@@ -297,11 +300,10 @@
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
                                        SIGSEGV) == NOTIFY_STOP)
                return;
-#if 0
+
        /* It's safe to allow irq's after cr2 has been saved */
-       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+       if ((uint8_t)(regs->xcs >> 16) == 0)
                local_irq_enable();
-#endif
 
        tsk = current;
 
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S       Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S       Thu Nov 10 10:43:26 2005
@@ -874,42 +874,8 @@
        ret
        CFI_ENDPROC
 
-
- /*
- * Copy error_entry because of the different stack frame
- */                        
 ENTRY(page_fault)
-        movq (%rsp),%rcx
-        movq 8(%rsp),%r11
-        addq $0x10,%rsp         # now %rsp points to %cr2
-        pushq %rax
-        leaq do_page_fault(%rip),%rax
-       cld     
-       subq  $13*8,%rsp
-       movq %rdx,12*8(%rsp)    # save %rdx
-       movq 13*8(%rsp),%rdx    # load rax
-       movq %rcx,11*8(%rsp)
-       movq %rdx,10*8(%rsp)    # store rax
-        movq %rsi,13*8(%rsp)    # now save %rsi
-        movq 14*8(%rsp),%rdx    # load %cr2, 3rd argument
-       movq %r8, 9*8(%rsp)
-       movq %r9, 8*8(%rsp)
-       movq %r10,7*8(%rsp)
-       movq %r11,6*8(%rsp)
-       movq %rbx,5*8(%rsp) 
-       movq %rbp,4*8(%rsp) 
-       movq %r12,3*8(%rsp) 
-       movq %r13,2*8(%rsp) 
-       movq %r14,1*8(%rsp) 
-       movq %r15,(%rsp)
-#if 0        
-       cmpl $__KERNEL_CS,CS(%rsp)
-       je  error_kernelspace
-#endif
-        /*
-         * 1st and 2nd arguments are set by error_call_handler
-         */
-        jmp error_call_handler
+       errorentry do_page_fault
 
 ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
@@ -948,24 +914,15 @@
 paranoid_exit:
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz paranoid_restore
+       testl $3,CS(%rsp)
+       jnz   paranoid_userspace
 paranoid_swapgs:       
-/*     swapgs */
+       swapgs
 paranoid_restore:      
        RESTORE_ALL 8
-/*     iretq */
+       iretq
 paranoid_userspace:    
        GET_THREAD_INFO(%rcx)
-#      movl threadinfo_flags(%rcx),%edx
-#      testl $_TIF_NEED_RESCHED,%edx
-#      jnz paranoid_resched
-#      testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-#      jnz paranoid_signal
-#      jmp paranoid_swapgs
-#paranoid_resched:             
-#/*    sti */
-#      call schedule
-#      jmp paranoid_exit
-#paranoid_signal:              
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs
@@ -975,13 +932,10 @@
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule
        movl %ebx,%edx                  /* arg3: thread flags */
-/*     sti */
-#      xorl %esi,%esi /* oldset */
-#      movq %rsp,%rdi /* &pt_regs */
+       sti
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
-#      jmp paranoid_exit
        cli
        jmp paranoid_userspace
 paranoid_schedule:
@@ -1057,4 +1011,3 @@
 ENTRY(call_debug)
        zeroentry do_call_debug
 
-
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c       Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/traps.c       Thu Nov 10 10:43:26 2005
@@ -89,7 +89,7 @@
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
-       if (regs->eflags & X86_EFLAGS_IF)
+       if ((uint8_t)(regs->cs >> 32) == 0)
                local_irq_enable();
 }
 
@@ -905,30 +905,33 @@
 }
 
 
+/*
+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
+ * specify <dpl>|4 in the second field.
+ */
 static trap_info_t trap_table[] = {
-        {  0, 0, (__KERNEL_CS|0x3), (unsigned long)divide_error               },
-        {  1, 0, (__KERNEL_CS|0x3), (unsigned long)debug                      },
-        {  3, 3, (__KERNEL_CS|0x3), (unsigned long)int3                       },
-        {  4, 3, (__KERNEL_CS|0x3), (unsigned long)overflow                   },
-        {  5, 3, (__KERNEL_CS|0x3), (unsigned long)bounds                     },
-        {  6, 0, (__KERNEL_CS|0x3), (unsigned long)invalid_op                 },
-        {  7, 0, (__KERNEL_CS|0x3), (unsigned long)device_not_available       },
-        {  9, 0, (__KERNEL_CS|0x3), (unsigned long)coprocessor_segment_overrun},
-        { 10, 0, (__KERNEL_CS|0x3), (unsigned long)invalid_TSS                },
-        { 11, 0, (__KERNEL_CS|0x3), (unsigned long)segment_not_present        },
-        { 12, 0, (__KERNEL_CS|0x3), (unsigned long)stack_segment              },
-        { 13, 0, (__KERNEL_CS|0x3), (unsigned long)general_protection         },
-        { 14, 0, (__KERNEL_CS|0x3), (unsigned long)page_fault                 },
-        { 15, 0, (__KERNEL_CS|0x3), (unsigned long)spurious_interrupt_bug     },
-        { 16, 0, (__KERNEL_CS|0x3), (unsigned long)coprocessor_error          },
-        { 17, 0, (__KERNEL_CS|0x3), (unsigned long)alignment_check            },
+        {  0, 0|4, (__KERNEL_CS|0x3), (unsigned long)divide_error              },
+        {  1, 0|4, (__KERNEL_CS|0x3), (unsigned long)debug                     },
+        {  3, 3|4, (__KERNEL_CS|0x3), (unsigned long)int3                      },
+        {  4, 3|4, (__KERNEL_CS|0x3), (unsigned long)overflow                  },
+        {  5, 3|4, (__KERNEL_CS|0x3), (unsigned long)bounds                    },
+        {  6, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_op                },
+        {  7, 0|4, (__KERNEL_CS|0x3), (unsigned long)device_not_available      },
+        {  9, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_segment_overrun},
+        { 10, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_TSS               },
+        { 11, 0|4, (__KERNEL_CS|0x3), (unsigned long)segment_not_present       },
+        { 12, 0|4, (__KERNEL_CS|0x3), (unsigned long)stack_segment             },
+        { 13, 0|4, (__KERNEL_CS|0x3), (unsigned long)general_protection        },
+        { 14, 0|4, (__KERNEL_CS|0x3), (unsigned long)page_fault                },
+        { 15, 0|4, (__KERNEL_CS|0x3), (unsigned long)spurious_interrupt_bug    },
+        { 16, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_error         },
+        { 17, 0|4, (__KERNEL_CS|0x3), (unsigned long)alignment_check           },
 #ifdef CONFIG_X86_MCE
-        { 18, 0, (__KERNEL_CS|0x3), (unsigned long)machine_check              },
-#endif
-        { 19, 0, (__KERNEL_CS|0x3), (unsigned long)simd_coprocessor_error     },
-        { SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), (unsigned long)system_call   },
+        { 18, 0|4, (__KERNEL_CS|0x3), (unsigned long)machine_check             },
+#endif
+        { 19, 0|4, (__KERNEL_CS|0x3), (unsigned long)simd_coprocessor_error    },
 #ifdef CONFIG_IA32_EMULATION
-       { IA32_SYSCALL_VECTOR, 3, (__KERNEL_CS|0x3), (unsigned long)ia32_syscall},
+       { IA32_SYSCALL_VECTOR, 3|4, (__KERNEL_CS|0x3), (unsigned long)ia32_syscall},
 #endif
         {  0, 0,           0, 0                                              }
 };
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S   Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/xen_entry.S   Thu Nov 10 10:43:26 2005
@@ -5,7 +5,7 @@
 #define evtchn_upcall_pending          0
 #define evtchn_upcall_mask             1
 
-#define sizeof_vcpu_shift              4
+#define sizeof_vcpu_shift              5
 
 #ifdef CONFIG_SMP
 //#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
diff -r ad6a208992cc -r 8a5dca8c1e8f linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Wed Nov  9 15:40:07 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Thu Nov 10 10:43:26 2005
@@ -318,12 +318,12 @@
  *     bit 2 == 0 means kernel, 1 means user-mode
  *      bit 3 == 1 means fault was an instruction fetch
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
-       unsigned long address)
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
+       unsigned long address;
        const struct exception_table_entry *fixup;
        int write;
        siginfo_t info;
@@ -342,11 +342,16 @@
                }
        }
 #endif
+
+       /* get the address */
+       address = HYPERVISOR_shared_info->vcpu_data[
+               smp_processor_id()].arch.cr2;
+
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
                                        SIGSEGV) == NOTIFY_STOP)
                return;
 
-       if (likely(regs->eflags & X86_EFLAGS_IF))
+       if (likely((uint8_t)(regs->cs >> 32) == 0))
                local_irq_enable();
 
        if (unlikely(page_fault_trace))
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/setup.c      Thu Nov 10 10:43:26 2005
@@ -430,6 +430,10 @@
     /* Sanity check for unwanted bloat of dom0_op_t structure. */
     BUG_ON(sizeof(((dom0_op_t *)0)->u) != sizeof(((dom0_op_t *)0)->u.pad));
 
+    BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
+    BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
+    BUG_ON(sizeof(vcpu_info_t) != (sizeof(unsigned long) * 4));
+
     init_frametable();
 
     end_boot_allocator();
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/shadow32.c   Thu Nov 10 10:43:26 2005
@@ -997,7 +997,8 @@
     if ( new_modes & SHM_log_dirty )
     {
         ASSERT( !d->arch.shadow_dirty_bitmap );
-        d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
+        d->arch.shadow_dirty_bitmap_size = 
+            (d->shared_info->arch.max_pfn +  63) & ~63;
         d->arch.shadow_dirty_bitmap = 
             xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
                                          (8 * sizeof(unsigned long)));
@@ -1287,34 +1288,28 @@
         d->arch.shadow_dirty_net_count   = 0;
         d->arch.shadow_dirty_block_count = 0;
  
-        if ( (d->max_pages > sc->pages) || 
-             (sc->dirty_bitmap == NULL) || 
+        if ( (sc->dirty_bitmap == NULL) || 
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
- 
-        sc->pages = d->max_pages;
+
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size; 
 
 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
-        for ( i = 0; i < d->max_pages; i += chunk )
-        {
-            int bytes = ((((d->max_pages - i) > chunk) ?
-                          chunk : (d->max_pages - i)) + 7) / 8;
+        for ( i = 0; i < sc->pages; i += chunk )
+        {
+            int bytes = ((((sc->pages - i) > chunk) ?
+                          chunk : (sc->pages - i)) + 7) / 8;
      
             if (copy_to_user(
                     sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
                     d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                     bytes))
             {
-                // copy_to_user can fail when copying to guest app memory.
-                // app should zero buffer after mallocing, and pin it
                 rc = -EINVAL;
-                memset(
-                    d->arch.shadow_dirty_bitmap + 
-                    (i/(8*sizeof(unsigned long))),
-                    0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
                 break;
             }
 
@@ -1331,17 +1326,19 @@
         sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
         sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
  
-        if ( (d->max_pages > sc->pages) || 
-             (sc->dirty_bitmap == NULL) || 
+
+        if ( (sc->dirty_bitmap == NULL) || 
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
  
-        sc->pages = d->max_pages;
-        if (copy_to_user(
-            sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8))
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size; 
+
+        if (copy_to_user(sc->dirty_bitmap, 
+                         d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
         {
             rc = -EINVAL;
             break;
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/shadow_public.c      Thu Nov 10 10:43:26 2005
@@ -1009,7 +1009,8 @@
     if ( new_modes & SHM_log_dirty )
     {
         ASSERT( !d->arch.shadow_dirty_bitmap );
-        d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
+        d->arch.shadow_dirty_bitmap_size = 
+            (d->shared_info->arch.max_pfn +  63) & ~63;
         d->arch.shadow_dirty_bitmap = 
             xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
                           (8 * sizeof(unsigned long)));
@@ -1163,34 +1164,29 @@
         d->arch.shadow_dirty_net_count   = 0;
         d->arch.shadow_dirty_block_count = 0;
  
-        if ( (d->max_pages > sc->pages) || 
-             (sc->dirty_bitmap == NULL) || 
+
+        if ( (sc->dirty_bitmap == NULL) || 
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
- 
-        sc->pages = d->max_pages;
+
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size; 
 
 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
-        for ( i = 0; i < d->max_pages; i += chunk )
-        {
-            int bytes = ((((d->max_pages - i) > chunk) ?
-                          chunk : (d->max_pages - i)) + 7) / 8;
+        for ( i = 0; i < sc->pages; i += chunk )
+        {
+            int bytes = ((((sc->pages - i) > chunk) ?
+                          chunk : (sc->pages - i)) + 7) / 8;
 
             if (copy_to_user(
                 sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
                 d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                 bytes))
             {
-                // copy_to_user can fail when copying to guest app memory.
-                // app should zero buffer after mallocing, and pin it
                 rc = -EINVAL;
-                memset(
-                    d->arch.shadow_dirty_bitmap + 
-                    (i/(8*sizeof(unsigned long))),
-                    0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
                 break;
             }
             memset(
@@ -1206,17 +1202,18 @@
         sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
         sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
  
-        if ( (d->max_pages > sc->pages) || 
-             (sc->dirty_bitmap == NULL) || 
+        if ( (sc->dirty_bitmap == NULL) || 
              (d->arch.shadow_dirty_bitmap == NULL) )
         {
             rc = -EINVAL;
             break;
         }
  
-        sc->pages = d->max_pages;
-        if (copy_to_user(
-            sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8))
+        if(sc->pages > d->arch.shadow_dirty_bitmap_size)
+            sc->pages = d->arch.shadow_dirty_bitmap_size; 
+
+        if (copy_to_user(sc->dirty_bitmap, 
+                         d->arch.shadow_dirty_bitmap, (sc->pages+7)/8))
         {
             rc = -EINVAL;
             break;
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/traps.c      Thu Nov 10 10:43:26 2005
@@ -412,16 +412,16 @@
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.trap_bounce;
 
+    v->arch.guest_context.ctrlreg[2] = addr;
+    v->vcpu_info->arch.cr2           = addr;
+
     ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
-    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
-    tb->cr2        = addr;
+    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
     tb->error_code = error_code;
     tb->cs         = ti->cs;
     tb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
         tb->flags |= TBF_INTERRUPT;
-
-    v->arch.guest_context.ctrlreg[2] = addr;
 }
 
 static int handle_perdomain_mapping_fault(
@@ -931,6 +931,7 @@
 
         case 2: /* Write CR2 */
             v->arch.guest_context.ctrlreg[2] = *reg;
+            v->vcpu_info->arch.cr2           = *reg;
             break;
             
         case 3: /* Write CR3 */
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/x86_32/asm-offsets.c Thu Nov 10 10:43:26 2005
@@ -75,7 +75,6 @@
     BLANK();
 
     OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
-    OFFSET(TRAPBOUNCE_cr2, struct trap_bounce, cr2);
     OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
     OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
     OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/x86_32/entry.S       Thu Nov 10 10:43:26 2005
@@ -401,30 +401,25 @@
         subl $4,%esi                    # push error_code onto guest frame
         movl TRAPBOUNCE_error_code(%edx),%eax
 FLT17:  movl %eax,%gs:(%esi)
-        testb $TBF_EXCEPTION_CR2,%cl
-        jz   2f
-        subl $4,%esi                    # push %cr2 onto guest frame
-        movl TRAPBOUNCE_cr2(%edx),%eax
-FLT18:  movl %eax,%gs:(%esi)
 1:      testb $TBF_FAILSAFE,%cl
         jz   2f
         subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
         testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
         jz   nvm86_2
         xorl %eax,%eax               # VM86: we write zero selector values
-FLT19:  movl %eax,%gs:(%esi) 
-FLT20:  movl %eax,%gs:4(%esi)
-FLT21:  movl %eax,%gs:8(%esi) 
-FLT22:  movl %eax,%gs:12(%esi)
+FLT18:  movl %eax,%gs:(%esi) 
+FLT19:  movl %eax,%gs:4(%esi)
+FLT20:  movl %eax,%gs:8(%esi) 
+FLT21:  movl %eax,%gs:12(%esi)
         jmp  2f
 nvm86_2:movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
-FLT23:  movl %eax,%gs:(%esi) 
+FLT22:  movl %eax,%gs:(%esi) 
         movl UREGS_es+4(%esp),%eax
-FLT24:  movl %eax,%gs:4(%esi)
+FLT23:  movl %eax,%gs:4(%esi)
         movl UREGS_fs+4(%esp),%eax
-FLT25:  movl %eax,%gs:8(%esi) 
+FLT24:  movl %eax,%gs:8(%esi) 
         movl UREGS_gs+4(%esp),%eax
-FLT26:  movl %eax,%gs:12(%esi)
+FLT25:  movl %eax,%gs:12(%esi)
 2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
         jz   nvm86_3
         xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
@@ -456,7 +451,6 @@
         .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
         .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
         .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
-        .long FLT26,domain_crash_synchronous
 .previous
 
         ALIGN
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/x86_64/asm-offsets.c Thu Nov 10 10:43:26 2005
@@ -75,7 +75,6 @@
     BLANK();
 
     OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
-    OFFSET(TRAPBOUNCE_cr2, struct trap_bounce, cr2);
     OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
     OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
     OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Wed Nov  9 15:40:07 2005
+++ b/xen/arch/x86/x86_64/entry.S       Thu Nov 10 10:43:26 2005
@@ -370,27 +370,22 @@
         subq  $8,%rsi
         movl  TRAPBOUNCE_error_code(%rdx),%eax
 FLT7:   movq  %rax,(%rsi)               # ERROR CODE
-        testb $TBF_EXCEPTION_CR2,%cl
-        jz    2f
-        subq  $8,%rsi
-        movq  TRAPBOUNCE_cr2(%rdx),%rax
-FLT8:   movq  %rax,(%rsi)               # CR2
 1:      testb $TBF_FAILSAFE,%cl
         jz    2f
         subq  $32,%rsi
         movl  %gs,%eax
-FLT9:   movq  %rax,24(%rsi)             # GS
+FLT8:   movq  %rax,24(%rsi)             # GS
         movl  %fs,%eax
-FLT10:  movq  %rax,16(%rsi)             # FS
+FLT9:   movq  %rax,16(%rsi)             # FS
         movl  %es,%eax
-FLT11:  movq  %rax,8(%rsi)              # ES
+FLT10:  movq  %rax,8(%rsi)              # ES
         movl  %ds,%eax
-FLT12:  movq  %rax,(%rsi)               # DS
+FLT11:  movq  %rax,(%rsi)               # DS
 2:      subq  $16,%rsi
         movq  UREGS_r11+8(%rsp),%rax
-FLT13:  movq  %rax,8(%rsi)              # R11
+FLT12:  movq  %rax,8(%rsi)              # R11
         movq  UREGS_rcx+8(%rsp),%rax
-FLT14:  movq  %rax,(%rsi)               # RCX
+FLT13:  movq  %rax,(%rsi)               # RCX
         /* Rewrite our stack frame and return to guest-OS mode. */
         /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
         movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
@@ -411,7 +406,6 @@
         .quad  FLT8,domain_crash_synchronous ,  FLT9,domain_crash_synchronous
         .quad FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
         .quad FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
-        .quad FLT14,domain_crash_synchronous
 .previous
 
         ALIGN
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Nov  9 15:40:07 2005
+++ b/xen/include/asm-x86/domain.h      Thu Nov 10 10:43:26 2005
@@ -8,7 +8,6 @@
 
 struct trap_bounce {
     unsigned long  error_code;
-    unsigned long  cr2;
     unsigned short flags; /* TBF_ */
     unsigned short cs;
     unsigned long  eip;
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Wed Nov  9 15:40:07 2005
+++ b/xen/include/asm-x86/processor.h   Thu Nov 10 10:43:26 2005
@@ -121,7 +121,6 @@
 /* 'trap_bounce' flags values */
 #define TBF_EXCEPTION          1
 #define TBF_EXCEPTION_ERRCODE  2
-#define TBF_EXCEPTION_CR2      4
 #define TBF_INTERRUPT          8
 #define TBF_FAILSAFE          16
 
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed Nov  9 15:40:07 2005
+++ b/xen/include/public/arch-ia64.h    Thu Nov 10 10:43:26 2005
@@ -271,12 +271,10 @@
 
 typedef mapped_regs_t vpd_t;
 
-#define __ARCH_HAS_VCPU_INFO
-
 typedef struct {
     unsigned int flags;
     unsigned long start_info_pfn;
-} arch_shared_info_t;  // DON'T PACK 
+} arch_shared_info_t;
 
 typedef struct vcpu_guest_context {
 #define VGCF_FPU_VALID (1<<0)
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/public/arch-x86_32.h
--- a/xen/include/public/arch-x86_32.h  Wed Nov  9 15:40:07 2005
+++ b/xen/include/public/arch-x86_32.h  Thu Nov 10 10:43:26 2005
@@ -132,6 +132,11 @@
     unsigned long pfn_to_mfn_frame_list_list; 
 } arch_shared_info_t;
 
+typedef struct {
+    unsigned long cr2;
+    unsigned long pad; /* sizeof(vcpu_info_t) == 16 */
+} arch_vcpu_info_t;
+
 #endif
 
 #endif
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h  Wed Nov  9 15:40:07 2005
+++ b/xen/include/public/arch-x86_64.h  Thu Nov 10 10:43:26 2005
@@ -201,6 +201,11 @@
     unsigned long pfn_to_mfn_frame_list_list; 
 } arch_shared_info_t;
 
+typedef struct {
+    unsigned long cr2;
+    unsigned long pad; /* sizeof(vcpu_info_t) == 32 */
+} arch_vcpu_info_t;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h     Wed Nov  9 15:40:07 2005
+++ b/xen/include/public/dom0_ops.h     Thu Nov 10 10:43:26 2005
@@ -19,7 +19,7 @@
  * This makes sure that old versions of dom0 tools will stop working in a
  * well-defined way (rather than crashing the machine, for instance).
  */
-#define DOM0_INTERFACE_VERSION   0xAAAA1011
+#define DOM0_INTERFACE_VERSION   0xAAAA1012
 
 /************************************************************************/
 
diff -r ad6a208992cc -r 8a5dca8c1e8f xen/include/public/xen.h
--- a/xen/include/public/xen.h  Wed Nov  9 15:40:07 2005
+++ b/xen/include/public/xen.h  Thu Nov 10 10:43:26 2005
@@ -299,9 +299,7 @@
     uint8_t evtchn_upcall_pending;
     uint8_t evtchn_upcall_mask;
     unsigned long evtchn_pending_sel;
-#ifdef __ARCH_HAS_VCPU_INFO
     arch_vcpu_info_t arch;
-#endif
 } vcpu_info_t;
 
 typedef struct vcpu_time_info {
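
A closing note on the public interface: vcpu_info_t now always embeds an arch_vcpu_info_t (the __ARCH_HAS_VCPU_INFO guard is gone), the x86 variants carry cr2 plus padding, and setup.c gains BUG_ON size checks. The sketch below merely restates the layout implied by xen.h and arch-x86_32.h/arch-x86_64.h above, as an illustration:

/*
 * Per-VCPU shared-memory layout implied by this changeset (x86).
 * 16 bytes on x86_32, 32 bytes on x86_64 -- i.e. four longs, which is
 * what the new BUG_ON() in xen/arch/x86/setup.c asserts.
 */
typedef struct {
    unsigned long cr2;   /* faulting address, written by Xen */
    unsigned long pad;   /* keeps sizeof(vcpu_info_t) == 4 longs */
} arch_vcpu_info_t;

typedef struct {
    uint8_t          evtchn_upcall_pending;
    uint8_t          evtchn_upcall_mask;
    unsigned long    evtchn_pending_sel;
    arch_vcpu_info_t arch;               /* now unconditionally present */
} vcpu_info_t;
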

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
