[Xen-changelog] Fix lazy state switching when context-switching to/from the idle

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Fix lazy state switching when context-switching to/from the idle
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Tue, 29 Mar 2005 21:10:08 +0000
Delivery-date: Tue, 29 Mar 2005 22:03:11 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1390, 2005/03/29 22:10:08+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Fix lazy state switching when context-switching to/from the idle
        domain. Track which domain's state is on each CPU and, for each
        domain, which CPUs are running on its page tables.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
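
        In outline: each CPU remembers which exec_domain's state it
        currently holds (percpu_ctxt[cpu].curr_ed), and each domain keeps
        a cpuset bitmask of CPUs running on its page tables; the bit is
        set before write_ptbase() and cleared only afterwards, so a remote
        TLB-flush IPI can never miss a CPU that still holds stale state.
        The sketch below is an illustrative reduction of that bookkeeping,
        not the patched Xen sources: NR_CPUS, the struct layouts and the
        set_bit_ul/clear_bit_ul/write_ptbase stand-ins are simplified
        assumptions, while the names percpu_ctxt, curr_ed and cpuset are
        taken from the diff that follows.

    /*
     * Illustrative sketch only -- simplified stand-ins, not the real
     * Xen code. See __context_switch() in the diff below.
     */
    #define NR_CPUS 32

    struct domain {
        unsigned long cpuset;        /* CPUs on this domain's page tables */
    };

    struct exec_domain {
        struct domain *domain;
    };

    struct percpu_ctxt {
        struct exec_domain *curr_ed; /* whose state this CPU holds */
    };
    static struct percpu_ctxt percpu_ctxt[NR_CPUS];

    /* Stand-ins for the real atomic bitops and the CR3 reload. */
    static void set_bit_ul(int nr, unsigned long *addr)   { *addr |=  1UL << nr; }
    static void clear_bit_ul(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
    static void write_ptbase(struct exec_domain *ed) { (void)ed; /* load %cr3 */ }

    static void lazy_switch(unsigned int cpu, struct exec_domain *next)
    {
        struct exec_domain *prev = percpu_ctxt[cpu].curr_ed;

        if ( prev == next )
            return;              /* state already resident: the lazy case */

        /*
         * Publish 'next' before the page-table switch and retire 'prev'
         * only afterwards, so an IPI targeted at a domain's cpuset never
         * misses a CPU that still holds state for either domain.
         */
        set_bit_ul(cpu, &next->domain->cpuset);
        write_ptbase(next);
        clear_bit_ul(cpu, &prev->domain->cpuset);

        percpu_ctxt[cpu].curr_ed = next;
    }

        The payoff is in the idle case: switching to the idle domain can
        defer the expensive state swap, and if the same guest is
        rescheduled next, curr_ed still matches and nothing needs
        reloading -- the lazy switching the changeset title refers to.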



 arch/ia64/xenmisc.c     |    2 
 arch/x86/domain.c       |  187 +++++++++++++++++++++--------------
 arch/x86/domain_build.c |    4 
 arch/x86/mm.c           |   13 --
 arch/x86/shadow.c       |    1 
 arch/x86/smp.c          |  254 ++++++++++++++++++------------------------------
 arch/x86/x86_32/mm.c    |   16 ---
 arch/x86/x86_64/mm.c    |   17 ---
 common/dom0_ops.c       |    1 
 common/page_alloc.c     |    7 -
 common/schedule.c       |    1 
 include/asm-x86/mm.h    |    6 -
 include/public/xen.h    |   28 +++--
 include/xen/sched.h     |   38 ++++---
 include/xen/smp.h       |    9 +
 15 files changed, 276 insertions(+), 308 deletions(-)


diff -Nru a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c   2005-03-29 17:03:07 -05:00
+++ b/xen/arch/ia64/xenmisc.c   2005-03-29 17:03:07 -05:00
@@ -53,7 +53,7 @@
 }
 
 /* calls in xen/common code that are unused on ia64 */
-void synchronise_pagetables(unsigned long cpu_mask) { return; }
+void synchronise_execution_state(unsigned long cpu_mask) { }
 
 int grant_table_create(struct domain *d) { return 0; }
 void grant_table_destroy(struct domain *d)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-03-29 17:03:07 -05:00
+++ b/xen/arch/x86/domain.c     2005-03-29 17:03:07 -05:00
@@ -45,13 +45,18 @@
 static int opt_noreboot = 0;
 boolean_param("noreboot", opt_noreboot);
 
+struct percpu_ctxt {
+    struct exec_domain *curr_ed;
+} __cacheline_aligned;
+static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+
 static void default_idle(void)
 {
-    __cli();
+    local_irq_disable();
     if ( !softirq_pending(smp_processor_id()) )
         safe_halt();
     else
-        __sti();
+        local_irq_enable();
 }
 
 static __attribute_used__ void idle_loop(void)
@@ -73,6 +78,8 @@
 {
     /* Just some sanity to ensure that the scheduler is set up okay. */
     ASSERT(current->domain->id == IDLE_DOMAIN_ID);
+    percpu_ctxt[smp_processor_id()].curr_ed = current;
+    set_bit(smp_processor_id(), &current->domain->cpuset);
     domain_unpause_by_systemcontroller(current->domain);
     raise_softirq(SCHEDULE_SOFTIRQ);
     do_softirq();
@@ -110,7 +117,7 @@
             safe_halt();
     }
 
-    __sti();
+    local_irq_enable();
 
     /* Ensure we are the boot CPU. */
     if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid )
@@ -307,10 +314,10 @@
     struct pfn_info *mmfn_info;
     struct domain *d = ed->domain;
 
-    ASSERT(!pagetable_val(ed->arch.monitor_table)); /* we should only get called once */
+    ASSERT(pagetable_val(ed->arch.monitor_table) == 0);
 
     mmfn_info = alloc_domheap_page(NULL);
-    ASSERT( mmfn_info ); 
+    ASSERT(mmfn_info != NULL); 
 
     mmfn = (unsigned long) (mmfn_info - frame_table);
     mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
@@ -326,7 +333,7 @@
 
     ed->arch.monitor_vtable = mpl2e;
 
-    // map the phys_to_machine map into the Read-Only MPT space for this domain
+    /* Map the p2m map into the Read-Only MPT space for this domain. */
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
         mk_l2_pgentry(pagetable_val(ed->arch.phys_table) | __PAGE_HYPERVISOR);
 
@@ -578,19 +585,10 @@
         : "=r" (__r) : "r" (value), "0" (__r) );\
     __r; })
 
-static void switch_segments(
-    struct xen_regs *regs, struct exec_domain *p, struct exec_domain *n)
+static void load_segments(struct exec_domain *p, struct exec_domain *n)
 {
     int all_segs_okay = 1;
 
-    if ( !is_idle_task(p->domain) )
-    {
-        __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
-        __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
-        __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
-        __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
-    }
-
     /* Either selector != 0 ==> reload. */
     if ( unlikely(p->arch.user_ctxt.ds |
                   n->arch.user_ctxt.ds) )
@@ -654,7 +652,8 @@
 
     if ( unlikely(!all_segs_okay) )
     {
-        unsigned long *rsp =
+        struct xen_regs *regs = get_execution_context();
+        unsigned long   *rsp =
             (n->arch.flags & TF_kernel_mode) ?
             (unsigned long *)regs->rsp : 
             (unsigned long *)n->arch.kernel_sp;
@@ -689,6 +688,24 @@
     }
 }
 
+static void save_segments(struct exec_domain *p)
+{
+    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
+    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
+    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
+    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
+}
+
+static void clear_segments(void)
+{
+    __asm__ __volatile__ (
+        "movl %0,%%ds; "
+        "movl %0,%%es; "
+        "movl %0,%%fs; "
+        "movl %0,%%gs; swapgs; movl %0,%%gs"
+        : : "r" (0) );
+}
+
 long do_switch_to_user(void)
 {
     struct xen_regs       *regs = get_execution_context();
@@ -720,80 +737,96 @@
 
 #elif defined(__i386__)
 
-#define switch_segments(_r, _p, _n) ((void)0)
+#define load_segments(_p, _n) ((void)0)
+#define save_segments(_p)     ((void)0)
+#define clear_segments()      ((void)0)
 
 #endif
 
-/*
- * This special macro can be used to load a debugging register
- */
 #define loaddebug(_ed,_reg) \
-               __asm__("mov %0,%%db" #_reg  \
-                       : /* no output */ \
-                       :"r" ((_ed)->debugreg[_reg]))
+       __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_ed)->debugreg[_reg]))
 
-void context_switch(struct exec_domain *prev_p, struct exec_domain *next_p)
+static void __context_switch(void)
 {
-#ifdef __i386__
-    struct tss_struct *tss = init_tss + smp_processor_id();
-#endif
     execution_context_t *stack_ec = get_execution_context();
+    unsigned int         cpu = smp_processor_id();
+    struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
+    struct exec_domain  *n = current;
 
-    __cli();
-
-    /* Switch guest general-register state. */
-    if ( !is_idle_task(prev_p->domain) )
+    if ( !is_idle_task(p->domain) )
     {
-        memcpy(&prev_p->arch.user_ctxt,
+        memcpy(&p->arch.user_ctxt,
                stack_ec, 
                sizeof(*stack_ec));
-        unlazy_fpu(prev_p);
-        CLEAR_FAST_TRAP(&prev_p->arch);
+        unlazy_fpu(p);
+        CLEAR_FAST_TRAP(&p->arch);
+        save_segments(p);
     }
 
-    if ( !is_idle_task(next_p->domain) )
-    {
-        memcpy(stack_ec,
-               &next_p->arch.user_ctxt,
-               sizeof(*stack_ec));
+    memcpy(stack_ec,
+           &n->arch.user_ctxt,
+           sizeof(*stack_ec));
 
-        /* Maybe switch the debug registers. */
-        if ( unlikely(next_p->arch.debugreg[7]) )
-        {
-            loaddebug(&next_p->arch, 0);
-            loaddebug(&next_p->arch, 1);
-            loaddebug(&next_p->arch, 2);
-            loaddebug(&next_p->arch, 3);
-            /* no 4 and 5 */
-            loaddebug(&next_p->arch, 6);
-            loaddebug(&next_p->arch, 7);
-        }
+    /* Maybe switch the debug registers. */
+    if ( unlikely(n->arch.debugreg[7]) )
+    {
+        loaddebug(&n->arch, 0);
+        loaddebug(&n->arch, 1);
+        loaddebug(&n->arch, 2);
+        loaddebug(&n->arch, 3);
+        /* no 4 and 5 */
+        loaddebug(&n->arch, 6);
+        loaddebug(&n->arch, 7);
+    }
 
-        if ( !VMX_DOMAIN(next_p) )
-        {
-            SET_FAST_TRAP(&next_p->arch);
+    if ( !VMX_DOMAIN(n) )
+    {
+        SET_FAST_TRAP(&n->arch);
 
 #ifdef __i386__
+        {
             /* Switch the kernel ring-1 stack. */
-            tss->esp1 = next_p->arch.kernel_sp;
-            tss->ss1  = next_p->arch.kernel_ss;
-#endif
+            struct tss_struct *tss = &init_tss[cpu];
+            tss->esp1 = n->arch.kernel_sp;
+            tss->ss1  = n->arch.kernel_ss;
         }
-
-        /* Switch page tables. */
-        write_ptbase(next_p);
+#endif
     }
 
-    set_current(next_p);
+    set_bit(cpu, &n->domain->cpuset);
+    write_ptbase(n);
+    clear_bit(cpu, &p->domain->cpuset);
+
+    __asm__ __volatile__ ( "lgdt %0" : "=m" (*n->arch.gdt) );
+
+    percpu_ctxt[cpu].curr_ed = n;
+}
+
 
-    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));
+void context_switch(struct exec_domain *prev, struct exec_domain *next)
+{
+    struct exec_domain *realprev;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
