WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH] reduce 'd' debug key's global impact

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] reduce 'd' debug key's global impact
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Tue, 04 May 2010 16:45:24 +0100
Delivery-date: Tue, 04 May 2010 08:45:47 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
On large systems, dumping state may cause time management to get
stalled for so long a period that it wouldn't recover. Therefore alter
the state dumping logic to
- block each CPU as it prints rather than one CPU for a very long time
- don't print useless data (e.g. the hypervisor context of the
  interrupt that is used for triggering the printing, but isn't part
  of the context that's actually interesting)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2010-05-04.orig/xen/arch/ia64/linux-xen/smp.c       2009-05-27 
13:54:05.000000000 +0200
+++ 2010-05-04/xen/arch/ia64/linux-xen/smp.c    2010-05-04 13:22:27.000000000 
+0200
@@ -189,7 +189,7 @@ handle_IPI (int irq, void *dev_id, struc
                                       * At this point the structure may be 
gone unless
                                       * wait is true.
                                       */
-                                     (*func)(info);
+                                     (*func)(info ?: regs);
 
                                      /* Notify the sending CPU that the task 
is done.  */
                                      mb();
--- 2010-05-04.orig/xen/arch/x86/smp.c  2010-04-22 14:43:25.000000000 +0200
+++ 2010-05-04/xen/arch/x86/smp.c       2010-05-04 13:22:27.000000000 +0200
@@ -395,7 +395,7 @@ static void __smp_call_function_interrup
 
     if ( call_data.wait )
     {
-        (*func)(info);
+        (*func)(info ?: get_irq_regs());
         mb();
         atomic_inc(&call_data.finished);
     }
@@ -403,7 +403,7 @@ static void __smp_call_function_interrup
     {
         mb();
         atomic_inc(&call_data.started);
-        (*func)(info);
+        (*func)(info ?: get_irq_regs());
     }
 
     irq_exit();
--- 2010-05-04.orig/xen/common/keyhandler.c     2010-05-04 13:21:53.000000000 
+0200
+++ 2010-05-04/xen/common/keyhandler.c  2010-05-04 13:22:27.000000000 +0200
@@ -71,20 +71,45 @@ static struct keyhandler show_handlers_k
     .desc = "show this message"
 };
 
-static void __dump_execstate(void *unused)
+static cpumask_t dump_execstate_mask;
+
+static void __dump_execstate(void *_regs)
 {
-    dump_execution_state();
-    printk("*** Dumping CPU%d guest state: ***\n", smp_processor_id());
+    struct cpu_user_regs *regs = _regs;
+    unsigned int cpu = smp_processor_id();
+
+    if ( !guest_mode(regs) )
+    {
+        printk("\n*** Dumping CPU%u host state: ***\n", cpu);
+        show_execution_state(regs);
+    }
     if ( is_idle_vcpu(current) )
-        printk("No guest context (CPU is idle).\n");
+        printk("No guest context (CPU%u is idle).\n", cpu);
     else
+    {
+        printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
+               smp_processor_id(), current->domain->domain_id,
+               current->vcpu_id);
         show_execution_state(guest_cpu_user_regs());
+    }
+
+    cpu = cycle_cpu(cpu, dump_execstate_mask);
+    if ( cpu < NR_CPUS )
+    {
+        cpu_clear(cpu, dump_execstate_mask);
+        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 0);
+    }
+    else
+    {
+        printk("\n");
+
+        console_end_sync();
+        watchdog_enable();
+    }
 }
 
 static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
 {
-    unsigned int cpu;
-
     /* We want to get everything out that we possibly can. */
     watchdog_disable();
     console_start_sync();
@@ -92,21 +117,9 @@ static void dump_registers(unsigned char
     printk("'%c' pressed -> dumping registers\n", key);
 
     /* Get local execution state out immediately, in case we get stuck. */
-    printk("\n*** Dumping CPU%d host state: ***\n", smp_processor_id());
-    __dump_execstate(NULL);
-
-    for_each_online_cpu ( cpu )
-    {
-        if ( cpu == smp_processor_id() )
-            continue;
-        printk("\n*** Dumping CPU%d host state: ***\n", cpu);
-        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
-    }
-
-    printk("\n");
-
-    console_end_sync();
-    watchdog_enable();
+    cpus_andnot(dump_execstate_mask, cpu_online_map,
+                cpumask_of_cpu(smp_processor_id()));
+    __dump_execstate(regs);
 }
 
 static struct keyhandler dump_registers_keyhandler = {
--- 2010-05-04.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2010-04-19 
09:12:58.000000000 +0200
+++ 2010-05-04/xen/include/asm-ia64/linux-xen/asm/ptrace.h      2010-05-04 
13:22:27.000000000 +0200
@@ -280,7 +280,7 @@ struct switch_stack {
 # define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + 
IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)                        ((struct ia64_psr *) 
&(regs)->cr_ipsr)
 #ifdef XEN
-# define guest_mode(regs)              (ia64_psr(regs)->cpl != 0)
+# define guest_mode(regs)              (ia64_psr(regs)->cpl && 
!ia64_psr(regs)->vm)
 # define guest_kernel_mode(regs)       (ia64_psr(regs)->cpl == 
CONFIG_CPL0_EMUL)
 # define vmx_guest_kernel_mode(regs)   (ia64_psr(regs)->cpl == 0)
 # define regs_increment_iip(regs)                                      \



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel