[Xen-changelog] [xen-unstable] xentrace: Trace p2m events

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] xentrace: Trace p2m events
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 03 Feb 2010 01:55:16 -0800
Delivery-date: Wed, 03 Feb 2010 01:55:42 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1265189723 0
# Node ID 6ade83cb21ca6a102c598decb9893969b607715d
# Parent  3312e31dcdeb0143379369b791a77e419620c893
xentrace: Trace p2m events

Add more tracing to aid in debugging ballooning / populate-on-demand (PoD):
* Nested page faults for EPT/NPT systems
* set_p2m_entry
* Decrease reservation (for ballooning)
* PoD populate, zero reclaim, superpage splinter
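
These records all flow through the ordinary trace buffers. As a rough
example (assuming the usual class masks TRC_MEM = 0x0010f000 and
TRC_HVM = 0x0008f000 from trace.h, and an illustrative output path), a
capture of both classes would look something like
"xentrace -e 0x0018f000 /tmp/p2m.trace"; a sketch of decoding the raw
records follows the patch.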

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c |   16 +++++++++
 xen/arch/x86/hvm/vmx/vmx.c |   16 +++++++++
 xen/arch/x86/mm/p2m.c      |   76 ++++++++++++++++++++++++++++++++++++++++++++-
 xen/common/memory.c        |   15 ++++++++
 xen/include/public/trace.h |    8 ++++
 5 files changed, 130 insertions(+), 1 deletion(-)

diff -r 3312e31dcdeb -r 6ade83cb21ca xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Feb 03 09:33:12 2010 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Feb 03 09:35:23 2010 +0000
@@ -893,6 +893,22 @@ static void svm_do_nested_pgfault(paddr_
     mfn_t mfn;
     p2m_type_t p2mt;
 
+    if ( tb_init_done )
+    {
+        struct {
+            uint64_t gpa;
+            uint64_t mfn;
+            u32 qualification;
+            u32 p2mt;
+        } _d;
+
+        _d.gpa = gpa;
+        _d.qualification = 0;
+        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+    }
+
     if ( hvm_hap_nested_page_fault(gfn) )
         return;
 
diff -r 3312e31dcdeb -r 6ade83cb21ca xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Feb 03 09:33:12 2010 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Feb 03 09:35:23 2010 +0000
@@ -2100,6 +2100,22 @@ static void ept_handle_violation(unsigne
     mfn_t mfn;
     p2m_type_t p2mt;
 
+    if ( tb_init_done )
+    {
+        struct {
+            uint64_t gpa;
+            uint64_t mfn;
+            u32 qualification;
+            u32 p2mt;
+        } _d;
+
+        _d.gpa = gpa;
+        _d.qualification = qualification;
+        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+    }
+
     if ( (qualification & EPT_GLA_VALID) &&
          hvm_hap_nested_page_fault(gfn) )
         return;
diff -r 3312e31dcdeb -r 6ade83cb21ca xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Feb 03 09:33:12 2010 +0000
+++ b/xen/arch/x86/mm/p2m.c     Wed Feb 03 09:35:23 2010 +0000
@@ -829,6 +829,21 @@ p2m_pod_zero_check_superpage(struct doma
             goto out_reset;
     }
 
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int d:16,order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.d = d->domain_id;
+        t.order = 9;
+
+        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+    }
+
     /* Finally!  We've passed all the checks, and can add the mfn superpage
      * back on the PoD cache, and account for the new p2m PoD entries */
     p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
@@ -928,6 +943,21 @@ p2m_pod_zero_check(struct domain *d, uns
         }
         else
         {
+            if ( tb_init_done )
+            {
+                struct {
+                    u64 gfn, mfn;
+                    int d:16,order:16;
+                } t;
+
+                t.gfn = gfns[i];
+                t.mfn = mfn_x(mfns[i]);
+                t.d = d->domain_id;
+                t.order = 0;
+
+                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+            }
+
             /* Add to cache, and account for the new p2m PoD entry */
             p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0);
             d->arch.p2m->pod.entry_count++;
@@ -1073,6 +1103,21 @@ p2m_pod_demand_populate(struct domain *d
     p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
     BUG_ON(p2md->pod.entry_count < 0);
 
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int d:16,order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.d = d->domain_id;
+        t.order = order;
+
+        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+    }
+
     return 0;
 out_of_memory:
     spin_unlock(&d->page_alloc_lock);
@@ -1091,6 +1136,18 @@ remap_and_retry:
     for(i=0; i<(1<<order); i++)
         set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
                       p2m_populate_on_demand);
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn;
+            int d:16;
+        } t;
+
+        t.gfn = gfn;
+        t.d = d->domain_id;
+
+        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
+    }
 
     return 0;
 }
@@ -1140,6 +1197,23 @@ p2m_set_entry(struct domain *d, unsigned
     l1_pgentry_t entry_content;
     l2_pgentry_t l2e_content;
     int rv=0;
+
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int p2mt;
+            int d:16,order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.p2mt = p2mt;
+        t.d = d->domain_id;
+        t.order = page_order;
+
+        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+    }
 
 #if CONFIG_PAGING_LEVELS >= 4
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
@@ -1225,7 +1299,7 @@ p2m_set_entry(struct domain *d, unsigned
     /* Success */
     rv = 1;
 
- out:
+out:
     unmap_domain_page(table);
     return rv;
 }
diff -r 3312e31dcdeb -r 6ade83cb21ca xen/common/memory.c
--- a/xen/common/memory.c       Wed Feb 03 09:33:12 2010 +0000
+++ b/xen/common/memory.c       Wed Feb 03 09:35:23 2010 +0000
@@ -28,6 +28,7 @@
 #include <xen/numa.h>
 #include <public/memory.h>
 #include <xsm/xsm.h>
+#include <xen/trace.h>
 
 struct memop_args {
     /* INPUT */
@@ -221,6 +222,20 @@ static void decrease_reservation(struct 
 
         if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
             goto out;
+
+        if ( tb_init_done )
+        {
+            struct {
+                u64 gfn;
+                int d:16,order:16;
+            } t;
+
+            t.gfn = gmfn;
+            t.d = a->domain->domain_id;
+            t.order = a->extent_order;
+
+            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
+        }
 
         /* See if populate-on-demand wants to handle this */
         if ( is_hvm_domain(a->domain)
diff -r 3312e31dcdeb -r 6ade83cb21ca xen/include/public/trace.h
--- a/xen/include/public/trace.h        Wed Feb 03 09:33:12 2010 +0000
+++ b/xen/include/public/trace.h        Wed Feb 03 09:35:23 2010 +0000
@@ -82,6 +82,12 @@
 #define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
 #define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
 #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY       (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE        (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM    (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
 
 #define TRC_PV_HYPERCALL             (TRC_PV +  1)
 #define TRC_PV_TRAP                  (TRC_PV +  3)
@@ -149,6 +155,8 @@
 #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
 #define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
 #define TRC_HVM_INTR_WINDOW     (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF             (TRC_HVM_HANDLER + 0x21)
+
 #define TRC_HVM_IOPORT_WRITE    (TRC_HVM_HANDLER + 0x216)
 #define TRC_HVM_IOMEM_WRITE     (TRC_HVM_HANDLER + 0x217)
 

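For anyone post-processing the raw xentrace output, below is a minimal
decoder sketch. It is not part of the changeset above: it assumes the
variable-size t_rec layout from xen/include/public/trace.h (28-bit event
ID, 3 bits of extra-word count, 1 TSC-present bit), the class value
TRC_MEM = 0x0010f000, and an x86 little-endian build so the d:16/order:16
bitfields pack the same way as in the hypervisor; file names are
illustrative. In practice xentrace_format with a new entry in
tools/xentrace/formats (or xenalyze) would be the more usual route.

/* pod_decode.c - hedged sketch, not part of this changeset.
 * Walks a raw xentrace capture and prints the new PoD-populate records.
 * Check trace.h in your tree before trusting the constants below. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TRC_MEM_POD_POPULATE (0x0010f000 + 16)  /* TRC_MEM + 16, as above */

int main(int argc, char **argv)
{
    FILE *f;
    uint32_t hdr, extra[7];

    if ( argc < 2 || (f = fopen(argv[1], "rb")) == NULL )
        return 1;

    while ( fread(&hdr, sizeof(hdr), 1, f) == 1 )
    {
        uint32_t event   = hdr & 0x0fffffff;   /* bits 0-27: event ID     */
        unsigned n_extra = (hdr >> 28) & 0x7;  /* bits 28-30: extra u32s  */
        uint64_t tsc;

        /* Bit 31 set: a 64-bit timestamp precedes the payload words. */
        if ( (hdr >> 31) && fread(&tsc, sizeof(tsc), 1, f) != 1 )
            break;
        if ( n_extra && fread(extra, sizeof(uint32_t), n_extra, f) != n_extra )
            break;

        if ( event == TRC_MEM_POD_POPULATE && n_extra >= 5 )
        {
            /* Same layout as the struct traced in p2m_pod_demand_populate() */
            struct { uint64_t gfn, mfn; int d:16, order:16; } t;

            memcpy(&t, extra, 5 * sizeof(uint32_t)); /* u64 gfn, u64 mfn,
                                                      * d:16 / order:16    */
            printf("pod_populate: d%d gfn 0x%"PRIx64" mfn 0x%"PRIx64
                   " order %d\n", t.d, t.gfn, t.mfn, t.order);
        }
    }

    fclose(f);
    return 0;
}

The other TRC_MEM records added here follow the same pattern (gfn, and
where present mfn, first, then domain and order packed as two 16-bit
fields), so extending the event check is mechanical.
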
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
