diff -r 7cffee0c978a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 06 12:21:32 2008 +0100
@@ -1250,10 +1250,12 @@ asmlinkage void svm_vmexit_handler(struc
 
         if ( paging_fault(va, regs) )
         {
-            if (hvm_long_mode_enabled(v))
-                HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
-            else
-                HVMTRACE_2D(PF_XEN, regs->error_code, va);
+            if ( !trace_will_trace_event(TRC_SHADOW) ) {
+                if ( hvm_long_mode_enabled(v) )
+                    HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
+                else
+                    HVMTRACE_2D(PF_XEN, regs->error_code, va);
+            }
             break;
         }
 
@@ -1261,7 +1263,7 @@ asmlinkage void svm_vmexit_handler(struc
         break;
     }
 
-    /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+    /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
     case VMEXIT_EXCEPTION_MC:
         HVMTRACE_0D(MCE);
         break;
diff -r 7cffee0c978a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 06 12:21:32 2008 +0100
@@ -2101,7 +2101,7 @@ asmlinkage void vmx_vmexit_handler(struc
          !(__vmread(IDT_VECTORING_INFO) & INTR_INFO_VALID_MASK) &&
          (vector != TRAP_double_fault) )
         __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
-                  __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
+                  __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
 
     perfc_incra(cause_vector, vector);
 
@@ -2128,12 +2128,14 @@ asmlinkage void vmx_vmexit_handler(struc
 
         if ( paging_fault(exit_qualification, regs) )
         {
-            if ( hvm_long_mode_enabled(v) )
-                HVMTRACE_LONG_2D (PF_XEN, regs->error_code,
-                                  TRC_PAR_LONG(exit_qualification) );
-            else
-                HVMTRACE_2D (PF_XEN,
-                             regs->error_code, exit_qualification );
+            if ( !trace_will_trace_event(TRC_SHADOW) ) {
+                if ( hvm_long_mode_enabled(v) )
+                    HVMTRACE_LONG_2D (PF_XEN, regs->error_code,
+                                      TRC_PAR_LONG(exit_qualification) );
+                else
+                    HVMTRACE_2D (PF_XEN,
+                                 regs->error_code, exit_qualification );
+            }
             break;
         }
diff -r 7cffee0c978a xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Wed Aug 06 12:21:32 2008 +0100
@@ -39,6 +39,7 @@
 #include <asm/shadow.h>
 #include "private.h"
 
+DEFINE_PER_CPU(uint32_t, trace_shadow_path_flags);
 
 /* Set up the shadow-specific parts of a domain struct at start of day.
  * Called for every domain from arch_domain_create() */
@@ -1005,6 +1006,7 @@ void shadow_promote(struct vcpu *v, mfn_
 
     ASSERT(!test_bit(type, &page->shadow_flags));
     set_bit(type, &page->shadow_flags);
+    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PROMOTE);
 }
 
 void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
@@ -1027,6 +1029,8 @@ void shadow_demote(struct vcpu *v, mfn_t
 #endif
         clear_bit(_PGC_page_table, &page->count_info);
     }
+
+    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_DEMOTE);
 }
 
 /**************************************************************************/
@@ -1094,6 +1098,7 @@ sh_validate_guest_entry(struct vcpu *v,
     ASSERT((page->shadow_flags
             & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
 #endif
+    this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
 
     return result;
 }
@@ -1295,6 +1300,18 @@ static void shadow_unhook_mappings(struc
     }
 }
 
+static inline void trace_shadow_prealloc_unpin(struct domain *d, mfn_t smfn)
+{
+    if ( tb_init_done )
+    {
+        /* Convert smfn to gfn */
+        unsigned long gfn;
+        ASSERT(mfn_valid(smfn));
+        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->backpointer));
+        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
+                    sizeof(gfn), (unsigned char*)&gfn);
+    }
+}
 
 /* Make sure there are at least count order-sized pages
  * available in the shadow page pool. */
@@ -1327,6 +1344,7 @@ static void _shadow_prealloc(
         smfn = shadow_page_to_mfn(sp);
 
         /* Unpin this top-level shadow */
+        trace_shadow_prealloc_unpin(d, smfn);
         sh_unpin(v, smfn);
 
         /* See if that freed up enough space */
@@ -1343,6 +1361,7 @@ static void _shadow_prealloc(
         {
             if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
             {
+                TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
                 shadow_unhook_mappings(v,
                                pagetable_get_mfn(v2->arch.shadow_table[i]));
 
@@ -2200,6 +2219,16 @@ void sh_destroy_shadow(struct vcpu *v, m
     }
 }
 
+static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
+{
+    if ( tb_init_done )
+    {
+        /* Convert gmfn to gfn */
+        unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
+        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+    }
+}
+
 /**************************************************************************/
 /* Remove all writeable mappings of a guest frame from the shadow tables
  * Returns non-zero if we need to flush TLBs.
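Note on the two helpers above: they follow the convention this patch uses throughout. Test tb_init_done first, so that with tracing disabled the hot path pays only one predicted branch, then hand __trace_var() a small fixed-size payload identified by event number. A minimal sketch of the convention, with a hypothetical event name purely for illustration (TRC_SHADOW_EXAMPLE is not part of this patch):

static inline void trace_example(unsigned long gfn)
{
    if ( tb_init_done )  /* cheap gate: tracing off costs one branch */
        __trace_var(TRC_SHADOW_EXAMPLE /* hypothetical */, 0/*!tsc*/,
                    sizeof(gfn), (unsigned char *)&gfn);
}

The consumer (e.g. xentrace) matches the record by event number, so it must decode the payload with exactly the layout the producer used.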
@@ -2265,6 +2294,8 @@ int sh_remove_write_access(struct vcpu *
          || (pg->u.inuse.type_info & PGT_count_mask) == 0 )
         return 0;
 
+    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP);
+
     perfc_incr(shadow_writeable);
 
     /* If this isn't a "normal" writeable page, the domain is trying to
@@ -2285,11 +2316,14 @@ int sh_remove_write_access(struct vcpu *
      * and that mapping is likely to be in the current pagetable,
      * in the guest's linear map (on non-HIGHPTE linux and windows)*/
 
-#define GUESS(_a, _h) do {                                               \
+#define GUESS(_a, _h) do {                                              \
         if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) )   \
-            perfc_incr(shadow_writeable_h_ ## _h);                      \
-        if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )            \
-            return 1;                                                   \
+            perfc_incr(shadow_writeable_h_ ## _h);                      \
+        if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )            \
+        {                                                               \
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP_GUESS_FOUND);       \
+            return 1;                                                   \
+        }                                                               \
     } while (0)
 
     if ( level == 0 && fault_addr )
@@ -2377,6 +2411,7 @@ int sh_remove_write_access(struct vcpu *
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
 
     /* Brute-force search of all the shadows, by walking the hash */
+    trace_shadow_wrmap_bf(gmfn);
     if ( level == 0 )
         perfc_incr(shadow_writeable_bf_1);
     else
diff -r 7cffee0c978a xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Wed Aug 06 12:21:32 2008 +0100
@@ -225,6 +225,7 @@ static uint32_t set_ad_bits(void *guest_
 static uint32_t set_ad_bits(void *guest_p, void *walk_p, int set_dirty)
 {
     guest_intpte_t old, new;
+    int ret = 0;
 
     old = *(guest_intpte_t *)walk_p;
     new = old | _PAGE_ACCESSED | (set_dirty ? _PAGE_DIRTY : 0);
@@ -234,10 +235,16 @@ static uint32_t set_ad_bits(void *guest_
          * into the guest table as well.  If the guest table has changed
          * under our feet then leave it alone. */
         *(guest_intpte_t *)walk_p = new;
-        if ( cmpxchg(((guest_intpte_t *)guest_p), old, new) == old )
-            return 1;
-    }
-    return 0;
+        if ( cmpxchg(((guest_intpte_t *)guest_p), old, new) == old )
+            ret = 1;
+
+        /* FIXME -- this code is longer than necessary */
+        if ( set_dirty )
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SET_AD);
+        else
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SET_A);
+    }
+    return ret;
 }
 
 /* This validation is called with lock held, and after write permission
@@ -1425,6 +1432,7 @@ static int shadow_set_l1e(struct vcpu *v
     {
         /* About to install a new reference */
         if ( shadow_mode_refcounts(d) ) {
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
            if ( shadow_get_page_from_l1e(new_sl1e, d) == 0 )
            {
                /* Doesn't look like a pagetable.
                 */
@@ -1462,6 +1470,7 @@ static int shadow_set_l1e(struct vcpu *v
         {
             shadow_vram_put_l1e(old_sl1e, sl1e, sl1mfn, d);
             shadow_put_page_from_l1e(old_sl1e, d);
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_PUT_REF);
         }
     }
     return flags;
@@ -2897,6 +2906,7 @@ static inline void check_for_early_unsha
     {
         perfc_incr(shadow_early_unshadow);
         sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
+        TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EARLY_UNSHADOW);
     }
     v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(gmfn);
 #endif
@@ -3013,6 +3023,132 @@ static void sh_prefetch(struct vcpu *v,
 
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH */
 
+#if GUEST_PAGING_LEVELS == 4
+typedef u64 guest_va_t;
+typedef u64 guest_pa_t;
+#elif GUEST_PAGING_LEVELS == 3
+typedef u32 guest_va_t;
+typedef u64 guest_pa_t;
+#else
+typedef u32 guest_va_t;
+typedef u32 guest_pa_t;
+#endif
+
+static inline void trace_shadow_gen(u32 event, guest_va_t va)
+{
+    if ( tb_init_done )
+    {
+        event |= (GUEST_PAGING_LEVELS-2)<<8;
+        __trace_var(event, 0/*!tsc*/, sizeof(va), (unsigned char*)&va);
+    }
+}
+
+static inline void trace_shadow_fixup(guest_l1e_t gl1e,
+                                      guest_va_t va)
+{
+    if ( tb_init_done )
+    {
+        struct {
+            /* for PAE, guest_l1e may be 64 while guest_va may be 32;
+               so put it first for alignment sake. */
+            guest_l1e_t gl1e;
+            guest_va_t va;
+            u32 flags;
+        } __attribute__((packed)) d;
+        u32 event;
+
+        event = TRC_SHADOW_FIXUP | ((GUEST_PAGING_LEVELS-2)<<8);
+
+        d.gl1e = gl1e;
+        d.va = va;
+        d.flags = this_cpu(trace_shadow_path_flags);
+
+        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+    }
+}
+
+static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
+                                          guest_va_t va)
+{
+    if ( tb_init_done )
+    {
+        struct {
+            /* for PAE, guest_l1e may be 64 while guest_va may be 32;
+               so put it first for alignment sake. */
+            guest_l1e_t gl1e;
+            guest_va_t va;
+            u32 flags;
+        } __attribute__((packed)) d;
+        u32 event;
+
+        event = TRC_SHADOW_NOT_SHADOW | ((GUEST_PAGING_LEVELS-2)<<8);
+
+        d.gl1e = gl1e;
+        d.va = va;
+        d.flags = this_cpu(trace_shadow_path_flags);
+
+        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+    }
+}
+
+static inline void trace_shadow_emulate_other(u32 event,
+                                              guest_va_t va,
+                                              gfn_t gfn)
+{
+    if ( tb_init_done )
+    {
+        struct {
+            /* for PAE, guest_l1e may be 64 while guest_va may be 32;
+               so put it first for alignment sake. */
+#if GUEST_PAGING_LEVELS == 2
+            u32 gfn;
+#else
+            u64 gfn;
+#endif
+            guest_va_t va;
+        } __attribute__((packed)) d;
+
+        event |= ((GUEST_PAGING_LEVELS-2)<<8);
+
+        d.gfn = gfn_x(gfn);
+        d.va = va;
+
+        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+    }
+}
+
+#if GUEST_PAGING_LEVELS == 3
+static DEFINE_PER_CPU(guest_va_t, trace_emulate_initial_va);
+static DEFINE_PER_CPU(int, trace_extra_emulation_count);
+#endif
+static DEFINE_PER_CPU(guest_pa_t, trace_emulate_write_val);
+
+static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va)
+{
+    if ( tb_init_done )
+    {
+        struct {
+            /* for PAE, guest_l1e may be 64 while guest_va may be 32;
+               so put it first for alignment sake.
+             */
+            guest_l1e_t gl1e, write_val;
+            guest_va_t va;
+            unsigned flags:29, emulation_count:3;
+        } __attribute__((packed)) d;
+        u32 event;
+
+        event = TRC_SHADOW_EMULATE | ((GUEST_PAGING_LEVELS-2)<<8);
+
+        d.gl1e = gl1e;
+        d.write_val.l1 = this_cpu(trace_emulate_write_val);
+        d.va = va;
+#if GUEST_PAGING_LEVELS == 3
+        d.emulation_count = this_cpu(trace_extra_emulation_count);
+#endif
+        d.flags = this_cpu(trace_shadow_path_flags);
+
+        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+    }
+}
 
 /**************************************************************************/
 /* Entry points into the shadow code */
@@ -3028,7 +3164,7 @@ static int sh_page_fault(struct vcpu *v,
 {
     struct domain *d = v->domain;
     walk_t gw;
-    gfn_t gfn;
+    gfn_t gfn = _gfn(0);
     mfn_t gmfn, sl1mfn=_mfn(0);
     shadow_l1e_t sl1e, *ptr_sl1e;
     paddr_t gpa;
@@ -3133,6 +3269,7 @@ static int sh_page_fault(struct vcpu *v,
             reset_early_unshadow(v);
             perfc_incr(shadow_fault_fast_gnp);
             SHADOW_PRINTK("fast path not-present\n");
+            trace_shadow_gen(TRC_SHADOW_FAST_PROPAGATE, va);
             return 0;
         }
         else
@@ -3146,6 +3283,7 @@ static int sh_page_fault(struct vcpu *v,
             perfc_incr(shadow_fault_fast_mmio);
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             reset_early_unshadow(v);
+            trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
             return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) ?
                     EXCRET_fault_fixed : 0);
         }
@@ -3156,6 +3294,7 @@ static int sh_page_fault(struct vcpu *v,
              * Retry and let the hardware give us the right fault next time. */
             perfc_incr(shadow_fault_fast_fail);
             SHADOW_PRINTK("fast path false alarm!\n");
+            trace_shadow_gen(TRC_SHADOW_FALSE_FAST_PATH, va);
             return EXCRET_fault_fixed;
         }
     }
@@ -3196,7 +3335,7 @@ static int sh_page_fault(struct vcpu *v,
         perfc_incr(shadow_fault_bail_real_fault);
         SHADOW_PRINTK("not a shadow fault\n");
         reset_early_unshadow(v);
-        return 0;
+        goto propagate;
     }
 
     /* It's possible that the guest has put pagetables in memory that it has
@@ -3206,7 +3345,7 @@ static int sh_page_fault(struct vcpu *v,
     if ( unlikely(d->is_shutting_down) )
     {
         SHADOW_PRINTK("guest is shutting down\n");
-        return 0;
+        goto propagate;
     }
 
     /* What kind of access are we dealing with? */
@@ -3224,7 +3363,7 @@ static int sh_page_fault(struct vcpu *v,
         SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
                       gfn_x(gfn), mfn_x(gmfn));
         reset_early_unshadow(v);
-        return 0;
+        goto propagate;
     }
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
@@ -3235,6 +3374,8 @@ static int sh_page_fault(struct vcpu *v,
 
     shadow_lock(d);
 
+    TRACE_CLEAR_PATH_FLAGS;
+
     rc = gw_remove_write_accesses(v, va, &gw);
 
     /* First bit set: Removed write access to a page. */
@@ -3287,6 +3428,7 @@ static int sh_page_fault(struct vcpu *v,
          * Get out of the fault handler immediately.
          */
         ASSERT(d->is_shutting_down);
         shadow_unlock(d);
+        trace_shadow_gen(TRC_SHADOW_DOMF_DYING, va);
         return 0;
     }
 
@@ -3379,6 +3521,7 @@ static int sh_page_fault(struct vcpu *v,
         d->arch.paging.log_dirty.fault_count++;
     reset_early_unshadow(v);
 
+    trace_shadow_fixup(gw.l1e, va);
  done:
     sh_audit_gw(v, &gw);
     SHADOW_PRINTK("fixed\n");
@@ -3401,6 +3544,8 @@ static int sh_page_fault(struct vcpu *v,
                       mfn_x(gmfn));
         perfc_incr(shadow_fault_emulate_failed);
         sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+        trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_USER,
+                                   va, gfn);
         goto done;
     }
 
@@ -3416,6 +3561,8 @@ static int sh_page_fault(struct vcpu *v,
     sh_audit_gw(v, &gw);
     shadow_audit_tables(v);
     shadow_unlock(d);
+
+    this_cpu(trace_emulate_write_val) = 0;
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
  early_emulation:
@@ -3442,6 +3589,8 @@ static int sh_page_fault(struct vcpu *v,
                      "injection: cr2=%#lx, mfn=%#lx\n",
                      va, mfn_x(gmfn));
             sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+            trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
+                                       va, gfn);
             return EXCRET_fault_fixed;
         }
     }
@@ -3474,6 +3623,10 @@ static int sh_page_fault(struct vcpu *v,
          * to support more operations in the emulator.  More likely,
          * though, this is a hint that this page should not be shadowed. */
         shadow_remove_all_shadows(v, gmfn);
+
+        trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED,
+                                   va, gfn);
+
         goto emulate_done;
     }
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
@@ -3500,7 +3653,8 @@ static int sh_page_fault(struct vcpu *v,
 
 #if GUEST_PAGING_LEVELS == 3 /* PAE guest */
     if ( r == X86EMUL_OKAY ) {
-        int i;
+        int i, emulation_count = 0;
+        this_cpu(trace_emulate_initial_va) = va;
         /* Emulate up to four extra instructions in the hope of catching
          * the "second half" of a 64-bit pagetable write. */
         for ( i = 0 ; i < 4 ; i++ )
@@ -3509,10 +3663,12 @@ static int sh_page_fault(struct vcpu *v,
             v->arch.paging.last_write_was_pt = 0;
             r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
             if ( r == X86EMUL_OKAY )
-            {
+            {
+                emulation_count++;
                 if ( v->arch.paging.last_write_was_pt )
                 {
                     perfc_incr(shadow_em_ex_pt);
+                    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN);
                     break; /* Don't emulate past the other half of the write */
                 }
                 else
@@ -3521,12 +3677,16 @@ static int sh_page_fault(struct vcpu *v,
             else
             {
                 perfc_incr(shadow_em_ex_fail);
+                TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_LAST_FAILED);
                 break; /* Don't emulate again if we failed! */
             }
         }
+        this_cpu(trace_extra_emulation_count) = emulation_count;
     }
 #endif /* PAE guest */
 
+    trace_shadow_emulate(gw.l1e, va);
+
  emulate_done:
     SHADOW_PRINTK("emulated\n");
     return EXCRET_fault_fixed;
@@ -3539,6 +3699,7 @@ static int sh_page_fault(struct vcpu *v,
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     shadow_unlock(d);
+    trace_shadow_gen(TRC_SHADOW_MMIO, va);
     return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) ?
             EXCRET_fault_fixed : 0);
 
@@ -3548,6 +3709,10 @@ static int sh_page_fault(struct vcpu *v,
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     shadow_unlock(d);
+
+propagate:
+    trace_not_shadow_fault(gw.l1e, va);
+
     return 0;
 }
 
@@ -3986,7 +4151,7 @@ sh_detach_old_tables(struct vcpu *v)
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = NULL;
     }
-#endif
+#endif // !NDEBUG
 
 ////
@@ -4442,6 +4607,7 @@ static int sh_guess_wrmap(struct vcpu *v
     sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW);
     r = shadow_set_l1e(v, sl1p, sl1e, sl1mfn);
     ASSERT( !(r & SHADOW_SET_ERROR) );
+    TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_WRMAP_GUESS_FOUND);
     return 1;
 }
 #endif
@@ -4796,7 +4962,7 @@ static void emulate_unmap_dest(struct vc
 
 static int
 sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
-                      u32 bytes, struct sh_emulate_ctxt *sh_ctxt)
+                     u32 bytes, struct sh_emulate_ctxt *sh_ctxt)
 {
     void *addr;
 
@@ -4810,6 +4976,22 @@ sh_x86_emulate_write(struct vcpu *v, uns
 
     shadow_lock(v->domain);
     memcpy(addr, src, bytes);
+
+    if ( tb_init_done )
+    {
+#if GUEST_PAGING_LEVELS == 3
+        if ( vaddr == this_cpu(trace_emulate_initial_va) )
+            memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
+        else if ( (vaddr & ~(0x7UL)) == this_cpu(trace_emulate_initial_va) )
+        {
+            TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATE_FULL_PT);
+            memcpy(&this_cpu(trace_emulate_write_val),
+                   (void *)(((unsigned long) addr) & ~(0x7UL)), GUEST_PTE_SIZE);
+        }
+#else
+        memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
+#endif
+    }
 
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
diff -r 7cffee0c978a xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/arch/x86/mm/shadow/private.h	Wed Aug 06 12:21:32 2008 +0100
@@ -90,6 +90,40 @@ extern int shadow_audit_enable;
 #define SHADOW_DEBUG_EMULATE           1
 #define SHADOW_DEBUG_P2M               1
 #define SHADOW_DEBUG_LOGDIRTY          0
+
+/******************************************************************************
+ * Tracing
+ */
+DECLARE_PER_CPU(uint32_t, trace_shadow_path_flags);
+
+#define TRACE_SHADOW_PATH_FLAG(_x)                      \
+    do {                                                \
+        this_cpu(trace_shadow_path_flags) |= (1<<(_x)); \
+    } while(0)
+
+#define TRACE_CLEAR_PATH_FLAGS                  \
+    this_cpu(trace_shadow_path_flags) = 0
+
+enum {
+    TRCE_SFLAG_SET_AD,
+    TRCE_SFLAG_SET_A,
+    TRCE_SFLAG_SHADOW_L1_GET_REF,
+    TRCE_SFLAG_SHADOW_L1_PUT_REF,
+    TRCE_SFLAG_L2_PROPAGATE,
+    TRCE_SFLAG_SET_CHANGED,
+    TRCE_SFLAG_SET_FLUSH,
+    TRCE_SFLAG_SET_ERROR,
+    TRCE_SFLAG_DEMOTE,
+    TRCE_SFLAG_PROMOTE,
+    TRCE_SFLAG_WRMAP,
+    TRCE_SFLAG_WRMAP_GUESS_FOUND,
+    TRCE_SFLAG_WRMAP_BRUTE_FORCE,
+    TRCE_SFLAG_EARLY_UNSHADOW,
+    TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN,
+    TRCE_SFLAG_EMULATION_LAST_FAILED,
+    TRCE_SFLAG_EMULATE_FULL_PT,
+    TRCE_SFLAG_PREALLOC_UNHOOK
+};
 
 /******************************************************************************
  * The shadow lock.
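Note on the enum above: each TRCE_SFLAG_* value is a bit index. TRACE_SHADOW_PATH_FLAG() ORs (1 << value) into the per-cpu word, TRACE_CLEAR_PATH_FLAGS resets it at the top of sh_page_fault(), and the accumulated word travels as the flags field of the FIXUP/EMULATE/NOT_SHADOW records. A hypothetical tools-side check, assuming the enum is mirrored in the analysis code:

static int took_path(uint32_t flags, int sflag)
{
    /* TRACE_SHADOW_PATH_FLAG set bit (1 << sflag); test the same bit. */
    return (flags & (1u << sflag)) != 0;
}

so e.g. took_path(rec.flags, TRCE_SFLAG_EARLY_UNSHADOW) tells you whether a given fault took the early-unshadow path. Also note that sh_validate_guest_entry() shifts the SHADOW_SET_* result bits in starting at TRCE_SFLAG_SET_CHANGED, which is why SET_CHANGED, SET_FLUSH and SET_ERROR must stay consecutive in this enum.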
@@ -143,6 +177,12 @@ extern int shadow_audit_enable;
     } while (0)
 
 
+/* Size (in bytes) of a guest PTE */
+#if GUEST_PAGING_LEVELS >= 3
+# define GUEST_PTE_SIZE 8
+#else
+# define GUEST_PTE_SIZE 4
+#endif
 
 /******************************************************************************
  * Auditing routines
diff -r 7cffee0c978a xen/common/trace.c
--- a/xen/common/trace.c	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/common/trace.c	Wed Aug 06 12:21:32 2008 +0100
@@ -148,6 +148,31 @@ static int tb_set_size(int size)
     return 0;
 }
 
+int trace_will_trace_event(u32 event)
+{
+    if ( !tb_init_done )
+        return 0;
+
+    /*
+     * Copied from __trace_var()
+     */
+    if ( (tb_event_mask & event) == 0 )
+        return 0;
+
+    /* match class */
+    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
+        return 0;
+
+    /* then match subclass */
+    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
+                & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
+        return 0;
+
+    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+        return 0;
+
+    return 1;
+}
 
 /**
  * init_trace_bufs - performs initialization of the per-cpu trace buffers.
@@ -407,7 +432,8 @@ void __trace_var(u32 event, int cycles,
     int extra_word;
     int started_below_highwater;
 
-    ASSERT(tb_init_done);
+    if ( !tb_init_done )
+        return;
 
     /* Convert byte count into word count, rounding up */
     extra_word = (extra / sizeof(u32));
diff -r 7cffee0c978a xen/include/public/trace.h
--- a/xen/include/public/trace.h	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/include/public/trace.h	Wed Aug 06 12:21:32 2008 +0100
@@ -37,6 +37,7 @@
 #define TRC_HVM      0x0008f000    /* Xen HVM trace            */
 #define TRC_MEM      0x0010f000    /* Xen memory trace         */
 #define TRC_PV       0x0020f000    /* Xen PV traces            */
+#define TRC_SHADOW   0x0040f000    /* Xen shadow tracing       */
 #define TRC_ALL      0x0ffff000
 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
 #define TRC_HD_CYCLE_FLAG (1UL<<31)
@@ -92,6 +93,20 @@
 #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
   /* Indicates that addresses in trace record are 64 bits */
 #define TRC_64_FLAG               (0x100)
+
+#define TRC_SHADOW_NOT_SHADOW                 (TRC_SHADOW +  1)
+#define TRC_SHADOW_FAST_PROPAGATE             (TRC_SHADOW +  2)
+#define TRC_SHADOW_FAST_MMIO                  (TRC_SHADOW +  3)
+#define TRC_SHADOW_FALSE_FAST_PATH            (TRC_SHADOW +  4)
+#define TRC_SHADOW_MMIO                       (TRC_SHADOW +  5)
+#define TRC_SHADOW_FIXUP                      (TRC_SHADOW +  6)
+#define TRC_SHADOW_DOMF_DYING                 (TRC_SHADOW +  7)
+#define TRC_SHADOW_EMULATE                    (TRC_SHADOW +  8)
+#define TRC_SHADOW_EMULATE_UNSHADOW_USER      (TRC_SHADOW +  9)
+#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ    (TRC_SHADOW + 10)
+#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
+#define TRC_SHADOW_WRMAP_BF                   (TRC_SHADOW + 12)
+#define TRC_SHADOW_PREALLOC_UNPIN             (TRC_SHADOW + 13)
 
 /* trace events per subclass */
 #define TRC_HVM_VMENTRY         (TRC_HVM_ENTRYEXIT + 0x01)
diff -r 7cffee0c978a xen/include/xen/trace.h
--- a/xen/include/xen/trace.h	Tue Aug 05 14:12:47 2008 +0100
+++ b/xen/include/xen/trace.h	Wed Aug 06 12:21:32 2008 +0100
@@ -33,6 +33,8 @@ void init_trace_bufs(void);
 
 /* used to retrieve the physical address of the trace buffers */
 int tb_control(struct xen_sysctl_tbuf_op *tbc);
+
+int trace_will_trace_event(u32 event);
 
 void __trace_var(u32 event, int cycles, int extra, unsigned char *extra_data);
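Note on trace_will_trace_event(): it deliberately repeats __trace_var()'s event-mask, class, subclass and cpu-mask filtering so a caller can ask in advance whether a record of a given class would actually be logged. The svm.c and vmx.c hunks above use it to suppress the coarse PF_XEN record whenever the finer-grained TRC_SHADOW records are being collected, avoiding double-logging of the same page fault. The intended call pattern, sketched (the surrounding code is illustrative, not part of the patch):

    /* If shadow tracing is live, sh_page_fault() will emit a more
     * detailed record for this fault; skip the generic HVM one. */
    if ( !trace_will_trace_event(TRC_SHADOW) )
        HVMTRACE_2D(PF_XEN, regs->error_code, va);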