ChangeSet 1.1236.33.2, 2005/03/15 10:12:39+00:00, mafetter@xxxxxxxxxxxxxxxx
Make validate_(pte|pde)_changes a little smarter.
Avoid some unnecessary calls to __shadow_status.
Added an early out for __shadow_status.
Signed-off-by: michael.fetterman@xxxxxxxxxxxx
arch/x86/audit.c | 12 +------
arch/x86/shadow.c | 4 +-
include/asm-x86/mm.h | 18 -----------
include/asm-x86/shadow.h | 73 +++++++++++++++++++++++++++++++++++++++--------
include/xen/perfc_defn.h | 9 +++--
5 files changed, 72 insertions(+), 44 deletions(-)
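
For reference, the new __shadow_status() fast path in one piece, reassembled from the shadow.h hunks below (a sketch of the idea rather than necessarily the exact final source; __gpfn_to_mfn, mfn_is_page_table, mfn_out_of_sync and ___shadow_status are the names used or introduced by this patch):

static inline unsigned long __shadow_status(
    struct domain *d, unsigned long gpfn, unsigned long stype)
{
    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);

    ASSERT(spin_is_locked(&d->arch.shadow_lock));
    ASSERT(gpfn == (gpfn & PGT_mfn_mask));
    ASSERT(stype && !(stype & ~PGT_type_mask));

    /* Early out: a shadow can only exist for a frame marked PGC_page_table
     * (or PGC_out_of_sync, for snapshots), so when the relevant bit is
     * clear the hash-bucket walk in ___shadow_status() is guaranteed to
     * miss and can be skipped entirely. */
    if ( gmfn && ((stype != PGT_snapshot)
                  ? !mfn_is_page_table(gmfn)
                  : !mfn_out_of_sync(gmfn)) )
    {
        perfc_incrc(shadow_status_shortcut);
        return 0;
    }

    return ___shadow_status(d, gmfn, stype);
}
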
diff -Nru a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c 2005-04-05 12:08:14 -04:00
+++ b/xen/arch/x86/audit.c 2005-04-05 12:08:14 -04:00
@@ -25,25 +25,17 @@
#include <xen/kernel.h>
#include <xen/lib.h>
#include <xen/mm.h>
-//#include <xen/sched.h>
-//#include <xen/errno.h>
#include <xen/perfc.h>
-//#include <xen/irq.h>
-//#include <xen/softirq.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
-//#include <asm/io.h>
-//#include <asm/uaccess.h>
-//#include <asm/domain_page.h>
-//#include <asm/ldt.h>
// XXX SMP bug -- these should not be statics...
//
static int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
static int l1, l2, oos_count, page_count;
-#define FILE_AND_LINE 1
+#define FILE_AND_LINE 0
#if FILE_AND_LINE
#define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
@@ -73,7 +65,7 @@
if ( page_get_owner(page) == NULL )
{
APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d) owner=NULL",
- page_to_pfn(page), dir, adjtype, file, line);
+ page_to_pfn(page), dir, adjtype);
errors++;
}
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c 2005-04-05 12:08:14 -04:00
+++ b/xen/arch/x86/shadow.c 2005-04-05 12:08:14 -04:00
@@ -60,7 +60,7 @@
__shadow_sync_mfn(d, gmfn);
}
- if ( unlikely(mfn_is_page_table(gmfn)) )
+ if ( unlikely(page_is_page_table(page)) )
{
min_type = shadow_max_pgtable_type(d, gpfn) + PGT_l1_shadow;
max_type = new_type;
@@ -99,7 +99,7 @@
if ( get_page_type(page, PGT_base_page_table) )
{
put_page_type(page);
- set_bit(_PGC_page_table, &frame_table[gmfn].count_info);
+ set_bit(_PGC_page_table, &page->count_info);
}
else
{
diff -Nru a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h 2005-04-05 12:08:14 -04:00
+++ b/xen/include/asm-x86/mm.h 2005-04-05 12:08:14 -04:00
@@ -129,8 +129,6 @@
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
-#define page_out_of_sync(_p) ((_p)->count_info & PGC_out_of_sync)
-
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) \
do { \
page_set_owner((_pfn), (_dom)); \
@@ -233,22 +231,6 @@
}
return rc;
-}
-
-static inline int mfn_is_page_table(unsigned long mfn)
-{
- if ( !pfn_is_ram(mfn) )
- return 0;
-
- return frame_table[mfn].count_info & PGC_page_table;
-}
-
-static inline int page_is_page_table(struct pfn_info *page)
-{
- if ( !pfn_is_ram(page_to_pfn(page)) )
- return 0;
-
- return page->count_info & PGC_page_table;
}
#define ASSERT_PAGE_IS_TYPE(_p, _t) \
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h 2005-04-05 12:08:14 -04:00
+++ b/xen/include/asm-x86/shadow.h 2005-04-05 12:08:14 -04:00
@@ -68,6 +68,33 @@
extern void vmx_shadow_clear_state(struct domain *);
+static inline int page_is_page_table(struct pfn_info *page)
+{
+ return page->count_info & PGC_page_table;
+}
+
+static inline int mfn_is_page_table(unsigned long mfn)
+{
+ if ( !pfn_is_ram(mfn) )
+ return 0;
+
+ return frame_table[mfn].count_info & PGC_page_table;
+}
+
+static inline int page_out_of_sync(struct pfn_info *page)
+{
+ return page->count_info & PGC_out_of_sync;
+}
+
+static inline int mfn_out_of_sync(unsigned long mfn)
+{
+ if ( !pfn_is_ram(mfn) )
+ return 0;
+
+ return frame_table[mfn].count_info & PGC_out_of_sync;
+}
+
+
/************************************************************************/
static void inline
@@ -565,9 +592,10 @@
static inline void l2pde_propagate_from_guest(
struct domain *d, unsigned long *gpde_p, unsigned long *spde_p)
{
- unsigned long gpde = *gpde_p, sl1mfn;
+ unsigned long gpde = *gpde_p, sl1mfn = 0;
- sl1mfn = __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
+ if ( gpde & _PAGE_PRESENT )
+ sl1mfn = __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
l2pde_general(d, gpde_p, spde_p, sl1mfn);
}
@@ -583,7 +611,7 @@
{
unsigned long old_spte, new_spte;
- perfc_incrc(validate_pte_change);
+ perfc_incrc(validate_pte_calls);
#if 0
FSH_LOG("validate_pte(old=%p new=%p)\n", old_pte, new_pte);
@@ -595,8 +623,11 @@
// only do the ref counting if something important changed.
//
- if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
+ if ( ((old_spte | new_spte) & _PAGE_PRESENT ) &&
+ ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT)) )
{
+ perfc_incrc(validate_pte_changes);
+
if ( new_spte & _PAGE_PRESENT )
shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d);
if ( old_spte & _PAGE_PRESENT )
@@ -618,15 +649,18 @@
unsigned long old_spde = *shadow_pde_p;
unsigned long new_spde;
- perfc_incrc(validate_pde_change);
+ perfc_incrc(validate_pde_calls);
l2pde_propagate_from_guest(d, &new_pde, shadow_pde_p);
new_spde = *shadow_pde_p;
// only do the ref counting if something important changed.
//
- if ( (old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT) )
+ if ( ((old_spde | new_spde) & _PAGE_PRESENT) &&
+ ((old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT)) )
{
+ perfc_incrc(validate_pde_changes);
+
if ( new_spde & _PAGE_PRESENT )
get_shadow_ref(new_spde >> PAGE_SHIFT);
if ( old_spde & _PAGE_PRESENT )
@@ -720,16 +754,12 @@
* It returns the shadow's mfn, or zero if it doesn't exist.
*/
-static inline unsigned long __shadow_status(
+static inline unsigned long ___shadow_status(
struct domain *d, unsigned long gpfn, unsigned long stype)
{
struct shadow_status *p, *x, *head;
unsigned long key = gpfn | stype;
- ASSERT(spin_is_locked(&d->arch.shadow_lock));
- ASSERT(gpfn == (gpfn & PGT_mfn_mask));
- ASSERT(stype && !(stype & ~PGT_type_mask));
-
perfc_incrc(shadow_status_calls);
x = head = hash_bucket(d, gpfn);
@@ -777,6 +807,27 @@
SH_VVLOG("lookup gpfn=%p => status=0", key);
perfc_incrc(shadow_status_miss);
return 0;
+}
+
+static inline unsigned long __shadow_status(
+ struct domain *d, unsigned long gpfn, unsigned long stype)
+{
+ unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
+
+ ASSERT(spin_is_locked(&d->arch.shadow_lock));
+ ASSERT(gpfn == (gpfn & PGT_mfn_mask));
+ ASSERT(stype && !(stype & ~PGT_type_mask));
+
+ if ( gmfn && ((stype != PGT_snapshot)
+ ? !mfn_is_page_table(gmfn)
+ : !mfn_out_of_sync(gmfn)) )
+ {
+ perfc_incrc(shadow_status_shortcut);
+ ASSERT(___shadow_status(d, gpfn, stype) == 0);
+ return 0;
+ }
+
+ return ___shadow_status(d, gmfn, stype);
}
/*
diff -Nru a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
--- a/xen/include/xen/perfc_defn.h 2005-04-05 12:08:14 -04:00
+++ b/xen/include/xen/perfc_defn.h 2005-04-05 12:08:14 -04:00
@@ -38,7 +38,8 @@
PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
PERFSTATUS( snapshot_pages, "current # fshadow snapshot pages" )
-PERFCOUNTER_CPU(shadow_status_calls, "calls to __shadow_status" )
+PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
+PERFCOUNTER_CPU(shadow_status_calls, "calls to ___shadow_status" )
PERFCOUNTER_CPU(shadow_status_miss, "missed shadow cache" )
PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
PERFCOUNTER_CPU(check_pagetable, "calls to check_pagetable" )
@@ -59,5 +60,7 @@
PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "sf bailed due to a ro mapping")