# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID f328519053f5a444af475ec10dc8089a0b176e3f
# Parent eb66b68db7b10d09a3298c28b574695e73e1cf1c
[XEN] Remove sync_pagetable_state(). No longer needed.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/mm.c | 31 -------------------------------
xen/arch/x86/traps.c | 3 ---
xen/common/domain.c | 6 ------
xen/common/grant_table.c | 2 --
xen/include/asm-ia64/mm.h | 2 --
xen/include/asm-powerpc/mm.h | 4 +---
xen/include/asm-x86/mm.h | 2 --
7 files changed, 1 insertion(+), 49 deletions(-)
diff -r eb66b68db7b1 -r f328519053f5 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/arch/x86/mm.c Mon Aug 14 10:58:02 2006 +0100
@@ -1657,26 +1657,6 @@ int get_page_type(struct page_info *page
{
if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
{
- if ( (current->domain == page_get_owner(page)) &&
- ((x & PGT_type_mask) == PGT_writable_page) )
- {
- /*
- * This ensures functions like set_gdt() see up-to-date
- * type info without needing to clean up writable p.t.
- * state on the fast path. We take this path only
- * when the current type is writable because:
- * 1. It's the only type that this path can decrement.
- * 2. If we take this path more liberally then we can
- * enter a recursive loop via get_page_from_l1e()
- * during pagetable revalidation.
- */
- sync_pagetable_state(current->domain);
- y = page->u.inuse.type_info;
- /* Can we make progress now? */
- if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
- ((y & PGT_count_mask) == 0) )
- goto again;
- }
if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
((type & PGT_type_mask) != PGT_l1_page_table) )
MEM_LOG("Bad type (saw %" PRtype_info
@@ -1937,8 +1917,6 @@ int do_mmuext_op(
LOCK_BIGLOCK(d);
- sync_pagetable_state(d);
-
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
count &= ~MMU_UPDATE_PREEMPTED;
@@ -2189,8 +2167,6 @@ int do_mmu_update(
struct domain_mmap_cache mapcache, sh_mapcache;
LOCK_BIGLOCK(d);
-
- sync_pagetable_state(d);
if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(v, "pre-mmu"); /* debug */
@@ -2701,8 +2677,6 @@ int do_update_va_mapping(unsigned long v
LOCK_BIGLOCK(d);
- sync_pagetable_state(d);
-
if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(v, "pre-va"); /* debug */
@@ -3335,11 +3309,6 @@ int ptwr_do_page_fault(struct domain *d,
bail:
UNLOCK_BIGLOCK(d);
return 0;
-}
-
-void sync_pagetable_state(struct domain *d)
-{
- shadow_sync_all(d);
}
int map_pages_to_xen(
diff -r eb66b68db7b1 -r f328519053f5 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/arch/x86/traps.c Mon Aug 14 10:58:02 2006 +0100
@@ -713,7 +713,6 @@ static int handle_gdt_ldt_mapping_fault(
{
/* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
LOCK_BIGLOCK(d);
- sync_pagetable_state(d);
ret = map_ldt_shadow_page(offset >> PAGE_SHIFT);
UNLOCK_BIGLOCK(d);
@@ -849,7 +848,6 @@ static int spurious_page_fault(
int is_spurious;
LOCK_BIGLOCK(d);
- sync_pagetable_state(d);
is_spurious = __spurious_page_fault(addr, regs);
UNLOCK_BIGLOCK(d);
@@ -1302,7 +1300,6 @@ static int emulate_privileged_op(struct
case 3: /* Write CR3 */
LOCK_BIGLOCK(v->domain);
- sync_pagetable_state(v->domain);
(void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
UNLOCK_BIGLOCK(v->domain);
break;
diff -r eb66b68db7b1 -r f328519053f5 xen/common/domain.c
--- a/xen/common/domain.c Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/common/domain.c Mon Aug 14 10:58:02 2006 +0100
@@ -266,8 +266,6 @@ static void domain_shutdown_finalise(voi
vcpu_sleep_sync(v);
BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
- sync_pagetable_state(d);
-
/* Don't set DOMF_shutdown until execution contexts are sync'ed. */
if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
send_guest_global_virq(dom0, VIRQ_DOM_EXC);
@@ -406,8 +404,6 @@ void domain_pause(struct domain *d)
for_each_vcpu( d, v )
vcpu_sleep_sync(v);
-
- sync_pagetable_state(d);
}
void domain_unpause(struct domain *d)
@@ -439,8 +435,6 @@ void domain_pause_by_systemcontroller(st
for_each_vcpu ( d, v )
vcpu_sleep_sync(v);
}
-
- sync_pagetable_state(d);
}
void domain_unpause_by_systemcontroller(struct domain *d)
diff -r eb66b68db7b1 -r f328519053f5 xen/common/grant_table.c
--- a/xen/common/grant_table.c Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/common/grant_table.c Mon Aug 14 10:58:02 2006 +0100
@@ -942,8 +942,6 @@ do_grant_table_op(
LOCK_BIGLOCK(d);
- sync_pagetable_state(d);
-
rc = -EFAULT;
switch ( cmd )
{
diff -r eb66b68db7b1 -r f328519053f5 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/include/asm-ia64/mm.h Mon Aug 14 10:58:02 2006 +0100
@@ -500,6 +500,4 @@ int steal_page(
int steal_page(
struct domain *d, struct page_info *page, unsigned int memflags);
-#define sync_pagetable_state(d) ((void)0)
-
#endif /* __ASM_IA64_MM_H__ */
diff -r eb66b68db7b1 -r f328519053f5 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/include/asm-powerpc/mm.h Mon Aug 14 10:58:02 2006 +0100
@@ -224,6 +224,4 @@ extern int steal_page(struct domain *d,
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);
-#define sync_pagetable_state(d) ((void)0)
-
-#endif
+#endif
diff -r eb66b68db7b1 -r f328519053f5 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Mon Aug 14 10:47:59 2006 +0100
+++ b/xen/include/asm-x86/mm.h Mon Aug 14 10:58:02 2006 +0100
@@ -312,8 +312,6 @@ int ptwr_do_page_fault(struct domain *,
struct cpu_user_regs *);
int revalidate_l1(struct domain *, l1_pgentry_t *, l1_pgentry_t *);
-void sync_pagetable_state(struct domain *d);
-
int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
#ifndef NDEBUG