# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1292410310 0
# Node ID dd5fbb5b7a43c7509a08849b65c0d1921af5fe05
# Parent 01f3b350902385627d1fa9e8cd1c231953e7610c
x86/mm: move mfn_is_dirty along with the rest of the log-dirty code
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
diff -r 01f3b3509023 -r dd5fbb5b7a43 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c Wed Dec 15 10:27:18 2010 +0000
+++ b/xen/arch/x86/mm/paging.c Wed Dec 15 10:51:50 2010 +0000
@@ -304,6 +304,62 @@ void paging_mark_dirty(struct domain *d,
log_dirty_unlock(d);
}
+
+/* Is this guest page dirty? */
+int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
+{
+ unsigned long pfn;
+ mfn_t mfn, *l4, *l3, *l2;
+ unsigned long *l1;
+ int rv;
+
+ ASSERT(paging_mode_log_dirty(d));
+
+ /* We /really/ mean PFN here, even for non-translated guests. */
+ pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+ /* Page sharing not supported for shadow domains */
+ BUG_ON(SHARED_M2P(pfn));
+ if ( unlikely(!VALID_M2P(pfn)) )
+ return 0;
+
+ if ( d->arch.paging.log_dirty.failed_allocs > 0 )
+ /* If we have any failed allocations our dirty log is bogus.
+ * Since we can't signal an error here, be conservative and
+ * report "dirty" in this case. (The only current caller,
+ * _sh_propagate, leaves known-dirty pages writable, preventing
+ * subsequent dirty-logging faults from them.)
+ */
+ return 1;
+
+ l4 = paging_map_log_dirty_bitmap(d);
+ if ( !l4 )
+ return 0;
+
+ mfn = l4[L4_LOGDIRTY_IDX(pfn)];
+ unmap_domain_page(l4);
+ if ( !mfn_valid(mfn) )
+ return 0;
+
+ l3 = map_domain_page(mfn_x(mfn));
+ mfn = l3[L3_LOGDIRTY_IDX(pfn)];
+ unmap_domain_page(l3);
+ if ( !mfn_valid(mfn) )
+ return 0;
+
+ l2 = map_domain_page(mfn_x(mfn));
+ mfn = l2[L2_LOGDIRTY_IDX(pfn)];
+ unmap_domain_page(l2);
+ if ( !mfn_valid(mfn) )
+ return 0;
+
+ l1 = map_domain_page(mfn_x(mfn));
+ rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
+ unmap_domain_page(l1);
+
+ return rv;
+}
+
+
/* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
* clear the bitmap and stats as well. */
int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
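
(For readers new to this code: the lookup above walks a four-level radix tree of PAGE_SIZE nodes, keyed by successive fields of the PFN, with a plain bitmap at the leaf level; a node that was never allocated means no page under it has been dirtied. The following user-space sketch mimics the same walk shape. The fan-out, leaf size, and all helper names are illustrative assumptions for a tree of 4 KiB nodes, not the Xen definitions.)

#include <stdio.h>
#include <stdlib.h>

/* Illustrative constants, not the Xen values: 4 KiB interior nodes hold
 * 512 eight-byte child pointers; a 4 KiB leaf holds 32768 bits. */
#define FANOUT        512
#define LEAF_BITS     32768
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct node { void *slot[FANOUT]; } node_t;

static node_t *root;            /* stands in for the per-domain top node */

/* Walk L4 -> L3 -> L2 towards the leaf bitmap.  On the query path
 * (alloc == 0) a missing node means "clean", exactly like the early
 * returns in the patch above. */
static unsigned long *leaf_for(unsigned long pfn, int alloc)
{
    unsigned long idx[3] = {
        pfn / LEAF_BITS / FANOUT / FANOUT % FANOUT, /* ~L4_LOGDIRTY_IDX */
        pfn / LEAF_BITS / FANOUT % FANOUT,          /* ~L3_LOGDIRTY_IDX */
        pfn / LEAF_BITS % FANOUT,                   /* ~L2_LOGDIRTY_IDX */
    };
    node_t *n = root;
    for ( int lvl = 0; lvl < 3; lvl++ )
    {
        if ( !n->slot[idx[lvl]] )
        {
            if ( !alloc )
                return NULL;
            n->slot[idx[lvl]] = calloc(1, lvl == 2 ? LEAF_BITS / 8
                                                   : sizeof(node_t));
        }
        n = n->slot[idx[lvl]];
    }
    return (unsigned long *)n;
}

static void mark_dirty(unsigned long pfn)       /* cf. paging_mark_dirty */
{
    unsigned long bit = pfn % LEAF_BITS;        /* ~L1_LOGDIRTY_IDX */
    leaf_for(pfn, 1)[bit / BITS_PER_LONG] |= 1UL << bit % BITS_PER_LONG;
}

static int mfn_is_dirty(unsigned long pfn)      /* cf. the patch above */
{
    unsigned long bit = pfn % LEAF_BITS, *l1 = leaf_for(pfn, 0);
    return l1 && (l1[bit / BITS_PER_LONG] >> bit % BITS_PER_LONG) & 1;
}

int main(void)
{
    root = calloc(1, sizeof(node_t));
    mark_dirty(0x12345);
    printf("%d %d\n", mfn_is_dirty(0x12345), mfn_is_dirty(0x12346));
    return 0;                                   /* prints "1 0" */
}
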
diff -r 01f3b3509023 -r dd5fbb5b7a43 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Dec 15 10:27:18 2010 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Dec 15 10:51:50 2010 +0000
@@ -657,7 +657,7 @@ _sh_propagate(struct vcpu *v,
if ( mfn_valid(target_mfn) ) {
if ( ft & FETCH_TYPE_WRITE )
paging_mark_dirty(d, mfn_x(target_mfn));
- else if ( !sh_mfn_is_dirty(d, target_mfn) )
+ else if ( !paging_mfn_is_dirty(d, target_mfn) )
sflags &= ~_PAGE_RW;
}
}
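
(The call site above is, per the comment in the moved code, the only current caller, which is why the helper can move wholesale into the common paging code. The surrounding logic is the usual log-dirty trade-off: a write fetch marks the page dirty and keeps it writable, while a clean page loses _PAGE_RW so that its first write faults and gets logged; known-dirty pages stay RW and therefore fault at most once per clean cycle. A condensed sketch of that decision follows; the flag value and function name are illustrative, not Xen's.)

#define PAGE_FLAG_RW 0x2      /* illustrative; stands in for _PAGE_RW */

/* Shape of the _sh_propagate() hunk above: compute the RW bit for a
 * shadow PTE while the domain is in log-dirty mode. */
static unsigned long logdirty_rw_filter(unsigned long sflags,
                                        int is_write_fetch, int is_dirty)
{
    if ( is_write_fetch )
        return sflags;           /* caller marks the page dirty; keep RW */
    if ( !is_dirty )
        sflags &= ~PAGE_FLAG_RW; /* clean page: trap the first write */
    return sflags;
}
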
diff -r 01f3b3509023 -r dd5fbb5b7a43 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h Wed Dec 15 10:27:18 2010 +0000
+++ b/xen/arch/x86/mm/shadow/private.h Wed Dec 15 10:51:50 2010 +0000
@@ -568,67 +568,6 @@ sh_unmap_domain_page_global(void *p)
unmap_domain_page_global(p);
}
-/******************************************************************************
- * Log-dirty mode bitmap handling
- */
-
-extern void sh_mark_dirty(struct domain *d, mfn_t gmfn);
-
-static inline int
-sh_mfn_is_dirty(struct domain *d, mfn_t gmfn)
-/* Is this guest page dirty? Call only in log-dirty mode. */
-{
- unsigned long pfn;
- mfn_t mfn, *l4, *l3, *l2;
- unsigned long *l1;
- int rv;
-
- ASSERT(shadow_mode_log_dirty(d));
-
- /* We /really/ mean PFN here, even for non-translated guests. */
- pfn = get_gpfn_from_mfn(mfn_x(gmfn));
- /* Page sharing not supported for shadow domains */
- BUG_ON(SHARED_M2P(pfn));
- if ( unlikely(!VALID_M2P(pfn)) )
- return 0;
-
- if ( d->arch.paging.log_dirty.failed_allocs > 0 )
- /* If we have any failed allocations our dirty log is bogus.
- * Since we can't signal an error here, be conservative and
- * report "dirty" in this case. (The only current caller,
- * _sh_propagate, leaves known-dirty pages writable, preventing
- * subsequent dirty-logging faults from them.)
- */
- return 1;
-
- l4 = paging_map_log_dirty_bitmap(d);
- if ( !l4 )
- return 0;
-
- mfn = l4[L4_LOGDIRTY_IDX(pfn)];
- unmap_domain_page(l4);
- if ( !mfn_valid(mfn) )
- return 0;
-
- l3 = map_domain_page(mfn_x(mfn));
- mfn = l3[L3_LOGDIRTY_IDX(pfn)];
- unmap_domain_page(l3);
- if ( !mfn_valid(mfn) )
- return 0;
-
- l2 = map_domain_page(mfn_x(mfn));
- mfn = l2[L2_LOGDIRTY_IDX(pfn)];
- unmap_domain_page(l2);
- if ( !mfn_valid(mfn) )
- return 0;
-
- l1 = map_domain_page(mfn_x(mfn));
- rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
- unmap_domain_page(l1);
-
- return rv;
-}
-
/**************************************************************************/
/* Shadow-page refcounting. */
diff -r 01f3b3509023 -r dd5fbb5b7a43 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h Wed Dec 15 10:27:18 2010 +0000
+++ b/xen/include/asm-x86/paging.h Wed Dec 15 10:51:50 2010 +0000
@@ -161,6 +161,9 @@ void paging_log_dirty_init(struct domain
/* mark a page as dirty */
void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
+/* is this guest page dirty? */
+int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
+
/*
* Log-dirty radix tree indexing:
* All tree nodes are PAGE_SIZE bytes, mapped on-demand.
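
(As a back-of-the-envelope check on the tree geometry described above, assuming 4 KiB nodes, 8-byte interior entries, and one bit per PFN at the leaves: each leaf covers 128 MiB of guest memory, and every interior level multiplies that by 512, so four levels comfortably span the architectural physical address space.)

#include <stdio.h>

int main(void)
{
    /* Assumes 4 KiB tree nodes, 8-byte interior entries, 4 KiB pages. */
    unsigned long long pfns = 4096ULL * 8;   /* one leaf: 1 bit per PFN */
    for ( int level = 1; level <= 4; level++, pfns *= 512 )
        printf("%d-level tree covers %llu PFNs (%llu MiB of guest RAM)\n",
               level, pfns, pfns * 4096 >> 20);
    return 0;
}
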