# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1192094468 -3600
# Node ID 3674da19741ac406ebc9ee497a92ec852bad70ee
# Parent f0caa61bc53b4b56cb7f9f288517938ebb56b1df
More static shadow functions.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset: 16091:19a843def5fdd3853b0b5372c512a5c623954ae7
xen-unstable date: Thu Oct 11 10:21:08 2007 +0100
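
The pattern throughout is the usual C cleanup: a helper that is only called
from its own translation unit loses its prototype in the shared header
(private.h) and gains internal linkage via 'static' in common.c. As a
minimal, self-contained sketch of that pattern (hypothetical names, not the
real Xen helpers):

    /* Hypothetical sketch, not Xen code: once a helper is called only from
     * the file that defines it, the prototype is dropped from the shared
     * header and the definition is marked 'static'. */
    #include <stdio.h>

    struct domain {
        unsigned int vcpu_count;   /* stand-in for Xen's vcpu list */
    };

    /* Previously declared in a private header; now file-local. */
    static unsigned int min_acceptable_pages(const struct domain *d)
    {
        /* Mirrors the rounding described in the patched comment below:
         * reserve 128 shadow pages (half a megabyte) per vcpu. */
        return d->vcpu_count * 128u;
    }

    int main(void)
    {
        struct domain d = { .vcpu_count = 4 };
        printf("%u shadow pages reserved\n", min_acceptable_pages(&d));
        return 0;
    }

Internal linkage also lets the compiler warn if such a helper later becomes
unused, which is the usual motivation for this kind of cleanup.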
---
xen/arch/x86/mm/shadow/common.c | 8 ++++----
xen/arch/x86/mm/shadow/private.h | 8 --------
2 files changed, 4 insertions(+), 12 deletions(-)
diff -r f0caa61bc53b -r 3674da19741a xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Thu Oct 11 10:20:45 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Thu Oct 11 10:21:08 2007 +0100
@@ -729,7 +729,7 @@ int shadow_cmpxchg_guest_entry(struct vc
* at the same time, which means that to guarantee progress, we must
* allow for more than ninety allocated pages per vcpu. We round that
* up to 128 pages, or half a megabyte per vcpu. */
-unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
{
u32 vcpu_count = 0;
struct vcpu *v;
@@ -784,7 +784,7 @@ static inline int chunk_is_available(str
/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
+static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
{
struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
switch ( sp->type )
@@ -1124,7 +1124,7 @@ sh_alloc_p2m_pages(struct domain *d)
}
// Returns 0 if no memory is available...
-struct page_info *
+static struct page_info *
shadow_alloc_p2m_page(struct domain *d)
{
struct list_head *entry;
@@ -1154,7 +1154,7 @@ shadow_alloc_p2m_page(struct domain *d)
return pg;
}
-void
+static void
shadow_free_p2m_page(struct domain *d, struct page_info *pg)
{
ASSERT(page_get_owner(pg) == d);
diff -r f0caa61bc53b -r 3674da19741a xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h Thu Oct 11 10:20:45 2007 +0100
+++ b/xen/arch/x86/mm/shadow/private.h Thu Oct 11 10:21:08 2007 +0100
@@ -363,10 +363,6 @@ void shadow_free(struct domain *d, mfn_
/* Function to convert a shadow to log-dirty */
void shadow_convert_to_log_dirty(struct vcpu *v, mfn_t smfn);
-/* Dispatcher function: call the per-mode function that will unhook the
- * non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn);
-
/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
@@ -385,10 +381,6 @@ extern int sh_remove_write_access(struct
extern int sh_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
unsigned int level,
unsigned long fault_addr);
-
-/* Allocate/free functions for passing to the P2M code. */
-struct page_info *shadow_alloc_p2m_page(struct domain *d);
-void shadow_free_p2m_page(struct domain *d, struct page_info *pg);
/* Functions that atomically write PT/P2M entries and update state */
void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,