What can be static generally should also be: make the shadow/common.c helpers that have no external callers static, and drop their now-unneeded declarations from shadow/private.h.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
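The pattern applied below is the usual one for removing external linkage: a helper that is only called from within its own translation unit becomes static, and its prototype is dropped from the shared header. A minimal sketch of the before/after shape, using a made-up helper name rather than any of the functions actually touched by this patch:

    /* before: example.h exposes the helper to every file that includes it */
    unsigned int example_min_pages(unsigned int vcpus);

    /* after: example.c keeps the helper to itself; the header line goes away */
    static unsigned int example_min_pages(unsigned int vcpus)
    {
        return vcpus * 128;   /* placeholder body */
    }

Besides documenting that there are no external callers, internal linkage gives the compiler more freedom to inline or discard the function.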
Index: 2007-10-10/xen/arch/x86/mm/shadow/common.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/mm/shadow/common.c	2007-10-11 10:53:12.000000000 +0200
+++ 2007-10-10/xen/arch/x86/mm/shadow/common.c	2007-10-11 10:53:37.000000000 +0200
@@ -667,7 +667,7 @@ int shadow_cmpxchg_guest_entry(struct vc
* at the same time, which means that to guarantee progress, we must
* allow for more than ninety allocated pages per vcpu. We round that
* up to 128 pages, or half a megabyte per vcpu. */
-unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
{
u32 vcpu_count = 0;
struct vcpu *v;
@@ -722,7 +722,7 @@ static inline int chunk_is_available(str
/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
+static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
{
struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
switch ( sp->type )
@@ -1062,7 +1062,7 @@ sh_alloc_p2m_pages(struct domain *d)
}
// Returns 0 if no memory is available...
-struct page_info *
+static struct page_info *
shadow_alloc_p2m_page(struct domain *d)
{
struct list_head *entry;
@@ -1092,7 +1092,7 @@ shadow_alloc_p2m_page(struct domain *d)
return pg;
}
-void
+static void
shadow_free_p2m_page(struct domain *d, struct page_info *pg)
{
ASSERT(page_get_owner(pg) == d);
Index: 2007-10-10/xen/arch/x86/mm/shadow/private.h
===================================================================
--- 2007-10-10.orig/xen/arch/x86/mm/shadow/private.h	2007-10-11 10:53:12.000000000 +0200
+++ 2007-10-10/xen/arch/x86/mm/shadow/private.h	2007-10-10 16:57:56.000000000 +0200
@@ -360,10 +360,6 @@ mfn_t shadow_alloc(struct domain *d,
unsigned long backpointer);
void shadow_free(struct domain *d, mfn_t smfn);
-/* Dispatcher function: call the per-mode function that will unhook the
- * non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn);
-
/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
@@ -383,10 +379,6 @@ extern int sh_remove_write_access(struct
unsigned int level,
unsigned long fault_addr);
-/* Allocate/free functions for passing to the P2M code. */
-struct page_info *shadow_alloc_p2m_page(struct domain *d);
-void shadow_free_p2m_page(struct domain *d, struct page_info *pg);
-
/* Functions that atomically write PT/P2M entries and update state */
void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
l1_pgentry_t *p, mfn_t table_mfn,
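As a side note, the "half a megabyte per vcpu" figure in the shadow_min_acceptable_pages comment above follows directly from the 4 KiB x86 page size. A standalone check of that arithmetic (the macro names here are illustrative, not taken from the Xen sources):

    #include <stdio.h>

    #define PAGE_SIZE        4096u  /* x86 page size in bytes */
    #define MIN_SHADOW_PAGES  128u  /* "more than ninety" rounded up, per the comment */

    int main(void)
    {
        /* 128 pages * 4096 bytes = 524288 bytes = 512 KiB per vcpu */
        printf("%u KiB per vcpu\n", MIN_SHADOW_PAGES * PAGE_SIZE / 1024u);
        return 0;
    }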