# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 8fb4392c1d87c0b2a003d290e6d6d0541230c1c7
# Parent 8946b6dcd49e017bc51f2a535c2aa83d54bbf1e6
Change the shadow_direct_map_init/clean parameter from struct vcpu to struct domain,
and move the shadow_direct_map_init() call from the per-vcpu VMCS/VMCB construction
paths into hvm_setup_platform().
Also some coding-style cleanups (brace placement, spacing, trailing whitespace).
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
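
For context, a minimal sketch of the interface change. The prototypes are taken
from the patched xen/include/asm-x86/shadow.h below; the surrounding caller is an
illustrative, hypothetical helper rather than code from the tree:

    /* New domain-based interface (previously both took a struct vcpu *). */
    extern int  shadow_direct_map_init(struct domain *d);
    extern void shadow_direct_map_clean(struct domain *d);

    /* Callers that used to pass a vcpu now pass its domain, e.g.: */
    static void example_relinquish(struct vcpu *v)   /* hypothetical caller */
    {
        struct domain *d = v->domain;

        if ( d->arch.hvm_domain.shared_page_va )
            unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va);
        shadow_direct_map_clean(d);   /* was: shadow_direct_map_clean(v) */
    }
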
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/hvm/hvm.c Wed Feb 22 18:23:35 2006
@@ -190,8 +190,10 @@
{
struct hvm_domain *platform;
- if (!(HVM_DOMAIN(current) && (current->vcpu_id == 0)))
+ if ( !HVM_DOMAIN(current) || (current->vcpu_id != 0) )
return;
+
+ shadow_direct_map_init(d);
hvm_map_io_shared_page(d);
hvm_get_info(d);
@@ -200,7 +202,8 @@
pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
register_pic_io_hook();
- if ( hvm_apic_support(d) ) {
+ if ( hvm_apic_support(d) )
+ {
spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
hvm_vioapic_init(d);
}
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Feb 22 18:23:35 2006
@@ -797,12 +797,13 @@
free_host_save_area(v->arch.hvm_svm.host_save_area);
#endif
- if (v->vcpu_id == 0) {
+ if ( v->vcpu_id == 0 )
+ {
/* unmap IO shared page */
struct domain *d = v->domain;
- if (d->arch.hvm_domain.shared_page_va)
+ if ( d->arch.hvm_domain.shared_page_va )
unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(v);
+ shadow_direct_map_clean(d);
}
destroy_vmcb(&v->arch.hvm_svm);
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Feb 22 18:23:35 2006
@@ -443,8 +443,6 @@
pt = pagetable_get_paddr(v->domain->arch.phys_table);
printk("%s: phys_table = %lx\n", __func__, pt);
}
-
- shadow_direct_map_init(v);
if ( svm_paging_enabled(v) )
vmcb->cr3 = pagetable_get_paddr(v->arch.guest_table);
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Feb 22 18:23:35 2006
@@ -230,7 +230,6 @@
error |= __vmwrite(GUEST_TR_BASE, 0);
error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
- shadow_direct_map_init(v);
__vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
__vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Feb 22 18:23:35 2006
@@ -81,14 +81,14 @@
void vmx_relinquish_resources(struct vcpu *v)
{
struct hvm_virpit *vpit;
-
+
if (v->vcpu_id == 0) {
/* unmap IO shared page */
struct domain *d = v->domain;
if ( d->arch.hvm_domain.shared_page_va )
unmap_domain_page_global(
(void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(v);
+ shadow_direct_map_clean(d);
}
vmx_request_clear_vmcs(v);
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/shadow.c Wed Feb 22 18:23:35 2006
@@ -3950,11 +3950,11 @@
__direct_get_l3e(v, vpa, &sl3e);
- if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) )
+ if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) )
{
page = alloc_domheap_page(NULL);
if ( !page )
- goto nomem;
+ goto nomem;
smfn = page_to_mfn(page);
sl3e = l3e_from_pfn(smfn, _PAGE_PRESENT);
@@ -3968,11 +3968,11 @@
__direct_get_l2e(v, vpa, &sl2e);
- if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
+ if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
page = alloc_domheap_page(NULL);
if ( !page )
- goto nomem;
+ goto nomem;
smfn = page_to_mfn(page);
sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
@@ -3985,11 +3985,11 @@
__direct_get_l1e(v, vpa, &sl1e);
- if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) )
+ if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) )
{
sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
__direct_set_l1e(v, vpa, &sl1e);
- }
+ }
shadow_unlock(d);
return EXCRET_fault_fixed;
@@ -3998,7 +3998,7 @@
return 0;
nomem:
- shadow_direct_map_clean(v);
+ shadow_direct_map_clean(d);
domain_crash_synchronous();
}
#endif
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/shadow32.c Wed Feb 22 18:23:35 2006
@@ -1044,7 +1044,7 @@
}
shadow_lock(d);
-
+
__direct_get_l2e(v, vpa, &sl2e);
if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
@@ -1059,7 +1059,7 @@
sple = (l1_pgentry_t *)map_domain_page(smfn);
memset(sple, 0, PAGE_SIZE);
__direct_set_l2e(v, vpa, sl2e);
- }
+ }
if ( !sple )
sple = (l1_pgentry_t *)map_domain_page(l2e_get_pfn(sl2e));
@@ -1082,36 +1082,32 @@
return 0;
nomem:
- shadow_direct_map_clean(v);
+ shadow_direct_map_clean(d);
domain_crash_synchronous();
}
-int shadow_direct_map_init(struct vcpu *v)
+int shadow_direct_map_init(struct domain *d)
{
struct page_info *page;
l2_pgentry_t *root;
if ( !(page = alloc_domheap_page(NULL)) )
- goto fail;
+ return 0;
root = map_domain_page(page_to_mfn(page));
memset(root, 0, PAGE_SIZE);
unmap_domain_page(root);
- v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
+ d->arch.phys_table = mk_pagetable(page_to_maddr(page));
return 1;
-
-fail:
- return 0;
-}
-
-void shadow_direct_map_clean(struct vcpu *v)
+}
+
+void shadow_direct_map_clean(struct domain *d)
{
int i;
unsigned long mfn;
- struct domain *d = v->domain;
l2_pgentry_t *l2e;
mfn = pagetable_get_pfn(d->arch.phys_table);
@@ -1143,7 +1139,7 @@
if(!new_modes) /* Nothing to do - return success */
return 0;
-
+
// can't take anything away by calling this function.
ASSERT(!(d->arch.shadow_mode & ~mode));
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Wed Feb 22 17:26:39 2006
+++ b/xen/arch/x86/shadow_public.c Wed Feb 22 18:23:35 2006
@@ -36,31 +36,27 @@
#define SHADOW_MAX_GUEST32(_encoded) ((L1_PAGETABLE_ENTRIES_32 - 1) - ((_encoded) >> 16))
-int shadow_direct_map_init(struct vcpu *v)
+int shadow_direct_map_init(struct domain *d)
{
struct page_info *page;
l3_pgentry_t *root;
if ( !(page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA)) )
- goto fail;
+ return 0;
root = map_domain_page(page_to_mfn(page));
memset(root, 0, PAGE_SIZE);
root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
- v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
+ d->arch.phys_table = mk_pagetable(page_to_maddr(page));
unmap_domain_page(root);
return 1;
-
-fail:
- return 0;
-}
-
-void shadow_direct_map_clean(struct vcpu *v)
+}
+
+void shadow_direct_map_clean(struct domain *d)
{
unsigned long mfn;
- struct domain *d = v->domain;
l2_pgentry_t *l2e;
l3_pgentry_t *l3e;
int i, j;
diff -r 8946b6dcd49e -r 8fb4392c1d87 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Wed Feb 22 17:26:39 2006
+++ b/xen/include/asm-x86/shadow.h Wed Feb 22 18:23:35 2006
@@ -115,8 +115,8 @@
#define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
#define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
-extern void shadow_direct_map_clean(struct vcpu *v);
-extern int shadow_direct_map_init(struct vcpu *v);
+extern void shadow_direct_map_clean(struct domain *d);
+extern int shadow_direct_map_init(struct domain *d);
extern int shadow_direct_map_fault(
unsigned long vpa, struct cpu_user_regs *regs);
extern void shadow_mode_init(void);