Since all memory is visible to Xen on x86-64 (the 1:1 direct mapping covers
all of RAM, so page_to_virt() is valid for any domain-heap page), there is no
need to allocate from the special Xen heap for allocations specific to this
subarch; anonymous domain-heap pages suffice.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
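
For reference, the conversion pattern used throughout is sketched below
(illustrative only, not part of the patch; the identifiers are the real Xen
ones appearing in the hunks, and clear_page() returning its argument is what
the new assignments rely on):

    /* Before: a Xen-heap page, whose virtual address is returned
     * directly by the allocator. */
    void *va = alloc_xenheap_page();
    if ( va == NULL )
        goto fail;
    memset(va, 0, PAGE_SIZE);

    /* After: an anonymous (owner-less) domain-heap page. Valid on
     * x86-64 only, where the 1:1 mapping covers all of RAM and thus
     * page_to_virt() yields a usable address for any domheap page. */
    struct page_info *pg = alloc_domheap_page(NULL);
    if ( pg == NULL )
        goto fail;
    void *va = clear_page(page_to_virt(pg));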
Index: 2006-09-21/xen/arch/x86/domain.c
===================================================================
--- 2006-09-21.orig/xen/arch/x86/domain.c 2006-09-21 11:09:00.000000000 +0200
+++ 2006-09-21/xen/arch/x86/domain.c 2006-09-21 13:26:07.000000000 +0200
@@ -167,6 +167,9 @@ void free_vcpu_struct(struct vcpu *v)
 int arch_domain_create(struct domain *d)
 {
     l1_pgentry_t gdt_l1e;
+#ifdef __x86_64__
+    struct page_info *pg;
+#endif
     int vcpuid, pdpt_order;
     int i, rc = -ENOMEM;
 
@@ -194,19 +197,19 @@ int arch_domain_create(struct domain *d)
 #else /* __x86_64__ */
 
-    d->arch.mm_perdomain_l2 = alloc_xenheap_page();
-    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
-    if ( (d->arch.mm_perdomain_l2 == NULL) ||
-         (d->arch.mm_perdomain_l3 == NULL) )
+    pg = alloc_domheap_page(NULL);
+    if ( !pg )
         goto fail;
-
-    memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
+    d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
     for ( i = 0; i < (1 << pdpt_order); i++ )
         d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
-    memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
+    pg = alloc_domheap_page(NULL);
+    if ( !pg )
+        goto fail;
+    d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
         l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                       __PAGE_HYPERVISOR);
@@ -240,8 +243,10 @@ int arch_domain_create(struct domain *d)
  fail:
     free_xenheap_page(d->shared_info);
 #ifdef __x86_64__
-    free_xenheap_page(d->arch.mm_perdomain_l2);
-    free_xenheap_page(d->arch.mm_perdomain_l3);
+    if ( d->arch.mm_perdomain_l2 )
+        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+    if ( d->arch.mm_perdomain_l3 )
+        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
     free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
     return rc;
@@ -265,8 +268,8 @@ void arch_domain_destroy(struct domain *
         get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
 #ifdef __x86_64__
-    free_xenheap_page(d->arch.mm_perdomain_l2);
-    free_xenheap_page(d->arch.mm_perdomain_l3);
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
     free_xenheap_page(d->shared_info);
Index: 2006-09-21/xen/arch/x86/x86_64/mm.c
===================================================================
--- 2006-09-21.orig/xen/arch/x86/x86_64/mm.c 2006-09-18 11:37:57.000000000 +0200
+++ 2006-09-21/xen/arch/x86/x86_64/mm.c 2006-09-21 13:23:19.000000000 +0200
@@ -82,11 +82,10 @@ void __init paging_init(void)
     struct page_info *pg;
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
-    l3_ro_mpt = alloc_xenheap_page();
-    clear_page(l3_ro_mpt);
+    pg = alloc_domheap_page(NULL);
+    l3_ro_mpt = clear_page(page_to_virt(pg));
     idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_page(
-            virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+        l4e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_USER);
 
     /*
      * Allocate and map the machine-to-phys table.
@@ -107,12 +106,11 @@ void __init paging_init(void)
         if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
         {
             unsigned long va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
+            struct page_info *pg = alloc_domheap_page(NULL);
-            l2_ro_mpt = alloc_xenheap_page();
-            clear_page(l2_ro_mpt);
+            l2_ro_mpt = clear_page(page_to_virt(pg));
             l3_ro_mpt[l3_table_offset(va)] =
-                l3e_from_page(
-                    virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+                l3e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_USER);
             l2_ro_mpt += l2_table_offset(va);
         }
 
         /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
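
For completeness, the matching release pattern (again a sketch mirroring the
hunks above, not part of the patch):

    /* Before: free by virtual address; free_xenheap_page() tolerates
     * a NULL argument. */
    free_xenheap_page(va);

    /* After: free by page_info. virt_to_page() must only be applied to
     * an address that was actually allocated, hence the guards on the
     * arch_domain_create() failure path. */
    if ( va != NULL )
        free_domheap_page(virt_to_page(va));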