ChangeSet 1.1311.1.1, 2005/04/20 12:50:06+01:00, mafetter@xxxxxxxxxxxxxxxx
Cleanup page table handling. Add macros to access page table
entries, fixup plenty of places in the code to use the page
table types instead of "unsigned long".
Signed-off-by: Gerd Knorr <kraxel@xxxxxxxxxxx>
Signed-off-by: michael.fetterman@xxxxxxxxxxxx
arch/x86/dom0_ops.c | 2
arch/x86/domain.c | 6
arch/x86/domain_build.c | 76 ++++---
arch/x86/mm.c | 294 ++++++++++++++--------------
arch/x86/shadow.c | 360 ++++++++++++++++++-----------------
arch/x86/vmx.c | 7
arch/x86/vmx_platform.c | 4
arch/x86/x86_32/domain_page.c | 16 -
arch/x86/x86_32/mm.c | 57 ++---
arch/x86/x86_32/traps.c | 2
arch/x86/x86_64/mm.c | 75 +++----
common/grant_table.c | 10
include/asm-x86/mm.h | 17 -
include/asm-x86/page.h | 16 -
include/asm-x86/shadow.h | 285 ++++++++++++++-------------
include/asm-x86/x86_32/domain_page.h | 2
include/asm-x86/x86_32/page.h | 114 ++++++++---
include/asm-x86/x86_64/page.h | 198 +++++++++++++++----
18 files changed, 887 insertions(+), 654 deletions(-)
diff -Nru a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c 2005-04-20 10:03:28 -04:00
+++ b/xen/arch/x86/dom0_ops.c 2005-04-20 10:03:28 -04:00
@@ -425,7 +425,7 @@
{
for ( i = 0; i < 16; i++ )
c->gdt_frames[i] =
- l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
+ l1e_get_pfn(ed->arch.perdomain_ptes[i]);
c->gdt_ents = GET_GDT_ENTRIES(ed);
}
c->kernel_ss = ed->arch.kernel_ss;
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-04-20 10:03:28 -04:00
+++ b/xen/arch/x86/domain.c 2005-04-20 10:03:28 -04:00
@@ -260,11 +260,13 @@
d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
- mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
+ l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
+ __PAGE_HYPERVISOR);
d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
- mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
+ l3e_create_phys(__pa(d->arch.mm_perdomain_l2),
+ __PAGE_HYPERVISOR);
#endif
(void)ptwr_init(d);
diff -Nru a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c 2005-04-20 10:03:28 -04:00
+++ b/xen/arch/x86/domain_build.c 2005-04-20 10:03:28 -04:00
@@ -244,9 +244,9 @@
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
- mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
+ l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
- mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
+ l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
l2tab += l2_table_offset(dsi.v_start);
@@ -257,12 +257,14 @@
{
l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
mpt_alloc += PAGE_SIZE;
- *l2tab++ = mk_l2_pgentry((unsigned long)l1start | L2_PROT);
+ *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
+ l2tab++;
clear_page(l1tab);
if ( count == 0 )
l1tab += l1_table_offset(dsi.v_start);
}
- *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
+ *l1tab = l1e_create_pfn(mfn, L1_PROT);
+ l1tab++;
page = &frame_table[mfn];
if ( !get_page_and_type(page, d, PGT_writable_page) )
@@ -273,13 +275,13 @@
/* Pages that are part of page tables must be read only. */
l2tab = l2start + l2_table_offset(vpt_start);
- l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*l2tab);
+ l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*l2tab);
l1tab += l1_table_offset(vpt_start);
for ( count = 0; count < nr_pt_pages; count++ )
{
- page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
+ page = &frame_table[l1e_get_pfn(*l1tab)];
if ( !opt_dom0_shadow )
- *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
+ l1e_remove_flags(l1tab, _PAGE_RW);
else
if ( !get_page_type(page, PGT_writable_page) )
BUG();
@@ -317,7 +319,7 @@
get_page(page, d); /* an extra ref because of readable mapping */
}
if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
- l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*++l2tab);
+ l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*++l2tab);
}
#elif defined(__x86_64__)
@@ -335,9 +337,9 @@
l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
+ l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- mk_l4_pgentry(__pa(d->arch.mm_perdomain_l3) | __PAGE_HYPERVISOR);
+ l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
ed->arch.guest_table = mk_pagetable(__pa(l4start));
l4tab += l4_table_offset(dsi.v_start);
@@ -366,13 +368,17 @@
clear_page(l3tab);
if ( count == 0 )
l3tab += l3_table_offset(dsi.v_start);
- *l4tab++ = mk_l4_pgentry(__pa(l3start) | L4_PROT);
+ *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
+ l4tab++;
}
- *l3tab++ = mk_l3_pgentry(__pa(l2start) | L3_PROT);
+ *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
+ l3tab++;
}
- *l2tab++ = mk_l2_pgentry(__pa(l1start) | L2_PROT);
+ *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
+ l2tab++;
}
- *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
+ *l1tab = l1e_create_pfn(mfn, L1_PROT);
+ l1tab++;
page = &frame_table[mfn];
if ( (page->u.inuse.type_info == 0) &&
@@ -384,16 +390,16 @@
/* Pages that are part of page tables must be read only. */
l4tab = l4start + l4_table_offset(vpt_start);
- l3start = l3tab = l4_pgentry_to_l3(*l4tab);
+ l3start = l3tab = l4e_to_l3e(*l4tab);
l3tab += l3_table_offset(vpt_start);
- l2start = l2tab = l3_pgentry_to_l2(*l3tab);
+ l2start = l2tab = l3e_to_l2e(*l3tab);
l2tab += l2_table_offset(vpt_start);
- l1start = l1tab = l2_pgentry_to_l1(*l2tab);
+ l1start = l1tab = l2e_to_l1e(*l2tab);
l1tab += l1_table_offset(vpt_start);
for ( count = 0; count < nr_pt_pages; count++ )
{
- *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
- page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
+ l1e_remove_flags(l1tab, _PAGE_RW);
+ page = &frame_table[l1e_get_pfn(*l1tab)];
/* Read-only mapping + PGC_allocated + page-table page. */
page->count_info = PGC_allocated | 3;
@@ -412,10 +418,10 @@
if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
{
if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
- l3start = l3tab = l4_pgentry_to_l3(*++l4tab);
- l2start = l2tab = l3_pgentry_to_l2(*l3tab);
+ l3start = l3tab = l4e_to_l3e(*++l4tab);
+ l2start = l2tab = l3e_to_l2e(*l3tab);
}
- l1start = l1tab = l2_pgentry_to_l1(*l2tab);
+ l1start = l1tab = l2e_to_l1e(*l2tab);
}
}
@@ -525,8 +531,8 @@
#if defined(__i386__)
/* Destroy low mappings - they were only for our convenience. */
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
- if ( l2_pgentry_val(l2start[i]) & _PAGE_PSE )
- l2start[i] = mk_l2_pgentry(0);
+ if ( l2e_get_flags(l2start[i]) & _PAGE_PSE )
+ l2start[i] = l2e_empty();
zap_low_mappings(); /* Do the same for the idle page tables. */
#endif
@@ -544,17 +550,27 @@
: SHM_enable));
if ( opt_dom0_translate )
{
+ /* Hmm, what does this?
+ Looks like isn't portable across 32/64 bit and pae/non-pae ...
+ -- kraxel */
+
+ /* mafetter: This code is mostly a hack in order to be able to
+ * test with dom0's which are running with shadow translate.
+ * I expect we'll rip this out once we have a stable set of
+ * domU clients which use the various shadow modes, but it's
+ * useful to leave this here for now...
+ */
+
// map this domain's p2m table into current page table,
// so that we can easily access it.
//
- ASSERT( root_pgentry_val(idle_pg_table[1]) == 0 );
+ ASSERT( root_get_value(idle_pg_table[1]) == 0 );
ASSERT( pagetable_val(d->arch.phys_table) );
- idle_pg_table[1] = mk_root_pgentry(
- pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
+ idle_pg_table[1] =
+ root_create_phys(pagetable_val(d->arch.phys_table),
+ __PAGE_HYPERVISOR);
translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
- pagetable_val(ed->arch.guest_table)
- >> PAGE_SHIFT);
- idle_pg_table[1] = mk_root_pgentry(0);
+ pagetable_val(ed->arch.guest_table) >>
+ PAGE_SHIFT);
+ idle_pg_table[1] = root_empty();
local_flush_tlb();
}
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-20 10:03:28 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-20 10:03:28 -04:00
@@ -244,9 +244,9 @@
for ( i = 16; i < 32; i++ )
{
- pfn = l1_pgentry_to_pfn(d->arch.perdomain_ptes[i]);
+ pfn = l1e_get_pfn(d->arch.perdomain_ptes[i]);
if ( pfn == 0 ) continue;
- d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
+ d->arch.perdomain_ptes[i] = l1e_empty();
page = &frame_table[pfn];
ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
ASSERT_PAGE_IS_DOMAIN(page, d->domain);
@@ -283,7 +283,8 @@
{
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- unsigned long l1e, nl1e, gpfn, gmfn;
+ unsigned long gpfn, gmfn;
+ l1_pgentry_t l1e, nl1e;
unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
int res;
@@ -301,13 +302,14 @@
shadow_sync_va(ed, gva);
TOGGLE_MODE();
- __get_user(l1e, (unsigned long *)&linear_pg_table[l1_linear_offset(gva)]);
+ __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
+ sizeof(l1e));
TOGGLE_MODE();
- if ( unlikely(!(l1e & _PAGE_PRESENT)) )
+ if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
return 0;
- gpfn = l1_pgentry_to_pfn(mk_l1_pgentry(l1e));
+ gpfn = l1e_get_pfn(l1e);
gmfn = __gpfn_to_mfn(d, gpfn);
if ( unlikely(!VALID_MFN(gmfn)) )
return 0;
@@ -325,9 +327,9 @@
if ( unlikely(!res) )
return 0;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|