# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 69bf77e1b10272ebc40013ae6b8e5b0740df701c
# Parent deff07c1b686479f117d18ed12334e94310e9f69
Writable pagetables for x86/64. Xen portion.
Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
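The x86/64 side reuses the i386 writable-pagetable back-pointer scheme for four-level paging: a page-table page's type_info now carries, above PGT_va_shift (bit 32), the virtual-address slot at which the page is mapped, and the new l1_backptr/l2_backptr/l3_backptr helpers rebuild that address one level at a time from the parent table's type bits plus the slot offset. A minimal standalone sketch of the encoding, assuming a 64-bit build and using the PGT_va_* constants from the asm-x86/mm.h hunk below (the l1_va_backptr helper, main() and the example values are illustrative, not code from this changeset):

/*
 * Sketch of the x86/64 back-pointer encoding; mirrors l1_backptr() from
 * the xen/arch/x86/mm.c hunk below.  PGT_va_* match the new asm-x86/mm.h
 * definitions; the helper name, main() and values are illustrative only.
 */
#include <stdio.h>

#define PGT_va_shift        32
#define PGT_va_mask         ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
#define L2_PAGETABLE_SHIFT  21   /* x86/64: 4KB pages, 4 paging levels */
#define L3_PAGETABLE_SHIFT  30

/*
 * Recover the parent L2 table's own back pointer from its type word and
 * OR in the slot at which the L1 sits: the result is the virtual address
 * (to 2MB granularity) at which the L1 page table is mapped.
 */
static unsigned long l1_va_backptr(unsigned long l2_type,
                                   unsigned long offset_in_l2)
{
    unsigned long l2_backptr = l2_type & PGT_va_mask;
    return ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) |
           (offset_in_l2 << L2_PAGETABLE_SHIFT);
}

int main(void)
{
    unsigned long l2_type = 3UL << PGT_va_shift;   /* L2 mapped at L3 slot 3 */
    printf("L1 mapped at %#lx\n", l1_va_backptr(l2_type, 5));  /* 0xc0a00000 */
    return 0;
}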
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/audit.c Mon Aug 8 08:18:06 2005
@@ -73,7 +73,7 @@
if ( tcount < 0 )
{
APRINTK("Audit %d: type count went below zero "
- "mfn=%lx t=%x ot=%x",
+ "mfn=%lx t=%" PRtype_info " ot=%x",
d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
@@ -82,7 +82,7 @@
else if ( (tcount & ~PGT_count_mask) != 0 )
{
APRINTK("Audit %d: type count overflowed "
- "mfn=%lx t=%x ot=%x",
+ "mfn=%lx t=%" PRtype_info " ot=%x",
d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
@@ -101,7 +101,7 @@
if ( count < 0 )
{
APRINTK("Audit %d: general count went below zero "
- "mfn=%lx t=%x ot=%x",
+ "mfn=%lx t=%" PRtype_info " ot=%x",
d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
@@ -110,7 +110,7 @@
else if ( (count & ~PGT_count_mask) != 0 )
{
APRINTK("Audit %d: general count overflowed "
- "mfn=%lx t=%x ot=%x",
+ "mfn=%lx t=%" PRtype_info " ot=%x",
d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
@@ -152,7 +152,8 @@
if ( page_type != PGT_l1_shadow )
{
printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
- "Expected Shadow L1 t=%x mfn=%lx\n",
+ "Expected Shadow L1 t=%" PRtype_info
+ " mfn=%lx\n",
d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
@@ -178,14 +179,14 @@
if ( page_type == PGT_l2_page_table )
{
printk("Audit %d: [%x] Found %s Linear PT "
- "t=%x mfn=%lx\n",
+ "t=%" PRtype_info " mfn=%lx\n",
d->domain_id, i, (l1mfn==mfn) ? "Self" :
"Other",
l1page->u.inuse.type_info, l1mfn);
}
else if ( page_type != PGT_l1_page_table )
{
printk("Audit %d: [L2 mfn=%lx i=%x] "
- "Expected L1 t=%x mfn=%lx\n",
+ "Expected L1 t=%" PRtype_info " mfn=%lx\n",
d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
@@ -237,7 +238,8 @@
if ( page_get_owner(gpage) != d )
{
printk("Audit %d: [hl2mfn=%lx,i=%x] Skip foreign page "
- "dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
+ "dom=%p (id=%d) mfn=%lx c=%08x t=%"
+ PRtype_info "\n",
d->domain_id, hl2mfn, i,
page_get_owner(gpage),
page_get_owner(gpage)->domain_id,
@@ -288,7 +290,7 @@
PGT_writable_page) )
{
printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW "
- "t=%x mfn=%lx\n",
+ "t=%" PRtype_info " mfn=%lx\n",
d->domain_id, l1mfn, i,
gpage->u.inuse.type_info, gmfn);
errors++;
@@ -308,7 +310,8 @@
if ( page_get_owner(gpage) != d )
{
printk("Audit %d: [l1mfn=%lx,i=%x] Skip foreign page "
- "dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
+ "dom=%p (id=%d) mfn=%lx c=%08x t=%"
+ PRtype_info "\n",
d->domain_id, l1mfn, i,
page_get_owner(gpage),
page_get_owner(gpage)->domain_id,
@@ -454,7 +457,7 @@
if ( shadow_refcounts )
{
printk("Audit %d: found an L2 guest page "
- "mfn=%lx t=%08x c=%08x while in shadow mode\n",
+ "mfn=%lx t=%" PRtype_info " c=%08x while in
shadow mode\n",
d->domain_id, mfn, page->u.inuse.type_info,
page->count_info);
errors++;
@@ -465,14 +468,16 @@
if ( (page->u.inuse.type_info & PGT_validated) !=
PGT_validated )
{
- printk("Audit %d: L2 mfn=%lx not validated %08x\n",
+ printk("Audit %d: L2 mfn=%lx not validated %"
+ PRtype_info "\n",
d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
if ( (page->u.inuse.type_info & PGT_pinned) !=
PGT_pinned )
{
- printk("Audit %d: L2 mfn=%lx not pinned t=%08x\n",
+ printk("Audit %d: L2 mfn=%lx not pinned t=%"
+ PRtype_info "\n",
d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
@@ -494,7 +499,8 @@
{
if ( shadow_refcounts )
{
- printk("found an L1 guest page mfn=%lx t=%08x c=%08x "
+ printk("found an L1 guest page mfn=%lx t=%"
+ PRtype_info " c=%08x "
"while in shadow mode\n",
mfn, page->u.inuse.type_info, page->count_info);
errors++;
@@ -505,7 +511,8 @@
if ( (page->u.inuse.type_info & PGT_validated) !=
PGT_validated )
{
- printk("Audit %d: L1 not validated mfn=%lx
t=%08x\n",
+ printk("Audit %d: L1 not validated mfn=%lx t=%"
+ PRtype_info "\n",
d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
@@ -514,7 +521,8 @@
{
if ( !VM_ASSIST(d,
VMASST_TYPE_writable_pagetables) )
{
- printk("Audit %d: L1 mfn=%lx not pinned
t=%08x\n",
+ printk("Audit %d: L1 mfn=%lx not pinned t=%"
+ PRtype_info "\n",
d->domain_id, mfn,
page->u.inuse.type_info);
}
}
@@ -621,7 +629,7 @@
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
if ( (pt[i] & _PAGE_PRESENT) && ((pt[i] >> PAGE_SHIFT) == xmfn) )
- printk(" found dom=%d mfn=%lx t=%08x c=%08x "
+ printk(" found dom=%d mfn=%lx t=%" PRtype_info " c=%08x "
"pt[i=%x]=%lx\n",
d->domain_id, mfn, page->u.inuse.type_info,
page->count_info, i, pt[i]);
@@ -754,7 +762,7 @@
if ( (page->u.inuse.type_info & PGT_count_mask) >
(page->count_info & PGC_count_mask) )
{
- printk("taf(%08x) > caf(%08x) mfn=%lx\n",
+ printk("taf(%" PRtype_info ") > caf(%08x) mfn=%lx\n",
page->u.inuse.type_info, page->count_info, mfn);
errors++;
}
@@ -763,8 +771,8 @@
(page_type == PGT_writable_page) &&
!(page->u.inuse.type_info & PGT_validated) )
{
- printk("shadow mode writable page not validated mfn=%lx "
- "t=%08x c=%08x\n",
+ printk("shadow mode writable page not validated mfn=%lx "
+ "t=%" PRtype_info " c=%08x\n",
mfn, page->u.inuse.type_info, page->count_info);
errors++;
}
@@ -774,7 +782,7 @@
(page->u.inuse.type_info & PGT_count_mask) > 1 )
{
printk("writeable page with type count >1: "
- "mfn=%lx t=%08x c=%08x\n",
+ "mfn=%lx t=%" PRtype_info " c=%08x\n",
mfn,
page->u.inuse.type_info,
page->count_info );
@@ -786,7 +794,7 @@
if ( page_type == PGT_none &&
(page->u.inuse.type_info & PGT_count_mask) > 0 )
{
- printk("normal page with type count >0: mfn=%lx t=%08x c=%08x\n",
+ printk("normal page with type count >0: mfn=%lx t=%" PRtype_info "
c=%08x\n",
mfn,
page->u.inuse.type_info,
page->count_info );
@@ -812,7 +820,7 @@
: !(page_type && (page_type <= PGT_l4_page_table)) )
{
printk("out of sync page mfn=%lx has strange type "
- "t=%08x c=%08x\n",
+ "t=%" PRtype_info " c=%08x\n",
mfn, page->u.inuse.type_info, page->count_info);
errors++;
}
@@ -850,7 +858,7 @@
case PGT_l4_page_table:
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
- printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
+ printk("Audit %d: type count!=0 t=%" PRtype_info " ot=%x c=%x
mfn=%lx\n",
d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
@@ -864,7 +872,7 @@
case PGT_ldt_page:
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
- printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
+ printk("Audit %d: type count!=0 t=%" PRtype_info " ot=%x c=%x
mfn=%lx\n",
d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
@@ -877,7 +885,7 @@
if ( (page->count_info & PGC_count_mask) != 1 )
{
- printk("Audit %d: gen count!=1 (c=%x) t=%x ot=%x mfn=%lx\n",
+ printk("Audit %d: gen count!=1 (c=%x) t=%" PRtype_info " ot=%x
mfn=%lx\n",
d->domain_id,
page->count_info,
page->u.inuse.type_info,
@@ -913,7 +921,7 @@
(page->count_info != 0) )
{
printk("Audit %d: shadow page counts wrong "
- "mfn=%lx t=%08x c=%08x\n",
+ "mfn=%lx t=%" PRtype_info " c=%08x\n",
d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->count_info);
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/domain.c Mon Aug 8 08:18:06 2005
@@ -190,7 +190,7 @@
{
list_for_each_entry ( page, &d->page_list, list )
{
- printk("Page %p: caf=%08x, taf=%08x\n",
+ printk("Page %p: caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), page->count_info,
page->u.inuse.type_info);
}
@@ -198,14 +198,14 @@
list_for_each_entry ( page, &d->xenpage_list, list )
{
- printk("XenPage %p: caf=%08x, taf=%08x\n",
+ printk("XenPage %p: caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), page->count_info,
page->u.inuse.type_info);
}
page = virt_to_page(d->shared_info);
- printk("Shared_info@%p: caf=%08x, taf=%08x\n",
+ printk("Shared_info@%p: caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), page->count_info,
page->u.inuse.type_info);
}
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/mm.c Mon Aug 8 08:18:06 2005
@@ -122,7 +122,7 @@
static void free_l1_table(struct pfn_info *page);
static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
- unsigned int type);
+ unsigned long type);
static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
/* Used to defer flushing of memory structures. */
@@ -354,7 +354,7 @@
static int get_page_and_type_from_pagenr(unsigned long page_nr,
- u32 type,
+ unsigned long type,
struct domain *d)
{
struct pfn_info *page = &frame_table[page_nr];
@@ -365,7 +365,7 @@
if ( unlikely(!get_page_type(page, type)) )
{
if ( (type & PGT_type_mask) != PGT_l1_page_table )
- MEM_LOG("Bad page type for pfn %lx (%08x)",
+ MEM_LOG("Bad page type for pfn %lx (%" PRtype_info ")",
page_nr, page->u.inuse.type_info);
put_page(page);
return 0;
@@ -390,7 +390,7 @@
get_linear_pagetable(
root_pgentry_t re, unsigned long re_pfn, struct domain *d)
{
- u32 x, y;
+ unsigned long x, y;
struct pfn_info *page;
unsigned long pfn;
@@ -544,7 +544,8 @@
static int
get_page_from_l4e(
- l4_pgentry_t l4e, unsigned long pfn, struct domain *d)
+ l4_pgentry_t l4e, unsigned long pfn,
+ struct domain *d, unsigned long vaddr)
{
int rc;
@@ -559,8 +560,11 @@
return 0;
}
+ vaddr >>= L4_PAGETABLE_SHIFT;
+ vaddr <<= PGT_va_shift;
rc = get_page_and_type_from_pagenr(
- l4e_get_pfn(l4e), PGT_l3_page_table, d);
+ l4e_get_pfn(l4e),
+ PGT_l3_page_table | vaddr, d);
if ( unlikely(!rc) )
return get_linear_pagetable(l4e, pfn, d);
@@ -750,13 +754,47 @@
return 1;
}
+#elif CONFIG_X86_64
+# define create_pae_xen_mappings(pl3e) (1)
+
+static inline int l1_backptr(
+ unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
+{
+ unsigned long l2_backptr = l2_type & PGT_va_mask;
+ BUG_ON(l2_backptr == PGT_va_unknown);
+
+ *backptr = ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) |
+ (offset_in_l2 << L2_PAGETABLE_SHIFT);
+ return 1;
+}
+
+static inline int l2_backptr(
+ unsigned long *backptr, unsigned long offset_in_l3, unsigned long l3_type)
+{
+ unsigned long l3_backptr = l3_type & PGT_va_mask;
+ BUG_ON(l3_backptr == PGT_va_unknown);
+
+ *backptr = ((l3_backptr >> PGT_va_shift) << L4_PAGETABLE_SHIFT) |
+ (offset_in_l3 << L3_PAGETABLE_SHIFT);
+ return 1;
+}
+
+static inline int l3_backptr(
+ unsigned long *backptr, unsigned long offset_in_l4, unsigned long l4_type)
+{
+ unsigned long l4_backptr = l4_type & PGT_va_mask;
+ BUG_ON(l4_backptr == PGT_va_unknown);
+
+ *backptr = (offset_in_l4 << L4_PAGETABLE_SHIFT);
+ return 1;
+}
#else
# define create_pae_xen_mappings(pl3e) (1)
# define l1_backptr(bp,l2o,l2t) \
({ *(bp) = (unsigned long)(l2o) << L2_PAGETABLE_SHIFT; 1; })
#endif
-static int alloc_l2_table(struct pfn_info *page, unsigned int type)
+static int alloc_l2_table(struct pfn_info *page, unsigned long type)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_pfn(page);
@@ -808,7 +846,7 @@
#if CONFIG_PAGING_LEVELS >= 3
-static int alloc_l3_table(struct pfn_info *page)
+static int alloc_l3_table(struct pfn_info *page, unsigned long type)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_pfn(page);
@@ -821,7 +859,12 @@
pl3e = map_domain_page(pfn);
for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
{
- vaddr = (unsigned long)i << L3_PAGETABLE_SHIFT;
+#if CONFIG_PAGING_LEVELS >= 4
+ if ( !l2_backptr(&vaddr, i, type) )
+ goto fail;
+#else
+ vaddr = (unsigned long)i << L3_PAGETABLE_SHIFT;
+#endif
if ( is_guest_l3_slot(i) &&
unlikely(!get_page_from_l3e(pl3e[i], pfn, d, vaddr)) )
goto fail;
@@ -842,15 +885,16 @@
return 0;
}
#else
-#define alloc_l3_table(page) (0)
+#define alloc_l3_table(page, type) (0)
#endif
#if CONFIG_PAGING_LEVELS >= 4
-static int alloc_l4_table(struct pfn_info *page)
+static int alloc_l4_table(struct pfn_info *page, unsigned long type)
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_pfn(page);
l4_pgentry_t *pl4e = page_to_virt(page);
+ unsigned long vaddr;
int i;
/* See the code in shadow_promote() to understand why this is here. */
@@ -859,10 +903,14 @@
return 1;
ASSERT(!shadow_mode_refcounts(d));
- for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
+ for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ ) {
+ if ( !l3_backptr(&vaddr, i, type) )
+ goto fail;
+
if ( is_guest_l4_slot(i) &&
- unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
+ unlikely(!get_page_from_l4e(pl4e[i], pfn, d, vaddr)) )
goto fail;
+ }
/* Xen private mappings. */
memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
@@ -885,7 +933,7 @@
return 0;
}
#else
-#define alloc_l4_table(page) (0)
+#define alloc_l4_table(page, type) (0)
#endif
@@ -1037,7 +1085,7 @@
static int mod_l2_entry(l2_pgentry_t *pl2e,
l2_pgentry_t nl2e,
unsigned long pfn,
- unsigned int type)
+ unsigned long type)
{
l2_pgentry_t ol2e;
unsigned long vaddr = 0;
@@ -1090,7 +1138,8 @@
/* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. */
static int mod_l3_entry(l3_pgentry_t *pl3e,
l3_pgentry_t nl3e,
- unsigned long pfn)
+ unsigned long pfn,
+ unsigned long type)
{
l3_pgentry_t ol3e;
unsigned long vaddr;
@@ -1126,10 +1175,16 @@
if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
+#if CONFIG_PAGING_LEVELS >= 4
+ if ( unlikely(!l2_backptr(&vaddr, pgentry_ptr_to_slot(pl3e), type)) ||
+ unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
+ return 0;
+#else
vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
<< L3_PAGETABLE_SHIFT;
if ( unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
return 0;
+#endif
if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
{
@@ -1141,12 +1196,14 @@
put_page_from_l3e(ol3e, pfn);
return 1;
}
-
- if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
- {
- BUG_ON(!create_pae_xen_mappings(pl3e));
- return 0;
- }
+ else
+ {
+ if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
+ {
+ BUG_ON(!create_pae_xen_mappings(pl3e));
+ return 0;
+ }
+ }
put_page_from_l3e(ol3e, pfn);
return 1;
@@ -1159,9 +1216,11 @@
/* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
static int mod_l4_entry(l4_pgentry_t *pl4e,
l4_pgentry_t nl4e,
- unsigned long pfn)
+ unsigned long pfn,
+ unsigned long type)
{
l4_pgentry_t ol4e;
+ unsigned long vaddr;
if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
{
@@ -1185,7 +1244,8 @@
if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
- if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
+ if ( unlikely(!l3_backptr(&vaddr, pgentry_ptr_to_slot(pl4e), type)) ||
+ unlikely(!get_page_from_l4e(nl4e, pfn, current->domain, vaddr)) )
return 0;
if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
@@ -1193,13 +1253,12 @@
put_page_from_l4e(nl4e, pfn);
return 0;
}
-
- put_page_from_l4e(ol4e, pfn);
- return 1;
- }
-
- if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
- return 0;
+ }
+ else
+ {
+ if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
+ return 0;
+ }
put_page_from_l4e(ol4e, pfn);
return 1;
@@ -1207,7 +1266,7 @@
#endif
-int alloc_page_type(struct pfn_info *page, unsigned int type)
+int alloc_page_type(struct pfn_info *page, unsigned long type)
{
switch ( type & PGT_type_mask )
{
@@ -1216,14 +1275,14 @@
case PGT_l2_page_table:
return alloc_l2_table(page, type);
case PGT_l3_page_table:
- return alloc_l3_table(page);
+ return alloc_l3_table(page, type);
case PGT_l4_page_table:
- return alloc_l4_table(page);
+ return alloc_l4_table(page, type);
case PGT_gdt_page:
case PGT_ldt_page:
return alloc_segdesc_page(page);
default:
- printk("Bad type in alloc_page_type %x t=%x c=%x\n",
+ printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
type, page->u.inuse.type_info,
page->count_info);
BUG();
@@ -1233,7 +1292,7 @@
}
-void free_page_type(struct pfn_info *page, unsigned int type)
+void free_page_type(struct pfn_info *page, unsigned long type)
{
struct domain *owner = page_get_owner(page);
unsigned long gpfn;
@@ -1273,7 +1332,7 @@
#endif
default:
- printk("%s: type %x pfn %lx\n",__FUNCTION__,
+ printk("%s: type %lx pfn %lx\n",__FUNCTION__,
type, page_to_pfn(page));
BUG();
}
@@ -1282,7 +1341,7 @@
void put_page_type(struct pfn_info *page)
{
- u32 nx, x, y = page->u.inuse.type_info;
+ unsigned long nx, x, y = page->u.inuse.type_info;
again:
do {
@@ -1335,9 +1394,9 @@
}
-int get_page_type(struct pfn_info *page, u32 type)
-{
- u32 nx, x, y = page->u.inuse.type_info;
+int get_page_type(struct pfn_info *page, unsigned long type)
+{
+ unsigned long nx, x, y = page->u.inuse.type_info;
again:
do {
@@ -1350,7 +1409,11 @@
}
else if ( unlikely((x & PGT_count_mask) == 0) )
{
+#ifdef CONFIG_X86_64
+ if ( (x & (PGT_type_mask|PGT_va_mask)) != (type & ~PGT_va_mask))
+#else
if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
+#endif
{
if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
{
@@ -1382,13 +1445,17 @@
}
else
{
+#ifdef CONFIG_X86_64
+ if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != (type &
~PGT_va_mask)) )
+#else
if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
+#endif
{
if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
{
if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
((type & PGT_type_mask) != PGT_l1_page_table) )
- MEM_LOG("Bad type (saw %08x != exp %08x) for pfn %lx",
+ MEM_LOG("Bad type (saw %" PRtype_info "!= exp %"
PRtype_info ") for pfn %lx",
x, type, page_to_pfn(page));
return 0;
}
@@ -1427,8 +1494,8 @@
/* Try to validate page type; drop the new reference on failure. */
if ( unlikely(!alloc_page_type(page, type)) )
{
- MEM_LOG("Error while validating pfn %lx for type %08x."
- " caf=%08x taf=%08x",
+ MEM_LOG("Error while validating pfn %lx for type %" PRtype_info "."
+ " caf=%08x taf=%" PRtype_info,
page_to_pfn(page), type,
page->count_info,
page->u.inuse.type_info);
@@ -1596,7 +1663,7 @@
{
struct mmuext_op op;
int rc = 0, i = 0, okay, cpu = smp_processor_id();
- unsigned int type, done = 0;
+ unsigned long type, done = 0;
struct pfn_info *page;
struct vcpu *v = current;
struct domain *d = v->domain, *e;
@@ -1651,6 +1718,9 @@
type = PGT_l1_page_table | PGT_va_mutable;
pin_page:
+#if CONFIG_PAGING_LEVELS >= 4
+ type |= PGT_va_mutable;
+#endif
if ( shadow_mode_refcounts(FOREIGNDOM) )
type = PGT_writable_page;
@@ -1876,7 +1946,7 @@
unlikely(_nd != _d) )
{
MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
- " caf=%08x, taf=%08x\n", page_to_pfn(page),
+ " caf=%08x, taf=%" PRtype_info "\n",
page_to_pfn(page),
d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
okay = 0;
@@ -1951,7 +2021,7 @@
unsigned int cmd, done = 0;
struct vcpu *v = current;
struct domain *d = v->domain;
- u32 type_info;
+ unsigned long type_info;
struct domain_mmap_cache mapcache, sh_mapcache;
LOCK_BIGLOCK(d);
@@ -2063,13 +2133,14 @@
#if CONFIG_PAGING_LEVELS >= 3
case PGT_l3_page_table:
ASSERT( !shadow_mode_refcounts(d) );
- if ( likely(get_page_type(page, PGT_l3_page_table)) )
+ if ( likely(get_page_type(
+ page, type_info & (PGT_type_mask|PGT_va_mask))) )
{
l3_pgentry_t l3e;
/* FIXME: doesn't work with PAE */
l3e = l3e_from_intpte(req.val);
- okay = mod_l3_entry(va, l3e, mfn);
+ okay = mod_l3_entry(va, l3e, mfn, type_info);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l3_normal_pt_update(d, req.ptr, l3e,
&sh_mapcache);
put_page_type(page);
@@ -2079,12 +2150,13 @@
#if CONFIG_PAGING_LEVELS >= 4
case PGT_l4_page_table:
ASSERT( !shadow_mode_refcounts(d) );
- if ( likely(get_page_type(page, PGT_l4_page_table)) )
+ if ( likely(get_page_type(
+ page, type_info & (PGT_type_mask|PGT_va_mask))) )
{
l4_pgentry_t l4e;
l4e = l4e_from_intpte(req.val);
- okay = mod_l4_entry(va, l4e, mfn);
+ okay = mod_l4_entry(va, l4e, mfn, type_info);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l4_normal_pt_update(d, req.ptr, l4e,
&sh_mapcache);
put_page_type(page);
@@ -2618,11 +2690,19 @@
l1_pgentry_t *pl1e;
l2_pgentry_t *pl2e;
unsigned int modified;
+#if defined(__x86_64__)
+ struct vcpu *v = current;
+ /* If in user mode, switch to kernel mode just to read LDT mapping. */
+ extern void toggle_guest_mode(struct vcpu *);
+ int user_mode = !(v->arch.flags & TF_kernel_mode);
+#endif
ASSERT(!shadow_mode_enabled(d));
if ( unlikely(d->arch.ptwr[which].vcpu != current) )
write_ptbase(d->arch.ptwr[which].vcpu);
+ else
+ TOGGLE_MODE();
l1va = d->arch.ptwr[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
@@ -2689,6 +2769,8 @@
if ( unlikely(d->arch.ptwr[which].vcpu != current) )
write_ptbase(current);
+ else
+ TOGGLE_MODE();
}
static int ptwr_emulated_update(
@@ -2747,7 +2829,7 @@
((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
(page_get_owner(page) != d) )
{
- MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%lx, %08x)\n",
+ MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%lx, %"
PRtype_info ")\n",
l1e_get_pfn(pte), page->u.inuse.type_info);
return X86EMUL_UNHANDLEABLE;
}
@@ -2820,6 +2902,35 @@
.cmpxchg8b_emulated = ptwr_emulated_cmpxchg8b
};
+#if defined(__x86_64__)
+/*
+ * Returns zero if mapped, or -1 otherwise
+ */
+static int __not_mapped(l2_pgentry_t *pl2e)
+{
+ unsigned long page = read_cr3();
+
+ page &= PAGE_MASK;
+ page = ((unsigned long *) __va(page))[l4_table_offset((unsigned long)pl2e)];
+ if ( !(page & _PAGE_PRESENT) )
+ return -1;
+
+ page &= PAGE_MASK;
+ page = ((unsigned long *) __va(page))[l3_table_offset((unsigned long)pl2e)];
+ if ( !(page & _PAGE_PRESENT) )
+ return -1;
+
+ page &= PAGE_MASK;
+ page = ((unsigned long *) __va(page))[l2_table_offset((unsigned long)pl2e)];
+ if ( !(page & _PAGE_PRESENT) )
+ return -1;
+
+ return 0;
+}
+#else
+#define __not_mapped(p) (0)
+#endif
+
/* Write page fault handler: check if guest is trying to modify a PTE. */
int ptwr_do_page_fault(struct domain *d, unsigned long addr)
{
@@ -2828,7 +2939,7 @@
l1_pgentry_t pte;
l2_pgentry_t *pl2e;
int which;
- u32 l2_idx;
+ unsigned long l2_idx;
if ( unlikely(shadow_mode_enabled(d)) )
return 0;
@@ -2837,7 +2948,7 @@
* Attempt to read the PTE that maps the VA being accessed. By checking for
* PDE validity in the L2 we avoid many expensive fixups in __get_user().
*/
- if ( !(l2e_get_flags(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
+ if ( !(l2e_get_flags(__linear_l2_table[l2_linear_offset(addr)]) &
_PAGE_PRESENT) ||
__copy_from_user(&pte,&linear_pg_table[l1_linear_offset(addr)],
sizeof(pte)) )
@@ -2857,18 +2968,13 @@
return 0;
}
- /* x86/64: Writable pagetable code needs auditing. Use emulator for now. */
-#if defined(__x86_64__)
- goto emulate;
-#endif
-
/* Get the L2 index at which this L1 p.t. is always mapped. */
l2_idx = page->u.inuse.type_info & PGT_va_mask;
if ( unlikely(l2_idx >= PGT_va_unknown) )
goto emulate; /* Urk! This L1 is mapped in multiple L2 slots! */
l2_idx >>= PGT_va_shift;
- if ( unlikely(l2_idx == (addr >> L2_PAGETABLE_SHIFT)) )
+ if ( unlikely(l2_idx == l2_linear_offset(addr)) )
goto emulate; /* Urk! Pagetable maps itself! */
/*
@@ -2877,6 +2983,10 @@
*/
pl2e = &__linear_l2_table[l2_idx];
which = PTWR_PT_INACTIVE;
+
+ if ( unlikely(__not_mapped(pl2e)) )
+ goto inactive;
+
if ( (l2e_get_pfn(*pl2e)) == pfn )
{
/*
@@ -2891,6 +3001,8 @@
which = PTWR_PT_ACTIVE;
}
+ inactive:
+
/*
* If this is a multi-processor guest then ensure that the page is hooked
* into at most one L2 table, which must be the one running on this VCPU.
@@ -2905,7 +3017,7 @@
goto emulate;
}
- PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08x, "
+ PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
"pfn %lx\n", PTWR_PRINT_WHICH,
addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
@@ -2946,11 +3058,11 @@
/* Finally, make the p.t. page writable by the guest OS. */
l1e_add_flags(pte, _PAGE_RW);
- if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
+ if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(addr)],
&pte, sizeof(pte))) )
{
MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
- &linear_pg_table[addr>>PAGE_SHIFT]);
+ &linear_pg_table[l1_linear_offset(addr)]);
/* Toss the writable pagetable state and crash. */
unmap_domain_page(d->arch.ptwr[which].pl1e);
d->arch.ptwr[which].l1va = 0;
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/shadow32.c Mon Aug 8 08:18:06 2005
@@ -418,7 +418,7 @@
break;
default:
- printk("Free shadow weird page type mfn=%lx type=%08x\n",
+ printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
page_to_pfn(page), page->u.inuse.type_info);
break;
}
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/shadow_public.c Mon Aug 8 08:18:06 2005
@@ -571,7 +571,7 @@
break;
default:
- printk("Free shadow weird page type mfn=%lx type=%08x\n",
+ printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
page_to_pfn(page), page->u.inuse.type_info);
break;
}
@@ -1638,14 +1638,14 @@
/* XXX This needs more thought... */
printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n",
__func__, page_to_pfn(page));
- printk("Before: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
+ printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
page->count_info, page->u.inuse.type_info);
shadow_lock(d);
__shadow_remove_all_access(d, page_to_pfn(page));
shadow_unlock(d);
- printk("After: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
+ printk("After: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
page->count_info, page->u.inuse.type_info);
}
diff -r deff07c1b686 -r 69bf77e1b102 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Sun Aug 7 09:13:39 2005
+++ b/xen/arch/x86/traps.c Mon Aug 8 08:18:06 2005
@@ -422,7 +422,7 @@
{
LOCK_BIGLOCK(d);
if ( unlikely(d->arch.ptwr[PTWR_PT_ACTIVE].l1va) &&
- unlikely((addr >> L2_PAGETABLE_SHIFT) ==
+ unlikely(l2_linear_offset(addr) ==
d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx) )
{
ptwr_flush(d, PTWR_PT_ACTIVE);
@@ -430,7 +430,12 @@
return EXCRET_fault_fixed;
}
- if ( (addr < HYPERVISOR_VIRT_START) &&
+ if ( ((addr < HYPERVISOR_VIRT_START)
+#if defined(__x86_64__)
+ || (addr >= HYPERVISOR_VIRT_END)
+#endif
+ )
+ &&
KERNEL_MODE(v, regs) &&
((regs->error_code & 3) == 3) && /* write-protection fault */
ptwr_do_page_fault(d, addr) )
@@ -459,7 +464,7 @@
goto xen_fault;
propagate_page_fault(addr, regs->error_code);
- return 0;
+ return 0;
xen_fault:
diff -r deff07c1b686 -r 69bf77e1b102 xen/common/grant_table.c
--- a/xen/common/grant_table.c Sun Aug 7 09:13:39 2005
+++ b/xen/common/grant_table.c Mon Aug 8 08:18:06 2005
@@ -859,7 +859,7 @@
if (unlikely((x & (PGC_count_mask|PGC_allocated)) !=
(1 | PGC_allocated)) || unlikely(_nd != _d)) {
printk("gnttab_donate: Bad page values %p: ed=%p(%u), sd=%p,"
- " caf=%08x, taf=%08x\n", (void *) page_to_pfn(page),
+ " caf=%08x, taf=%" PRtype_info "\n", (void *)
page_to_pfn(page),
d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
spin_unlock(&d->page_alloc_lock);
diff -r deff07c1b686 -r 69bf77e1b102 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Sun Aug 7 09:13:39 2005
+++ b/xen/include/asm-x86/mm.h Mon Aug 8 08:18:06 2005
@@ -36,7 +36,7 @@
/* Owner of this page (NULL if page is anonymous). */
u32 _domain; /* pickled format */
/* Type reference count and various PGT_xxx flags and fields. */
- u32 type_info;
+ unsigned long type_info;
} inuse;
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
@@ -77,6 +77,7 @@
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned 27
#define PGT_pinned (1U<<_PGT_pinned)
+#if defined(__i386__)
/* The 11 most significant bits of virt address if this is a page table. */
#define PGT_va_shift 16
#define PGT_va_mask (((1U<<11)-1)<<PGT_va_shift)
@@ -84,6 +85,19 @@
#define PGT_va_mutable (((1U<<11)-1)<<PGT_va_shift)
/* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown (((1U<<11)-2)<<PGT_va_shift)
+#elif defined(__x86_64__)
+ /* The 27 most significant bits of virt address if this is a page table. */
+#define PGT_va_shift 32
+#define PGT_va_mask ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
+ /* Is the back pointer still mutable (i.e. not fixed yet)? */
+ /* Use PML4 slot for HYPERVISOR_VIRT_START.
+ 18 = L4_PAGETABLE_SHIFT - L2_PAGETABLE_SHIFT */
+#define PGT_va_mutable ((unsigned long)(256U<<18)<<PGT_va_shift)
+ /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
+ /* Use PML4 slot for HYPERVISOR_VIRT_START + 1 */
+#define PGT_va_unknown ((unsigned long)(257U<<18)<<PGT_va_shift)
+#endif
+
/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask ((1U<<16)-1)
@@ -114,11 +128,13 @@
#if defined(__i386__)
#define pickle_domptr(_d) ((u32)(unsigned long)(_d))
#define unpickle_domptr(_d) ((struct domain *)(unsigned long)(_d))
+#define PRtype_info "08lx" /* should only be used for printk's */
#elif defined(__x86_64__)
static inline struct domain *unpickle_domptr(u32 _domain)
{ return (_domain == 0) ? NULL : __va(_domain); }
static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
+#define PRtype_info "016lx"/* should only be used for printk's */
#endif
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
@@ -144,8 +160,8 @@
extern unsigned long max_page;
void init_frametable(void);
-int alloc_page_type(struct pfn_info *page, unsigned int type);
-void free_page_type(struct pfn_info *page, unsigned int type);
+int alloc_page_type(struct pfn_info *page, unsigned long type);
+void free_page_type(struct pfn_info *page, unsigned long type);
extern void invalidate_shadow_ldt(struct vcpu *d);
extern int shadow_remove_all_write_access(
struct domain *d, unsigned long gpfn, unsigned long gmfn);
@@ -183,7 +199,7 @@
unlikely(d != _domain) ) /* Wrong owner? */
{
if ( !_shadow_mode_refcounts(domain) )
- DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
+ DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%"
PRtype_info "\n",
page_to_pfn(page), domain, unpickle_domptr(d),
x, page->u.inuse.type_info);
return 0;
@@ -200,7 +216,7 @@
}
void put_page_type(struct pfn_info *page);
-int get_page_type(struct pfn_info *page, u32 type);
+int get_page_type(struct pfn_info *page, unsigned long type);
int get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
@@ -213,7 +229,7 @@
static inline int get_page_and_type(struct pfn_info *page,
struct domain *domain,
- u32 type)
+ unsigned long type)
{
int rc = get_page(page, domain);
diff -r deff07c1b686 -r 69bf77e1b102 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h Sun Aug 7 09:13:39 2005
+++ b/xen/include/asm-x86/page.h Mon Aug 8 08:18:06 2005
@@ -208,20 +208,21 @@
+ DOMAIN_ENTRIES_PER_L4_PAGETABLE)
#endif
-#define linear_l1_table \
+#define VA_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_START & VADDR_MASK)
+#define linear_l1_table \
((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
-#define __linear_l2_table \
- ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
-#define __linear_l3_table \
- ((l3_pgentry_t *)(LINEAR_PT_VIRT_START + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
-#define __linear_l4_table \
- ((l4_pgentry_t *)(LINEAR_PT_VIRT_START + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) + \
- (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
+#define __linear_l2_table \
+ ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
+#define __linear_l3_table \
+ ((l3_pgentry_t *)(LINEAR_PT_VIRT_START + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
+#define __linear_l4_table \
+ ((l4_pgentry_t *)(LINEAR_PT_VIRT_START + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) + \
+ (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
#define linear_pg_table linear_l1_table
#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
diff -r deff07c1b686 -r 69bf77e1b102 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Sun Aug 7 09:13:39 2005
+++ b/xen/include/asm-x86/shadow.h Mon Aug 8 08:18:06 2005
@@ -485,7 +485,7 @@
{
SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
- SH_LOG("dom=%p caf=%08x taf=%08x",
+ SH_LOG("dom=%p caf=%08x taf=%" PRtype_info,
page_get_owner(&frame_table[mfn]),
frame_table[mfn].count_info,
frame_table[mfn].u.inuse.type_info );
@@ -602,14 +602,14 @@
/* XXX This needs more thought... */
printk("%s: needing to call shadow_remove_all_access for mfn=%lx\n",
__func__, page_to_pfn(page));
- printk("Before: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
+ printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
page->count_info, page->u.inuse.type_info);
shadow_lock(d);
shadow_remove_all_access(d, page_to_pfn(page));
shadow_unlock(d);
- printk("After: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
+ printk("After: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
page->count_info, page->u.inuse.type_info);
}
@@ -648,7 +648,7 @@
if ( unlikely(nx == 0) )
{
- printk("get_shadow_ref overflow, gmfn=%x smfn=%lx\n",
+ printk("get_shadow_ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
frame_table[smfn].u.inuse.type_info & PGT_mfn_mask,
smfn);
BUG();
@@ -678,7 +678,8 @@
if ( unlikely(x == 0) )
{
- printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%08x\n",
+ printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%"
+ PRtype_info "\n",
smfn,
frame_table[smfn].count_info,
frame_table[smfn].u.inuse.type_info);
@@ -1200,7 +1201,7 @@
#ifndef NDEBUG
if ( ___shadow_status(d, gpfn, stype) != 0 )
{
- printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
+ printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info
" "
"mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
d->domain_id, gpfn, gmfn, stype,
frame_table[gmfn].count_info,
diff -r deff07c1b686 -r 69bf77e1b102 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Sun Aug 7 09:13:39 2005
+++ b/xen/include/asm-x86/x86_64/page.h Mon Aug 8 08:18:06 2005
@@ -42,7 +42,8 @@
#endif /* !__ASSEMBLY__ */
/* Given a virtual address, get an entry offset into a linear page table. */
-#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> PAGE_SHIFT)
+#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
+#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
#define is_guest_l1_slot(_s) (1)
#define is_guest_l2_slot(_t, _s) (1)