After memory is added, the m2p table and frame table may be increased, and the
idle_page_table is updated accordingly.
These new mappings are propagated to other guests when they access the new
m2p/frame table. The access can happen either in the HV or in a guest via the
read-only mapping.
The propagation is needed for a 32-bit Xen environment, or for a compatibility
guest in a 64-bit Xen environment.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
diff -r 1e580fac45fb xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Thu Jul 02 23:03:22 2009 +0800
+++ b/xen/arch/x86/traps.c Tue Jul 07 02:34:51 2009 +0800
@@ -1061,13 +1061,24 @@ static int handle_gdt_ldt_mapping_fault(
return EXCRET_fault_fixed;
}
+static inline int in_hypervisor_range(unsigned long va, struct vcpu *v)
+{
#ifdef HYPERVISOR_VIRT_END
-#define IN_HYPERVISOR_RANGE(va) \
- (((va) >= HYPERVISOR_VIRT_START) && ((va) < HYPERVISOR_VIRT_END))
+ if (((va) >= HYPERVISOR_VIRT_START) && ((va) < HYPERVISOR_VIRT_END))
#else
-#define IN_HYPERVISOR_RANGE(va) \
- (((va) >= HYPERVISOR_VIRT_START))
+ if (((va) >= HYPERVISOR_VIRT_START))
#endif
+ return 1;
+ else
+#if defined(__x86_64__)
+ return ( is_pv_32on64_domain(v->domain) ?
+ ((va >= HYPERVISOR_COMPAT_VIRT_START(v->domain) &&
+ (va < MACH2PHYS_COMPAT_VIRT_END)) ) : 0 );
+#else
+ return 0;
+#endif
+}
+
static int __spurious_page_fault(
unsigned long addr, unsigned int error_code)
@@ -1207,12 +1218,15 @@ static int fixup_page_fault(unsigned lon
return ret;
}
- if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
+ if ( unlikely(in_hypervisor_range(addr, v)) )
{
if ( !(regs->error_code & PFEC_reserved_bit) &&
(addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
return handle_gdt_ldt_mapping_fault(
addr - GDT_LDT_VIRT_START, regs);
+ if ( !(regs->error_code & PFEC_page_present) &&
+ (pagefault_by_memadd(addr, regs)) )
+ return handle_memadd_fault(addr, regs);
return 0;
}
diff -r 1e580fac45fb xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c Thu Jul 02 23:03:22 2009 +0800
+++ b/xen/arch/x86/x86_32/mm.c Tue Jul 07 01:48:03 2009 +0800
@@ -381,6 +381,76 @@ int check_descriptor(const struct domain
return 0;
}
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
+{
+ if ( guest_mode(regs) )
+ {
+ if ( ((addr >= RO_MPT_VIRT_START) && (addr < RO_MPT_VIRT_END)) )
+ return 1;
+ }
+ else
+ {
+ if ( (addr >= RO_MPT_VIRT_START) && (addr < RDWR_MPT_VIRT_END) )
+ return 1;
+ }
+
+ return 0;
+}
+
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
+{
+ l3_pgentry_t *pl3e = NULL;
+ l3_pgentry_t l3e;
+ l2_pgentry_t *pl2e = NULL;
+ l2_pgentry_t l2e, idle_l2e;
+ unsigned long mfn, cr3;
+ int rc = 0;
+
+ idle_l2e = idle_pg_table_l2[l2_linear_offset(addr)];
+ if (!(l2e_get_flags(idle_l2e) & _PAGE_PRESENT))
+ return 0;
+
+ cr3 = read_cr3();
+ mfn = cr3 >> PAGE_SHIFT;
+
+ /*
+ * No need to get the page type or validate it here: this is a Xen mapping
+ */
+ pl3e = map_domain_page(mfn);
+ pl3e += (cr3 & 0xFE0UL) >> 3;
+ l3e = pl3e[3];
+
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+ goto done;
+
+ mfn = l3e_get_pfn(l3e);
+ pl2e = map_domain_page(mfn);
+
+ l2e = pl2e[l2_table_offset(addr)];
+
+ /*
+ * These entries will only be set by the Xen HV.
+ * Another vCPU may have set it up on a different pCPU.
+ */
+ if (l2e_get_flags(l2e) & _PAGE_PRESENT)
+ {
+ ASSERT(l2e_get_intpte(l2e) == l2e_get_intpte(idle_l2e));
+ rc = EXCRET_fault_fixed;
+ goto done;
+ }
+
+ pl2e[l2_table_offset(addr)] = idle_l2e;
+
+ rc = EXCRET_fault_fixed;
+
+done:
+ if (pl3e)
+ unmap_domain_page(pl3e);
+ if (pl2e)
+ unmap_domain_page(pl2e);
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff -r 1e580fac45fb xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c Thu Jul 02 23:03:22 2009 +0800
+++ b/xen/arch/x86/x86_64/mm.c Mon Jul 06 23:47:48 2009 +0800
@@ -258,8 +258,12 @@ int setup_m2p_table(unsigned long spfn,
if ( mpt_size > RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START )
mpt_size = RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START;
mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
+
+ /* We don't support changing m2p_compat_vstart with memory hotplug */
+#if !defined(CONFIG_MEMORY_HOTPLUG)
if ( (m2p_compat_vstart + mpt_size) < MACH2PHYS_COMPAT_VIRT_END )
m2p_compat_vstart = MACH2PHYS_COMPAT_VIRT_END - mpt_size;
+#endif
/* Has already setup */
if ( (mpt_size >> PAGE_SHIFT) <= compat_mpt_pages )
@@ -645,6 +649,87 @@ unsigned int domain_clamp_alloc_bitsize(
return min(d->arch.physaddr_bitsize, bits);
}
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
+{
+ struct domain *d = current->domain;
+#ifdef CONFIG_COMPAT
+ if ( is_pv_32on64_domain(d) &&
+ (addr >= HYPERVISOR_COMPAT_VIRT_START(d)) &&
+ (addr < MACH2PHYS_COMPAT_VIRT_END) )
+ return 1;
+#endif
+ return 0;
+}
+
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ struct domain *d = current->domain;
+ l4_pgentry_t *pl4e = NULL;
+ l4_pgentry_t l4e;
+ l3_pgentry_t *pl3e = NULL;
+ l3_pgentry_t l3e;
+ l2_pgentry_t *pl2e = NULL;
+ l2_pgentry_t l2e, idle_l2e;
+ unsigned long mfn, idle_index;
+ int rc = 0;
+
+ if (!is_pv_32on64_domain(d))
+ return 0;
+
+ idle_index = l2_table_offset(addr) -
+ COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d);
+ idle_l2e = compat_idle_pg_table_l2[idle_index];
+ if (!(l2e_get_flags(idle_l2e) & _PAGE_PRESENT))
+ return 0;
+
+ mfn = (read_cr3()) >> PAGE_SHIFT;
+
+ pl4e = map_domain_page(mfn);
+
+ l4e = pl4e[0];
+
+ if (!(l4e_get_flags(l4e) & _PAGE_PRESENT))
+ goto done;
+
+ mfn = l4e_get_pfn(l4e);
+ /* We don't need to get the page type here since this is a Xen mapping */
+ pl3e = map_domain_page(mfn);
+
+ l3e = pl3e[3];
+
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+ goto done;
+
+ mfn = l3e_get_pfn(l3e);
+ pl2e = map_domain_page(mfn);
+
+ l2e = pl2e[l2_table_offset(addr)];
+
+ /*
+ * A race can happen: another pCPU may have fixed the entry already.
+ */
+ if (l2e_get_flags(l2e) & _PAGE_PRESENT)
+ {
+ ASSERT(l2e_get_intpte(l2e) == l2e_get_intpte(idle_l2e));
+ rc = EXCRET_fault_fixed;
+ goto done;
+ }
+
+ pl2e[l2_table_offset(addr)] = idle_l2e;
+
+ rc = EXCRET_fault_fixed;
+#endif
+done:
+ if (pl4e)
+ unmap_domain_page(pl4e);
+ if (pl3e)
+ unmap_domain_page(pl3e);
+ if (pl2e)
+ unmap_domain_page(pl2e);
+ return rc;
+}
+
#include "compat/mm.c"
/*
diff -r 1e580fac45fb xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Thu Jul 02 23:03:22 2009 +0800
+++ b/xen/include/asm-x86/mm.h Thu Jul 02 23:03:22 2009 +0800
@@ -518,6 +518,8 @@ int setup_m2p_table(unsigned long spfn,
int setup_m2p_table(unsigned long spfn, unsigned long epfn, int hotplug);
unsigned long domain_get_maximum_gpfn(struct domain *d);
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern struct domain *dom_xen, *dom_io; /* for vmcoreinfo */
#endif /* __ASM_X86_MM_H__ */
page_fault.patch
Description: page_fault.patch
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|