After memory is added, the m2p table and frame table may need to be extended,
and the idle_page_table is updated accordingly.
These new mappings are propagated lazily to other page tables: a page fault on
the new m2p/frame table area copies the missing entries from the idle page
table. Such a fault can happen either in the hypervisor, or in a guest for the
read-only mapping.
This propagation is needed on 32-bit Xen, and for compat guests on 64-bit Xen.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
diff -r fbba70d76aec xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Sun Jun 28 02:34:55 2009 +0800
+++ b/xen/arch/x86/traps.c Sun Jun 28 02:34:57 2009 +0800
@@ -1213,6 +1213,9 @@ static int fixup_page_fault(unsigned lon
(addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
return handle_gdt_ldt_mapping_fault(
addr - GDT_LDT_VIRT_START, regs);
+    if ( !(regs->error_code & PFEC_page_present) &&
+         pagefault_by_memadd(addr, regs) )
+        return handle_memadd_fault(addr, regs);
return 0;
}
diff -r fbba70d76aec xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c Sun Jun 28 02:34:55 2009 +0800
+++ b/xen/arch/x86/x86_32/mm.c Sun Jun 28 02:41:09 2009 +0800
@@ -383,6 +383,63 @@ int check_descriptor(const struct domain
return 0;
}
+
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
+{
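+    /*
+     * Guest-mode faults are only expected in the read-only M2P range;
+     * in Xen context the fault may also land in the frame table or
+     * writable M2P, which lie below RDWR_MPT_VIRT_END.
+     */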
+    if ( guest_mode(regs) )
+    {
+        if ( (addr >= RO_MPT_VIRT_START) && (addr < RO_MPT_VIRT_END) )
+            return 1;
+    }
+    else
+    {
+        if ( (addr >= RO_MPT_VIRT_START) && (addr < RDWR_MPT_VIRT_END) )
+            return 1;
+    }
+
+    return 0;
+}
+
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
+{
+    l3_pgentry_t *pl3e;
+    l3_pgentry_t l3e;
+    l2_pgentry_t *pl2e;
+    l2_pgentry_t l2e, idle_l2e;
+    unsigned long mfn, cr3;
+
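+    /* Nothing to propagate if the idle page table has no mapping here either. */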
+    idle_l2e = idle_pg_table_l2[l2_linear_offset(addr)];
+    if ( !(l2e_get_flags(idle_l2e) & _PAGE_PRESENT) )
+        return 0;
+
+    cr3 = read_cr3();
+    mfn = cr3 >> PAGE_SHIFT;
+
+    /*
+     * No need to get the page type or validate here: this is the Xen
+     * mapping of the currently loaded CR3.
+     */
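+    /*
+     * On PAE, CR3 holds a 32-byte-aligned PDPT pointer: bits 5-11 give
+     * its offset within the page, and entry 3 covers the Xen range.
+     */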
+    pl3e = map_domain_page(mfn);
+    pl3e += (cr3 & 0xFE0UL) >> 3;
+    l3e = pl3e[3];
+
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+        return 0;
+
+    mfn = l3e_get_pfn(l3e);
+    pl2e = map_domain_page(mfn);
+
+    l2e = pl2e[l2_table_offset(addr)];
+
+    if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
+        return 0;
+
+    memcpy(&pl2e[l2_table_offset(addr)],
+           &idle_pg_table_l2[l2_linear_offset(addr)],
+           sizeof(l2_pgentry_t));
+
+    return EXCRET_fault_fixed;
+}
+
/*
* Local variables:
* mode: C
diff -r fbba70d76aec xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c Sun Jun 28 02:34:55 2009 +0800
+++ b/xen/arch/x86/x86_64/mm.c Sun Jun 28 02:42:08 2009 +0800
@@ -643,6 +643,76 @@ unsigned int domain_clamp_alloc_bitsize(
return min(d->arch.physaddr_bitsize, bits);
}
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+    struct domain *d = current->domain;
+
+    if ( guest_mode(regs) &&
+         (addr >= HYPERVISOR_COMPAT_VIRT_START(d)) &&
+         (addr < MACH2PHYS_COMPAT_VIRT_END) )
+        return 1;
+#endif
+    return 0;
+}
+
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+    struct domain *d = current->domain;
+    l4_pgentry_t *pl4e;
+    l4_pgentry_t l4e;
+    l3_pgentry_t *pl3e;
+    l3_pgentry_t l3e;
+    l2_pgentry_t *pl2e;
+    l2_pgentry_t l2e, idle_l2e;
+    unsigned long mfn, idle_index;
+
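+    /*
+     * Only compat (32-on-64) guests need this fixup: their compat M2P is
+     * mapped via per-page-table L2 entries that may predate the memory
+     * add, while 64-bit guests reach the M2P through Xen's shared
+     * page-table structure.
+     */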
+    if ( !is_pv_32on64_domain(d) )
+        return 0;
+
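+    /*
+     * Walk the guest's current page tables (L4 -> L3 -> L2) to find the
+     * L2 slot covering the faulting compat M2P address.
+     */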
+    mfn = read_cr3() >> PAGE_SHIFT;
+
+    pl4e = map_domain_page(mfn);
+
+    l4e = pl4e[0];
+
+    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+        return 0;
+
+    mfn = l4e_get_pfn(l4e);
+    /* No need to get the page type here since this is the current CR3 */
+    pl3e = map_domain_page(mfn);
+
+    l3e = pl3e[3];
+
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+        return 0;
+
+    mfn = l3e_get_pfn(l3e);
+    pl2e = map_domain_page(mfn);
+
+    l2e = pl2e[l2_table_offset(addr)];
+
+    if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
+        return 0;
+
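+    /* Look up the matching slot in the compat idle L2 table; copy it only if present. */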
+    idle_index = (l2_table_offset(addr) -
+                  COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)) /
+                 sizeof(l2_pgentry_t);
+    idle_l2e = compat_idle_pg_table_l2[idle_index];
+    if ( !(l2e_get_flags(idle_l2e) & _PAGE_PRESENT) )
+        return 0;
+
+    memcpy(&pl2e[l2_table_offset(addr)],
+           &compat_idle_pg_table_l2[idle_index],
+           sizeof(l2_pgentry_t));
+
+    return EXCRET_fault_fixed;
+#endif
+    return 0;
+}
+
#include "compat/mm.c"
/*
diff -r fbba70d76aec xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Sun Jun 28 02:34:55 2009 +0800
+++ b/xen/include/asm-x86/mm.h Sun Jun 28 02:34:57 2009 +0800
@@ -518,6 +518,8 @@ int setup_m2p_table(unsigned long spfn,
int setup_m2p_table(unsigned long spfn, unsigned long epfn, int hotplug);
unsigned long domain_get_maximum_gpfn(struct domain *d);
+int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);
+int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern struct domain *dom_xen, *dom_io; /* for vmcoreinfo */
#endif /* __ASM_X86_MM_H__ */