# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1168597356 0
# Node ID 7c5c72a0283f433949d9da82ae197da4f27861eb
# Parent ded167dc4dc9180ee5ad79de2e9116bf2919d21f
[LINUX] x86/64: Sync pagetable management with i386 Xen code.
PUDs,PMDs,PTEs are all marked as ForeignPage so that they can be
grabbed from tlb_remove_page() at the appropriate time and freed in a
special way.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c | 16 +
linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h | 85 +++-------
2 files changed, 48 insertions(+), 53 deletions(-)
diff -r ded167dc4dc9 -r 7c5c72a0283f
linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c Fri Jan 12 10:13:25 2007 +0000
+++ b/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c Fri Jan 12 10:22:36 2007 +0000
@@ -164,6 +164,18 @@ void _arch_exit_mmap(struct mm_struct *m
mm_unpin(mm);
}
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *pte;
+
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ if (pte) {
+ SetPageForeign(pte, pte_free);
+ set_page_count(pte, 1);
+ }
+ return pte;
+}
+
void pte_free(struct page *pte)
{
unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
@@ -171,6 +183,10 @@ void pte_free(struct page *pte)
if (!pte_write(*virt_to_ptep(va)))
BUG_ON(HYPERVISOR_update_va_mapping(
va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
+
+ ClearPageForeign(pte);
+ set_page_count(pte, 1);
+
__free_page(pte);
}
#endif /* CONFIG_XEN */
diff -r ded167dc4dc9 -r 7c5c72a0283f
linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h Fri Jan 12 10:13:25 2007 +0000
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h Fri Jan 12 10:22:36 2007 +0000
@@ -64,50 +64,43 @@ static inline void pgd_populate(struct m
}
}
-static inline void pmd_free(pmd_t *pmd)
-{
- pte_t *ptep = virt_to_ptep(pmd);
-
- if (!pte_write(*ptep)) {
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)pmd,
- pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
- 0));
- }
- free_page((unsigned long)pmd);
-}
+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free(struct page *pte);
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- return pmd;
+ struct page *pg;
+
+ pg = pte_alloc_one(mm, addr);
+ return pg ? page_address(pg) : NULL;
+}
+
+static inline void pmd_free(pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pte_free(virt_to_page(pmd));
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- return pud;
+ struct page *pg;
+
+ pg = pte_alloc_one(mm, addr);
+ return pg ? page_address(pg) : NULL;
}
static inline void pud_free(pud_t *pud)
{
- pte_t *ptep = virt_to_ptep(pud);
-
- if (!pte_write(*ptep)) {
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)pud,
- pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
- 0));
- }
- free_page((unsigned long)pud);
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ pte_free(virt_to_page(pud));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- /*
- * We allocate two contiguous pages for kernel and user.
- */
- unsigned boundary;
+ /*
+ * We allocate two contiguous pages for kernel and user.
+ */
+ unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
if (!pgd)
@@ -124,11 +117,11 @@ static inline pgd_t *pgd_alloc(struct mm
(PTRS_PER_PGD - boundary) * sizeof(pgd_t));
memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
- /*
- * Set level3_user_pgt for vsyscall area
- */
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
- mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
return pgd;
}
@@ -160,18 +153,10 @@ static inline void pgd_free(pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- if (pte)
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pte)
make_page_readonly(pte, XENFEAT_writable_page_tables);
- return pte;
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
return pte;
}
@@ -181,18 +166,12 @@ static inline void pte_free_kernel(pte_t
static inline void pte_free_kernel(pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
- make_page_writable(pte, XENFEAT_writable_page_tables);
+ make_page_writable(pte, XENFEAT_writable_page_tables);
free_page((unsigned long)pte);
}
-extern void pte_free(struct page *pte);
-
-//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-//#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-//#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-
-#define __pte_free_tlb(tlb,x) pte_free((x))
-#define __pmd_free_tlb(tlb,x) pmd_free((x))
-#define __pud_free_tlb(tlb,x) pud_free((x))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
+#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#endif /* _X86_64_PGALLOC_H */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog