xen-devel

Re: [Xen-devel] [PATCH] x86/PAE partially linear page table support

To: "Keir Fraser" <keir@xxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH] x86/PAE partially linear page table support
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Fri, 16 Feb 2007 09:34:22 +0000
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
In-reply-to: <C1FA0F77.97A5%keir@xxxxxxxxxxxxx>
References: <45D46965.76E4.0078.0@xxxxxxxxxx> <C1FA0F77.97A5%keir@xxxxxxxxxxxxx>
>>> Keir Fraser <keir@xxxxxxxxxxxxx> 15.02.07 14:24 >>>
>On 15/2/07 13:08, "Jan Beulich" <jbeulich@xxxxxxxxxx> wrote:
>> But even for the general case - wouldn't it be possible to call
>> relinquish_memory() CONFIG_PAGING_LEVELS-1 times, each time lowering
>> the level of page tables that can be forced invalid by one?
>
>Yeah, this would work. Seems acceptable.
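
In other words, for PAE the relinquish sequence becomes one pass per page
table level, roots first (just a condensed sketch of the domain.c hunk
below, which additionally covers d->xenpage_list):

    /* Force-invalidate the top-most still valid tables first; after
     * that, a non-zero type count one level further down can only stem
     * from a circular 'linear page table' reference, which the
     * following pass is then free to break. */
    relinquish_memory(d, &d->page_list, PGT_l3_page_table);
    relinquish_memory(d, &d->page_list, PGT_l2_page_table);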

Okay, so here's a second try:

While full linear page table support makes little sense (and would be more
complicated to implement), partial linear page table support is almost
identical to the non-PAE case, and is used (at least) by NetWare.
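
For illustration only (this is not part of the patch): a guest would set up
such a partially linear mapping by pointing an L2 slot, read-only, at an L2
table, e.g. via mmu_update. linear_slot and l2_mfn below are made-up names:

    /* Sketch: install a read-only L2 entry that references an L2 table,
     * making the L1 tables reachable from it readable through the
     * virtual address space. PAE page table entries are 8 bytes. */
    mmu_update_t req;

    req.ptr = ((uint64_t)l2_mfn << PAGE_SHIFT) + linear_slot * 8;
    req.val = ((uint64_t)l2_mfn << PAGE_SHIFT) | _PAGE_PRESENT;
    /* Deliberately no _PAGE_RW - the hypervisor rejects writable
     * linear page tables (see the MEM_LOG() check in the patch). */
    if ( HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF) != 0 )
        /* the request was refused */;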

Of course, the question here is whether this shouldn't be generalized - all
validated page tables (except for top-level PAE ones) could be allowed to
be installed at lower levels using the same scheme.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2007-02-07/xen/arch/x86/domain.c
===================================================================
--- 2007-02-07.orig/xen/arch/x86/domain.c       2007-02-07 16:27:53.000000000 +0100
+++ 2007-02-07/xen/arch/x86/domain.c    2007-02-15 15:23:47.000000000 +0100
@@ -1333,7 +1333,8 @@ int hypercall_xlat_continuation(unsigned
 }
 #endif
 
-static void relinquish_memory(struct domain *d, struct list_head *list)
+static void relinquish_memory(struct domain *d, struct list_head *list,
+                              unsigned long type)
 {
     struct list_head *ent;
     struct page_info  *page;
@@ -1362,23 +1363,24 @@ static void relinquish_memory(struct dom
             put_page(page);
 
         /*
-         * Forcibly invalidate base page tables at this point to break circular
-         * 'linear page table' references. This is okay because MMU structures
-         * are not shared across domains and this domain is now dead. Thus base
-         * tables are not in use so a non-zero count means circular reference.
+         * Forcibly invalidate top-most, still valid page tables at this point
+         * to break circular 'linear page table' references. This is okay
+         * because MMU structures are not shared across domains and this domain
+         * is now dead. Thus top-most valid tables are not in use so a non-zero
+         * count means circular reference.
          */
         y = page->u.inuse.type_info;
         for ( ; ; )
         {
             x = y;
             if ( likely((x & (PGT_type_mask|PGT_validated)) !=
-                        (PGT_base_page_table|PGT_validated)) )
+                        (type|PGT_validated)) )
                 break;
 
             y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
             if ( likely(y == x) )
             {
-                free_page_type(page, PGT_base_page_table);
+                free_page_type(page, type);
                 break;
             }
         }
@@ -1476,8 +1478,16 @@ void domain_relinquish_resources(struct 
         destroy_gdt(v);
 
     /* Relinquish every page of memory. */
-    relinquish_memory(d, &d->xenpage_list);
-    relinquish_memory(d, &d->page_list);
+#if CONFIG_PAGING_LEVELS >= 4
+    relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
+    relinquish_memory(d, &d->page_list, PGT_l4_page_table);
+#endif
+#if CONFIG_PAGING_LEVELS >= 3
+    relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
+    relinquish_memory(d, &d->page_list, PGT_l3_page_table);
+#endif
+    relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
+    relinquish_memory(d, &d->page_list, PGT_l2_page_table);
 
     /* Free page used by xen oprofile buffer */
     free_xenoprof_pages(d);
Index: 2007-02-07/xen/arch/x86/mm.c
===================================================================
--- 2007-02-07.orig/xen/arch/x86/mm.c   2007-02-07 16:08:05.000000000 +0100
+++ 2007-02-07/xen/arch/x86/mm.c        2007-02-07 16:31:33.000000000 +0100
@@ -547,7 +547,7 @@ get_linear_pagetable(
             return 0;
 
         /*
-         * Make sure that the mapped frame is an already-validated L2 table. 
+         * Make sure that the mapped frame is an already-validated root table.
          * If so, atomically increment the count (checking for overflow).
          */
         page = mfn_to_page(pfn);
@@ -569,6 +569,51 @@ get_linear_pagetable(
 }
 #endif /* !CONFIG_X86_PAE */
 
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+static int
+get_l2_linear_pagetable(
+    l2_pgentry_t l2e, unsigned long l2e_pfn, struct domain *d)
+{
+    unsigned long pfn;
+
+    if ( (l2e_get_flags(l2e) & _PAGE_RW) )
+    {
+        MEM_LOG("Attempt to create linear p.t. with write perms");
+        return 0;
+    }
+
+    if ( (pfn = l2e_get_pfn(l2e)) != l2e_pfn )
+    {
+        unsigned long x, y;
+        struct page_info *page;
+
+        /* Make sure the mapped frame belongs to the correct domain. */
+        if ( unlikely(!get_page_from_pagenr(pfn, d)) )
+            return 0;
+
+        /*
+         * Make sure that the mapped frame is an already-validated L2 table.
+         * If so, atomically increment the count (checking for overflow).
+         */
+        page = mfn_to_page(pfn);
+        y = page->u.inuse.type_info;
+        do {
+            x = y;
+            if ( unlikely((x & PGT_count_mask) == PGT_count_mask) ||
+                 unlikely((x & (PGT_type_mask|PGT_validated)) !=
+                          (PGT_l2_page_table|PGT_validated)) )
+            {
+                put_page(page);
+                return 0;
+            }
+        }
+        while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x );
+    }
+
+    return 1;
+}
+#endif /* CONFIG_X86_PAE || CONFIG_X86_64 */
+
 int
 get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *d)
@@ -645,10 +690,16 @@ get_page_from_l2e(
     }
 
     rc = get_page_and_type_from_pagenr(l2e_get_pfn(l2e), PGT_l1_page_table, d);
-#if CONFIG_PAGING_LEVELS == 2
     if ( unlikely(!rc) )
+    {
+#if CONFIG_PAGING_LEVELS == 2
         rc = get_linear_pagetable(l2e, pfn, d);
+#else
+        if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) )
+            rc = get_l2_linear_pagetable(l2e, pfn, d);
 #endif
+    }
+
     return rc;
 }
 

