To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Fix problems with direct-mapping handling especially when
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 16 Feb 2006 18:22:08 +0000
Delivery-date: Thu, 16 Feb 2006 18:35:41 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID a543a4778a7d9888fad13ffafff0aa9bc5541b48
# Parent  bee659de2a369cbf1da2e7751c0eb5e2f0e11dec
Fix problems with direct-mapping handling, especially when
VMX assist is used for real mode and protected mode.

Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Signed-off-by: Xin B Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
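
At a high level, this changeset does four things, as read from the diff below: it moves the shadow_direct_map_clean() call out of the CR0 paging-enable path and into the SVM/VMX resource-relinquish paths; it turns allocation failures in shadow_direct_map_fault() into a teardown of the partial direct map followed by domain_crash_synchronous(), instead of a silent failure return; it adds __direct_{get,set}_l1e/l2e/l3e accessors that map the domain's phys_table around each access rather than relying on a cached shadow_vtable; and it factors p2m population into a shared map_p2m_entry() helper. The standalone C sketch below models only the new out-of-memory path. It is illustrative rather than Xen code: alloc_page(), direct_map_clean() and crash_domain() are stand-ins for alloc_domheap_page(), shadow_direct_map_clean() and domain_crash_synchronous().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

static void *alloc_page(void)      { return calloc(1, PAGE_SIZE); }
static void direct_map_clean(void) { puts("tearing down partial direct map"); }
static void crash_domain(void)     { puts("domain_crash_synchronous()"); exit(1); }

/* Model of the reworked fault handler: memory failures go to "nomem". */
static int direct_map_fault(unsigned long vpa)
{
    void *l2_table, *l1_table;

    if ( (l2_table = alloc_page()) == NULL )
        goto nomem;                       /* previously just "goto fail" */
    if ( (l1_table = alloc_page()) == NULL )
        goto nomem;

    printf("direct map entry installed for vpa %#lx\n", vpa);
    free(l1_table);                       /* the real tables stay installed */
    free(l2_table);
    return 1;                             /* EXCRET_fault_fixed */

nomem:
    direct_map_clean();                   /* drop whatever was built so far */
    crash_domain();                       /* does not return */
    return 0;
}

int main(void)
{
    return direct_map_fault(0x100000UL) ? 0 : 1;
}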

diff -r bee659de2a36 -r a543a4778a7d xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Feb 16 15:35:09 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Feb 16 15:46:21 2006
@@ -799,6 +799,7 @@
         struct domain *d = v->domain;
         if (d->arch.hvm_domain.shared_page_va)
             unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va);
+        shadow_direct_map_clean(v);
     }
 
     destroy_vmcb(&v->arch.hvm_svm);
@@ -1443,9 +1444,7 @@
                 put_page(mfn_to_page(old_base_mfn));
        }
 #endif
-#if CONFIG_PAGING_LEVELS == 2
-        shadow_direct_map_clean(v);
-#endif
+
         /* Now arch.guest_table points to machine physical. */
         v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
         update_pagetables(v);
diff -r bee659de2a36 -r a543a4778a7d xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Feb 16 15:35:09 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Feb 16 15:46:21 2006
@@ -88,6 +88,7 @@
         if ( d->arch.hvm_domain.shared_page_va )
             unmap_domain_page_global(
                (void *)d->arch.hvm_domain.shared_page_va);
+        shadow_direct_map_clean(v);
     }
 
     vmx_request_clear_vmcs(v);
@@ -1227,9 +1228,7 @@
                 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
             }
         }
-#if CONFIG_PAGING_LEVELS == 2
-        shadow_direct_map_clean(v);
-#endif
+
         /*
          * Now arch.guest_table points to machine physical.
          */
diff -r bee659de2a36 -r a543a4778a7d xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Thu Feb 16 15:35:09 2006
+++ b/xen/arch/x86/shadow.c     Thu Feb 16 15:46:21 2006
@@ -3743,6 +3743,7 @@
 #if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) ||  \
     ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
 
+
 /* 
  * Use GUEST_PGENTRY_32 to force PAE_SHADOW_SELF_ENTRY for L4.
  *
@@ -3756,8 +3757,8 @@
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    l3_pgentry_t sl3e;
-    l2_pgentry_t sl2e;
+    l3_pgentry_t sl3e, *sl3e_p;
+    l2_pgentry_t sl2e, *sl2e_p;
     l1_pgentry_t sl1e;
     unsigned long mfn, smfn;
     struct page_info *page;
@@ -3773,37 +3774,47 @@
 
     shadow_lock(d);
 
-    __shadow_get_l3e(v, vpa, &sl3e);
+    __direct_get_l3e(v, vpa, &sl3e);
 
     if ( !(l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto fail; 
+            goto nomem; 
+
         smfn = page_to_mfn(page);
         sl3e = l3e_from_pfn(smfn, _PAGE_PRESENT);
-        __shadow_set_l3e(v, vpa, &sl3e);
-    }
-
-    __shadow_get_l2e(v, vpa, &sl2e);
+
+        sl3e_p = (l3_pgentry_t *)map_domain_page(smfn);
+        memset(sl3e_p, 0, PAGE_SIZE);
+        unmap_domain_page(sl3e_p);
+
+        __direct_set_l3e(v, vpa, &sl3e);
+    }
+
+    __direct_get_l2e(v, vpa, &sl2e);
 
     if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) ) 
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto fail; 
+            goto nomem; 
+
         smfn = page_to_mfn(page);
-
         sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
-        __shadow_set_l2e(v, vpa, &sl2e);
-    }
-
-    __shadow_get_l1e(v, vpa, &sl1e);
+        sl2e_p = (l2_pgentry_t *)map_domain_page(smfn);
+        memset(sl2e_p, 0, PAGE_SIZE);
+        unmap_domain_page(sl2e_p);
+
+        __direct_set_l2e(v, vpa, &sl2e);
+    }
+
+    __direct_get_l1e(v, vpa, &sl1e);
 
     if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) ) 
     {
         sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
-        __shadow_set_l1e(v, vpa, &sl1e);
+        __direct_set_l1e(v, vpa, &sl1e);
     } 
 
     shadow_unlock(d);
@@ -3811,6 +3822,10 @@
 
 fail:
     return 0;
+
+nomem:
+    shadow_direct_map_clean(v);
+    domain_crash_synchronous();
 }
 #endif
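
For the configurations that take the PAE path above, shadow_direct_map_fault() now walks the direct map through the new __direct_get/set_l3e/l2e/l1e accessors and zeroes every freshly allocated table (map, memset, unmap) before installing its entry, so stale contents of a new domheap page can never be read back as present entries. The condensed model below is an assumption-laden sketch, not Xen code: plain pointer arrays replace page-table pages, and calloc() stands in for alloc_domheap_page() plus the map/memset/unmap sequence.

#include <stdlib.h>

#define ENTRIES 512                              /* entries per PAE table page */

typedef struct table { void *slot[ENTRIES]; } table_t;

static table_t *get_or_alloc(table_t *t, unsigned int idx)
{
    if ( t->slot[idx] == NULL )
        t->slot[idx] = calloc(1, sizeof(table_t));   /* zeroed before use */
    return t->slot[idx];
}

/* Returns 1 on success, 0 on allocation failure (the patch's "nomem" case,
 * where the real code cleans the direct map and crashes the domain). */
static int direct_map_install(table_t *l3, unsigned long gpa, unsigned long mfn)
{
    table_t *l2, *l1;

    if ( (l2 = get_or_alloc(l3, (gpa >> 30) & 3)) == NULL )             /* L3 slot, 4 entries */
        return 0;
    if ( (l1 = get_or_alloc(l2, (gpa >> 21) & (ENTRIES - 1))) == NULL ) /* L2 slot */
        return 0;

    l1->slot[(gpa >> 12) & (ENTRIES - 1)] = (void *)(mfn << 12);        /* L1 slot */
    return 1;
}

int main(void)
{
    table_t *l3 = calloc(1, sizeof(*l3));
    return (l3 && direct_map_install(l3, 0x12345678UL, 0x4242UL)) ? 0 : 1;
}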
 
diff -r bee659de2a36 -r a543a4778a7d xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Thu Feb 16 15:35:09 2006
+++ b/xen/arch/x86/shadow32.c   Thu Feb 16 15:46:21 2006
@@ -807,21 +807,99 @@
     v->arch.monitor_vtable = 0;
 }
 
+static int
+map_p2m_entry(
+    l1_pgentry_t *l1tab, unsigned long va, unsigned long gpa, unsigned long mfn)
+{
+    unsigned long *l0tab = NULL;
+    l1_pgentry_t l1e = { 0 };
+    struct page_info *page;
+
+    l1e = l1tab[l1_table_offset(va)];
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            goto fail;
+
+        if ( l0tab  )
+            unmap_domain_page(l0tab);
+        l0tab = map_domain_page(page_to_mfn(page));
+        memset(l0tab, 0, PAGE_SIZE );
+        l1e = l1tab[l1_table_offset(va)] =
+            l1e_from_page(page, __PAGE_HYPERVISOR);
+    }
+    else if ( l0tab == NULL)
+        l0tab = map_domain_page(l1e_get_pfn(l1e));
+
+    l0tab[gpa & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+
+    if ( l0tab )
+        unmap_domain_page(l0tab);
+
+    return 1;
+
+fail:
+    return 0;
+}
+
 int
 set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
               struct domain_mmap_cache *l2cache,
               struct domain_mmap_cache *l1cache)
 {
-    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
+    unsigned long tabpfn;
     l2_pgentry_t *l2, l2e;
     l1_pgentry_t *l1;
     struct page_info *l1page;
     unsigned long va = pfn << PAGE_SHIFT;
+    int error;
+
+    if ( shadow_mode_external(d) )
+    {
+        tabpfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+        va = RO_MPT_VIRT_START + (pfn * sizeof (unsigned long));
+    }
+    else
+    {
+        tabpfn = pagetable_get_pfn(d->arch.phys_table);
+        va = pfn << PAGE_SHIFT;
+    }
 
     ASSERT(tabpfn != 0);
     ASSERT(shadow_lock_is_acquired(d));
 
     l2 = map_domain_page_with_cache(tabpfn, l2cache);
+
+    /*
+     * The following code covers (SHM_translate | SHM_external) mode.
+     */
+
+    if ( shadow_mode_external(d) )
+    {
+        l1_pgentry_t *l1tab = NULL;
+        l2_pgentry_t l2e;
+
+        l2e = l2[l2_table_offset(va)];
+
+        ASSERT( l2e_get_flags(l2e) & _PAGE_PRESENT );
+
+        l1tab = map_domain_page(l2e_get_pfn(l2e));
+        error = map_p2m_entry(l1tab, va, pfn, mfn);
+        if ( !error )
+            domain_crash_synchronous(); 
+
+        unmap_domain_page(l1tab);
+        unmap_domain_page_with_cache(l2, l2cache);
+
+        return 1;
+    }
+
+    /*
+     * The following code covers SHM_translate mode.
+     */
+    ASSERT(shadow_mode_translate(d));
+
     l2e = l2[l2_table_offset(va)];
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
@@ -856,13 +934,10 @@
 
     l2_pgentry_t *l2tab = NULL;
     l1_pgentry_t *l1tab = NULL;
-    unsigned long *l0tab = NULL;
     l2_pgentry_t l2e = { 0 };
-    l1_pgentry_t l1e = { 0 };
-
     struct page_info *page;
-    unsigned long pfn;
-    int i;
+    unsigned long gpfn, mfn;
+    int error;
 
     if ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) )
     {
@@ -892,34 +967,22 @@
 
     list_ent = d->page_list.next;
 
-    for ( i = 0; list_ent != &d->page_list; i++ )
+    for ( gpfn = 0; list_ent != &d->page_list; gpfn++ )
     {
         page = list_entry(list_ent, struct page_info, list);
-        pfn = page_to_mfn(page);
-
-        l1e = l1tab[l1_table_offset(va)];
-        if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
-        {
-            page = alloc_domheap_page(NULL);
-            if ( l0tab  )
-                unmap_domain_page(l0tab);
-            l0tab = map_domain_page(page_to_mfn(page));
-            memset(l0tab, 0, PAGE_SIZE );
-            l1e = l1tab[l1_table_offset(va)] =
-                l1e_from_page(page, __PAGE_HYPERVISOR);
-        }
-        else if ( l0tab == NULL)
-            l0tab = map_domain_page(l1e_get_pfn(l1e));
-
-        l0tab[i & ((1 << PAGETABLE_ORDER) - 1) ] = pfn;
-        list_ent = frame_table[pfn].list.next;
-        va += sizeof(pfn);
+        mfn = page_to_mfn(page);
+
+        error = map_p2m_entry(l1tab, va, gpfn, mfn);
+        if ( !error )
+            domain_crash_synchronous(); 
+
+        list_ent = frame_table[mfn].list.next;
+        va += sizeof(mfn);
     }
 
     if (l2tab)
         unmap_domain_page(l2tab);
     unmap_domain_page(l1tab);
-    unmap_domain_page(l0tab);
 
     return 1;
 }
@@ -981,21 +1044,26 @@
     }
 
     shadow_lock(d);
-
-    __shadow_get_l2e(v, vpa, &sl2e);
-
-   if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
+  
+   __direct_get_l2e(v, vpa, &sl2e);
+
+    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto fail;
+            goto nomem;
 
         smfn = page_to_mfn(page);
         sl2e = l2e_from_pfn(smfn, __PAGE_HYPERVISOR | _PAGE_USER);
-        __shadow_set_l2e(v, vpa, sl2e);
-    }
-
-    sple = (l1_pgentry_t *)map_domain_page(l2e_get_pfn(sl2e));
+
+        sple = (l1_pgentry_t *)map_domain_page(smfn);
+        memset(sple, 0, PAGE_SIZE);
+        __direct_set_l2e(v, vpa, sl2e);
+    } 
+
+    if ( !sple )
+        sple = (l1_pgentry_t *)map_domain_page(l2e_get_pfn(sl2e));
+
     sl1e = sple[l1_table_offset(vpa)];
 
     if ( !(l1e_get_flags(sl1e) & _PAGE_PRESENT) )
@@ -1003,13 +1071,19 @@
         sl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR | _PAGE_USER);
         sple[l1_table_offset(vpa)] = sl1e;
     }
-    unmap_domain_page(sple);
+
+    if (sple)
+        unmap_domain_page(sple);
+
     shadow_unlock(d);
-
     return EXCRET_fault_fixed;
 
 fail:
     return 0;
+
+nomem:
+    shadow_direct_map_clean(v);
+    domain_crash_synchronous();
 }
 
 
@@ -1021,16 +1095,12 @@
     if ( !(page = alloc_domheap_page(NULL)) )
         goto fail;
 
-    root = map_domain_page_global(page_to_mfn(page));
+    root = map_domain_page(page_to_mfn(page));
     memset(root, 0, PAGE_SIZE);
+    unmap_domain_page(root);
 
     v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
-    /* 
-     * We need to set shadow_vtable to get __shadow_set/get_xxx
-     * working
-     */
-    v->arch.shadow_vtable = (l2_pgentry_t *) root;
-    v->arch.shadow_table = mk_pagetable(0);
+
     return 1;
 
 fail:
@@ -1042,9 +1112,8 @@
     int i;
     l2_pgentry_t *l2e;
 
-    ASSERT ( v->arch.shadow_vtable );
-
-    l2e = v->arch.shadow_vtable;
+    l2e = map_domain_page(
+      pagetable_get_pfn(v->domain->arch.phys_table));
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
     {
@@ -1055,8 +1124,7 @@
     free_domheap_page(
             mfn_to_page(pagetable_get_pfn(v->domain->arch.phys_table)));
 
-    unmap_domain_page_global(v->arch.shadow_vtable);
-    v->arch.shadow_vtable = 0;
+    unmap_domain_page(l2e);
     v->domain->arch.phys_table = mk_pagetable(0);
 }
 
@@ -1168,13 +1236,6 @@
                 printk("alloc_p2m_table failed (out-of-memory?)\n");
                 goto nomem;
             }
-        }
-        else
-        {
-            // external guests provide their own memory for their P2M maps.
-            //
-            ASSERT(d == page_get_owner(mfn_to_page(pagetable_get_pfn(
-                d->arch.phys_table))));
         }
     }
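
The shadow32.c changes above pull the per-entry work of populating the phys-to-machine table out into map_p2m_entry(), so set_p2m_entry() and alloc_p2m_table() no longer duplicate the walk, and set_p2m_entry() now distinguishes shadow_mode_external domains (whose p2m hangs off vcpu 0's monitor table and is indexed via RO_MPT_VIRT_START) from translate-only domains (indexed by pfn << PAGE_SHIFT). The sketch below models just the helper, under stated assumptions: plain heap memory replaces domheap pages and map_domain_page(), and the names mirror the patch without being Xen code.

#include <stdlib.h>

#define PAGE_SIZE        4096UL
#define L1_ENTRIES       (PAGE_SIZE / sizeof(void *))
#define SLOTS_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))

/* Ensure the leaf page behind the L1 slot for 'va' exists, then record
 * gpfn -> mfn in it.  Returns 0 on allocation failure (the caller in the
 * real code crashes the domain). */
static int map_p2m_entry(unsigned long **l1tab, unsigned long va,
                         unsigned long gpfn, unsigned long mfn)
{
    unsigned long idx = (va / PAGE_SIZE) % L1_ENTRIES;    /* ~ l1_table_offset() */
    unsigned long *l0tab = l1tab[idx];

    if ( l0tab == NULL )                          /* no _PAGE_PRESENT leaf yet */
    {
        l0tab = calloc(1, PAGE_SIZE);             /* alloc_domheap_page + memset */
        if ( l0tab == NULL )
            return 0;
        l1tab[idx] = l0tab;
    }

    l0tab[gpfn & (SLOTS_PER_PAGE - 1)] = mfn;     /* the p2m slot itself */
    return 1;
}

int main(void)
{
    unsigned long **l1tab = calloc(L1_ENTRIES, sizeof(*l1tab));
    if ( l1tab == NULL )
        return 1;
    return map_p2m_entry(l1tab, 3 * sizeof(unsigned long), 3, 0x1234UL) ? 0 : 1;
}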
 
diff -r bee659de2a36 -r a543a4778a7d xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Thu Feb 16 15:35:09 2006
+++ b/xen/arch/x86/shadow_public.c      Thu Feb 16 15:46:21 2006
@@ -44,33 +44,28 @@
     if ( !(page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA)) )
         goto fail;
 
-    root = map_domain_page_global(page_to_mfn(page));
+    root = map_domain_page(page_to_mfn(page));
     memset(root, 0, PAGE_SIZE);
     root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
 
     v->domain->arch.phys_table = mk_pagetable(page_to_maddr(page));
-    /* 
-     * We need to set shadow_vtable to get __shadow_set/get_xxx
-     * working
-     */
-    v->arch.shadow_vtable = (l2_pgentry_t *) root;
-
+
+    unmap_domain_page(root);
     return 1;
-    
+
 fail:
     return 0;
 }
 
-static void shadow_direct_map_clean(struct vcpu *v)
+void shadow_direct_map_clean(struct vcpu *v)
 {
     l2_pgentry_t *l2e;
     l3_pgentry_t *l3e;
     int i, j;
 
-    ASSERT ( v->arch.shadow_vtable );
-
-    l3e = (l3_pgentry_t *) v->arch.shadow_vtable;
-    
+    l3e = (l3_pgentry_t *)map_domain_page(
+        pagetable_get_pfn(v->domain->arch.phys_table));
+
     for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
     {
         if ( l3e_get_flags(l3e[i]) & _PAGE_PRESENT )
@@ -90,8 +85,7 @@
     free_domheap_page(
         mfn_to_page(pagetable_get_pfn(v->domain->arch.phys_table)));
 
-    unmap_domain_page_global(v->arch.shadow_vtable);
-    v->arch.shadow_vtable = 0;
+    unmap_domain_page(l3e);
     v->domain->arch.phys_table = mk_pagetable(0);
 }
 
@@ -102,12 +96,7 @@
 
 int shadow_set_guest_paging_levels(struct domain *d, int levels)
 {
-    struct vcpu *v = current;
     shadow_lock(d);
-
-    if ( shadow_mode_translate(d) && 
-         !(pagetable_get_paddr(v->domain->arch.phys_table)) )
-         shadow_direct_map_clean(v);
 
     switch(levels) {
 #if CONFIG_PAGING_LEVELS == 4
@@ -1330,164 +1319,166 @@
     return shadow_mode_refcounts(d);
 }
 
-int
-set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
-              struct domain_mmap_cache *l2cache,
-              struct domain_mmap_cache *l1cache)
-{
-    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
-    l2_pgentry_t *l2, l2e;
-    l1_pgentry_t *l1;
-    struct page_info *l1page;
-    unsigned long va = pfn << PAGE_SHIFT;
-
-    ASSERT(tabpfn != 0);
-
-    l2 = map_domain_page_with_cache(tabpfn, l2cache);
-    l2e = l2[l2_table_offset(va)];
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-    {
-        l1page = alloc_domheap_page(NULL);
-        if ( !l1page )
-        {
-            unmap_domain_page_with_cache(l2, l2cache);
-            return 0;
-        }
-
-        l1 = map_domain_page_with_cache(page_to_mfn(l1page), l1cache);
-        memset(l1, 0, PAGE_SIZE);
-        unmap_domain_page_with_cache(l1, l1cache);
-
-        l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR);
-        l2[l2_table_offset(va)] = l2e;
-    }
-    unmap_domain_page_with_cache(l2, l2cache);
-
-    l1 = map_domain_page_with_cache(l2e_get_pfn(l2e), l1cache);
-    l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
-    unmap_domain_page_with_cache(l1, l1cache);
-
-    return 1;
-}
-
-int
-alloc_p2m_table(struct domain *d)
-{
-    struct list_head *list_ent;
-    unsigned long va = RO_MPT_VIRT_START; /*  phys_to_machine_mapping */
-
+static int
+map_p2m_entry(
+    pgentry_64_t *top_tab, unsigned long va, unsigned long gpa, unsigned long mfn)
+{
 #if CONFIG_PAGING_LEVELS >= 4
-    l4_pgentry_t *l4tab = NULL;
-    l4_pgentry_t l4e = { 0 };
+    pgentry_64_t l4e = { 0 };
 #endif
 #if CONFIG_PAGING_LEVELS >= 3
-    l3_pgentry_t *l3tab = NULL;
-    l3_pgentry_t l3e = { 0 };
+    pgentry_64_t *l3tab = NULL;
+    pgentry_64_t l3e = { 0 };
 #endif
     l2_pgentry_t *l2tab = NULL;
     l1_pgentry_t *l1tab = NULL;
     unsigned long *l0tab = NULL;
     l2_pgentry_t l2e = { 0 };
     l1_pgentry_t l1e = { 0 };
-
-    unsigned long pfn;
-    int i;
+    struct page_info *page;
+
+#if CONFIG_PAGING_LEVELS >= 4
+    l4e = top_tab[l4_table_offset(va)];
+    if ( !(entry_get_flags(l4e) & _PAGE_PRESENT) ) 
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            goto nomem;
+
+        l3tab = map_domain_page(page_to_mfn(page));
+        memset(l3tab, 0, PAGE_SIZE);
+        l4e = top_tab[l4_table_offset(va)] = 
+            entry_from_page(page, __PAGE_HYPERVISOR);
+    } 
+    else if ( l3tab == NULL)
+        l3tab = map_domain_page(entry_get_pfn(l4e));
+
+    l3e = l3tab[l3_table_offset(va)];
+#else
+    l3e = top_tab[l3_table_offset(va)];
+#endif
+    if ( !(entry_get_flags(l3e) & _PAGE_PRESENT) ) 
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            goto nomem;
+
+        l2tab = map_domain_page(page_to_mfn(page));
+        memset(l2tab, 0, PAGE_SIZE);
+        l3e = l3tab[l3_table_offset(va)] = 
+            entry_from_page(page, __PAGE_HYPERVISOR);
+    } 
+    else if ( l2tab == NULL) 
+        l2tab = map_domain_page(entry_get_pfn(l3e));
+
+    l2e = l2tab[l2_table_offset(va)];
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            goto nomem;
+
+        l1tab = map_domain_page(page_to_mfn(page));
+        memset(l1tab, 0, PAGE_SIZE);
+        l2e = l2tab[l2_table_offset(va)] = 
+            l2e_from_page(page, __PAGE_HYPERVISOR);
+    } 
+    else if ( l1tab == NULL) 
+        l1tab = map_domain_page(l2e_get_pfn(l2e));
+
+    l1e = l1tab[l1_table_offset(va)];
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) 
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            goto nomem;
+
+        l0tab = map_domain_page(page_to_mfn(page));
+        memset(l0tab, 0, PAGE_SIZE);
+        l1e = l1tab[l1_table_offset(va)] = 
+            l1e_from_page(page, __PAGE_HYPERVISOR);
+    }
+    else if ( l0tab == NULL) 
+        l0tab = map_domain_page(l1e_get_pfn(l1e));
+
+    l0tab[gpa & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+
+    if ( l2tab )
+    {
+        unmap_domain_page(l2tab);
+        l2tab = NULL;
+    }
+    if ( l1tab )
+    {
+        unmap_domain_page(l1tab);
+        l1tab = NULL;
+    }
+    if ( l0tab )
+    {
+        unmap_domain_page(l0tab);
+        l0tab = NULL;
+    }
+
+    return 1;
+
+nomem:
+
+    return 0;
+}
+
+int
+set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
+              struct domain_mmap_cache *l2cache,
+              struct domain_mmap_cache *l1cache)
+{
+    unsigned long tabpfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+    pgentry_64_t *top;
+    unsigned long va = RO_MPT_VIRT_START + (pfn * sizeof (unsigned long));
+    int error;
+
+    ASSERT(tabpfn != 0);
+    ASSERT(shadow_lock_is_acquired(d));
+
+    top = map_domain_page_with_cache(tabpfn, l2cache);
+    error = map_p2m_entry(top, va, pfn, mfn);
+    unmap_domain_page_with_cache(top, l2cache);
+
+    if ( !error )
+         domain_crash_synchronous();
+        
+    return 1;
+}
+
+int
+alloc_p2m_table(struct domain *d)
+{
+    struct list_head *list_ent;
+    unsigned long va = RO_MPT_VIRT_START; /*  phys_to_machine_mapping */
+    pgentry_64_t *top_tab = NULL;
+    unsigned long mfn;
+    int gpa;
 
     ASSERT ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
-#if CONFIG_PAGING_LEVELS >= 4
-    l4tab = map_domain_page(
+    top_tab = map_domain_page(
         pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
-    l3tab = map_domain_page(
-        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
-#endif
+
 
     list_ent = d->page_list.next;
 
-    for ( i = 0; list_ent != &d->page_list; i++ ) 
+    for ( gpa = 0; list_ent != &d->page_list; gpa++ ) 
     {
         struct page_info *page;
-
         page = list_entry(list_ent, struct page_info, list);
-        pfn = page_to_mfn(page);
-
-#if CONFIG_PAGING_LEVELS >= 4
-        l4e = l4tab[l4_table_offset(va)];
-        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) 
-        {
-            page = alloc_domheap_page(NULL);
-            l3tab = map_domain_page(page_to_mfn(page));
-            memset(l3tab, 0, PAGE_SIZE);
-            l4e = l4tab[l4_table_offset(va)] = 
-                l4e_from_page(page, __PAGE_HYPERVISOR);
-        } 
-        else if ( l3tab == NULL)
-            l3tab = map_domain_page(l4e_get_pfn(l4e));
-#endif
-        l3e = l3tab[l3_table_offset(va)];
-        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) 
-        {
-            page = alloc_domheap_page(NULL);
-            l2tab = map_domain_page(page_to_mfn(page));
-            memset(l2tab, 0, PAGE_SIZE);
-            l3e = l3tab[l3_table_offset(va)] = 
-                l3e_from_page(page, __PAGE_HYPERVISOR);
-        } 
-        else if ( l2tab == NULL) 
-            l2tab = map_domain_page(l3e_get_pfn(l3e));
-
-        l2e = l2tab[l2_table_offset(va)];
-        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 
-        {
-            page = alloc_domheap_page(NULL);
-            l1tab = map_domain_page(page_to_mfn(page));
-            memset(l1tab, 0, PAGE_SIZE);
-            l2e = l2tab[l2_table_offset(va)] = 
-                l2e_from_page(page, __PAGE_HYPERVISOR);
-        } 
-        else if ( l1tab == NULL) 
-            l1tab = map_domain_page(l2e_get_pfn(l2e));
-
-        l1e = l1tab[l1_table_offset(va)];
-        if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) 
-        {
-            page = alloc_domheap_page(NULL);
-            l0tab = map_domain_page(page_to_mfn(page));
-            memset(l0tab, 0, PAGE_SIZE);
-            l1e = l1tab[l1_table_offset(va)] = 
-                l1e_from_page(page, __PAGE_HYPERVISOR);
-        }
-        else if ( l0tab == NULL) 
-            l0tab = map_domain_page(l1e_get_pfn(l1e));
-
-        l0tab[i & ((PAGE_SIZE / sizeof (pfn)) - 1) ] = pfn;
-        list_ent = frame_table[pfn].list.next;
-        va += sizeof (pfn);
-
-        if ( l2tab )
-        {
-            unmap_domain_page(l2tab);
-            l2tab = NULL;
-        }
-        if ( l1tab )
-        {
-            unmap_domain_page(l1tab);
-            l1tab = NULL;
-        }
-        if ( l0tab )
-        {
-            unmap_domain_page(l0tab);
-            l0tab = NULL;
-        }
-    }
-#if CONFIG_PAGING_LEVELS >= 4
-    unmap_domain_page(l4tab);
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
-    unmap_domain_page(l3tab);
-#endif
+        mfn = page_to_mfn(page);
+
+        map_p2m_entry(top_tab, va, gpa, mfn);
+        list_ent = frame_table[mfn].list.next;
+        va += sizeof(mfn);
+    }
+
+    unmap_domain_page(top_tab);
+
     return 1;
 }
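
In shadow_public.c the same factoring happens for the >= 3-level builds: map_p2m_entry() now walks however many levels CONFIG_PAGING_LEVELS provides, allocating and zeroing missing tables on the way down, and set_p2m_entry() locates the table through vcpu 0's monitor table rather than d->arch.phys_table. Each guest pfn occupies one unsigned long in the read-only area starting at RO_MPT_VIRT_START, which is what the new va computation expresses. The tiny standalone check below only exercises that index arithmetic; the base address is a made-up placeholder, not the real RO_MPT_VIRT_START.

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define SLOTS_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
#define RO_MPT_BASE      0xFC000000UL           /* placeholder, not the real value */

int main(void)
{
    for ( unsigned long pfn = 0; pfn < 3 * SLOTS_PER_PAGE; pfn += SLOTS_PER_PAGE - 1 )
    {
        unsigned long va   = RO_MPT_BASE + pfn * sizeof(unsigned long);
        unsigned long slot = pfn & (SLOTS_PER_PAGE - 1);
        printf("pfn %5lu -> va %#10lx, slot %3lu in its page\n", pfn, va, slot);
    }
    return 0;
}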
 
diff -r bee659de2a36 -r a543a4778a7d xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Thu Feb 16 15:35:09 2006
+++ b/xen/include/asm-x86/shadow.h      Thu Feb 16 15:46:21 2006
@@ -115,9 +115,7 @@
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
 #define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
-#if CONFIG_PAGING_LEVELS == 2
 extern void shadow_direct_map_clean(struct vcpu *v);
-#endif
 extern int shadow_direct_map_init(struct vcpu *v);
 extern int shadow_direct_map_fault(
     unsigned long vpa, struct cpu_user_regs *regs);
@@ -556,6 +554,38 @@
         update_hl2e(v, va);
 
     __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
+}
+
+static inline void
+__direct_get_l2e(
+    struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
+{
+    l2_pgentry_t *phys_vtable;
+
+    ASSERT(shadow_mode_enabled(v->domain));
+
+    phys_vtable = map_domain_page(
+        pagetable_get_pfn(v->domain->arch.phys_table));
+
+    *psl2e = phys_vtable[l2_table_offset(va)];
+
+    unmap_domain_page(phys_vtable);
+}
+
+static inline void
+__direct_set_l2e(
+    struct vcpu *v, unsigned long va, l2_pgentry_t value)
+{
+    l2_pgentry_t *phys_vtable;
+
+    ASSERT(shadow_mode_enabled(v->domain));
+
+    phys_vtable = map_domain_page(
+        pagetable_get_pfn(v->domain->arch.phys_table));
+
+    phys_vtable[l2_table_offset(va)] = value;
+
+    unmap_domain_page(phys_vtable);
 }
 
 static inline void
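
The __direct_get_l2e()/__direct_set_l2e() helpers added to shadow.h above map the frame behind v->domain->arch.phys_table around every single-entry access, which is what lets the rest of the patch drop the long-lived shadow_vtable mapping for the direct map. The sketch below shows that map/touch/unmap discipline in isolation; it is a hypothetical stand-in, with map_frame()/unmap_frame() in place of map_domain_page()/unmap_domain_page() and a simplified PAE-style l2_table_offset().

#include <stdlib.h>

#define L2_ENTRIES 512                      /* PAE-style: 512 entries per table */

static unsigned long *map_frame(unsigned long *frame) { return frame; }
static void unmap_frame(unsigned long *mapping)       { (void)mapping; }

static unsigned long direct_get_l2e(unsigned long *phys_table, unsigned long va)
{
    unsigned long *vtable = map_frame(phys_table);            /* map_domain_page() */
    unsigned long e = vtable[(va >> 21) & (L2_ENTRIES - 1)];  /* l2_table_offset() */
    unmap_frame(vtable);                                      /* unmap_domain_page() */
    return e;
}

static void direct_set_l2e(unsigned long *phys_table, unsigned long va,
                           unsigned long value)
{
    unsigned long *vtable = map_frame(phys_table);
    vtable[(va >> 21) & (L2_ENTRIES - 1)] = value;
    unmap_frame(vtable);
}

int main(void)
{
    unsigned long *phys_table = calloc(L2_ENTRIES, sizeof(unsigned long));
    if ( phys_table == NULL )
        return 1;
    direct_set_l2e(phys_table, 0x40000000UL, 0xABC000UL | 1);   /* present entry */
    return (direct_get_l2e(phys_table, 0x40000000UL) & 1) ? 0 : 1;
}
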
diff -r bee659de2a36 -r a543a4778a7d xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h   Thu Feb 16 15:35:09 2006
+++ b/xen/include/asm-x86/shadow_64.h   Thu Feb 16 15:46:21 2006
@@ -66,9 +66,12 @@
 #define PAGING_L1      1UL
 #define L_MASK  0xff
 
+#define PAE_PAGING_LEVELS   3
+
 #define ROOT_LEVEL_64   PAGING_L4
 #define ROOT_LEVEL_32   PAGING_L2
 
+#define DIRECT_ENTRY    (4UL << 16)
 #define SHADOW_ENTRY    (2UL << 16)
 #define GUEST_ENTRY     (1UL << 16)
 
@@ -94,6 +97,7 @@
 #define entry_empty()           ((pgentry_64_t) { 0 })
 #define entry_from_pfn(pfn, flags)  \
     ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
+#define entry_from_page(page, flags) (entry_from_pfn(page_to_mfn(page),(flags)))
 #define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
 #define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
 #define entry_has_changed(x,y,flags) \
@@ -162,16 +166,24 @@
 
     if ( flag & SHADOW_ENTRY )
     {
-       root_level =  ROOT_LEVEL_64;
-       index = table_offset_64(va, root_level);
+        root_level =  ROOT_LEVEL_64;
+        index = table_offset_64(va, root_level);
         le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
     }
-    else /* guest entry */  
+    else if ( flag & GUEST_ENTRY )
     {
         root_level = v->domain->arch.ops->guest_paging_levels;
-       index = table_offset_64(va, root_level);
+        index = table_offset_64(va, root_level);
         le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];
     }
+    else /* direct mode */
+    {
+        root_level = PAE_PAGING_LEVELS;
+        index = table_offset_64(va, root_level);
+        le_e = (pgentry_64_t *)map_domain_page(
+            pagetable_get_pfn(v->domain->arch.phys_table));
+    }
+
     /*
      * If it's not external mode, then mfn should be machine physical.
      */
@@ -241,6 +253,20 @@
 #define __guest_get_l3e(v, va, sl3e) \
   __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | PAGING_L3)
 
+#define __direct_set_l3e(v, va, value) \
+  __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L3)
+#define __direct_get_l3e(v, va, sl3e) \
+  __rw_entry(v, va, sl3e, DIRECT_ENTRY | GET_ENTRY | PAGING_L3)
+#define __direct_set_l2e(v, va, value) \
+  __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L2)
+#define __direct_get_l2e(v, va, sl2e) \
+  __rw_entry(v, va, sl2e, DIRECT_ENTRY | GET_ENTRY | PAGING_L2)
+#define __direct_set_l1e(v, va, value) \
+  __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L1)
+#define __direct_get_l1e(v, va, sl1e) \
+  __rw_entry(v, va, sl1e, DIRECT_ENTRY | GET_ENTRY | PAGING_L1)
+
+
 static inline int  __guest_set_l2e(
     struct vcpu *v, unsigned long va, void *value, int size)
 {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
