WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86: use paging_mode_hap() consistently

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: use paging_mode_hap() consistently
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 05 Apr 2010 23:20:26 -0700
Delivery-date: Mon, 05 Apr 2010 23:21:18 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1270533064 -3600
# Node ID 4a3e131f749897f79ff619235d6310cbd69b3ba3
# Parent  5374514774693ed2710596b8d1931256c01b02ee
x86: use paging_mode_hap() consistently

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c    |    6 +++---
 xen/arch/x86/mm/hap/hap.c     |    9 +++++++--
 xen/arch/x86/mm/mem_sharing.c |    2 +-
 xen/arch/x86/mm/p2m.c         |    6 +++---
 4 files changed, 14 insertions(+), 9 deletions(-)

diff -r 537451477469 -r 4a3e131f7498 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 01 09:55:27 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Apr 06 06:51:04 2010 +0100
@@ -92,7 +92,7 @@ static int vmx_domain_initialise(struct 
 
 static void vmx_domain_destroy(struct domain *d)
 {
-    if ( d->arch.hvm_domain.hap_enabled )
+    if ( paging_mode_hap(d) )
         on_each_cpu(__ept_sync_domain, d, 1);
     vmx_free_vlapic_mapping(d);
 }
@@ -678,7 +678,7 @@ static void vmx_ctxt_switch_to(struct vc
     if ( old_cr4 != new_cr4 )
         write_cr4(new_cr4);
 
-    if ( d->arch.hvm_domain.hap_enabled )
+    if ( paging_mode_hap(d) )
     {
         unsigned int cpu = smp_processor_id();
         /* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
@@ -1222,7 +1222,7 @@ void ept_sync_domain(struct domain *d)
 void ept_sync_domain(struct domain *d)
 {
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !d->arch.hvm_domain.hap_enabled || !d->vcpu || !d->vcpu[0] )
+    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
     ASSERT(local_irq_is_enabled());
diff -r 537451477469 -r 4a3e131f7498 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Apr 01 09:55:27 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Apr 06 06:51:04 2010 +0100
@@ -550,8 +550,13 @@ int hap_enable(struct domain *d, u32 mod
 {
     unsigned int old_pages;
     int rv = 0;
+    uint32_t oldmode;
 
     domain_pause(d);
+
+    oldmode = d->arch.paging.mode;
+    d->arch.paging.mode = mode | PG_HAP_enable;
+
     /* error check */
     if ( (d == current->domain) )
     {
@@ -582,9 +587,9 @@ int hap_enable(struct domain *d, u32 mod
             goto out;
     }
 
-    d->arch.paging.mode = mode | PG_HAP_enable;
-
  out:
+    if (rv)
+        d->arch.paging.mode = oldmode;
     domain_unpause(d);
     return rv;
 }
diff -r 537451477469 -r 4a3e131f7498 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Apr 01 09:55:27 2010 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Tue Apr 06 06:51:04 2010 +0100
@@ -44,7 +44,7 @@ static void mem_sharing_audit(void);
 
 
 #define hap_enabled(d) \
-    (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
+    (is_hvm_domain(d) && paging_mode_hap(d))
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
  
diff -r 537451477469 -r 4a3e131f7498 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Apr 01 09:55:27 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Apr 06 06:51:04 2010 +0100
@@ -1231,7 +1231,7 @@ p2m_set_entry(struct domain *d, unsigned
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                          ((CONFIG_PAGING_LEVELS == 3)
-                          ? (d->arch.hvm_domain.hap_enabled ? 4 : 8)
+                          ? (paging_mode_hap(d) ? 4 : 8)
                           : L3_PAGETABLE_ENTRIES),
                          PGT_l2_page_table) )
         goto out;
@@ -1568,7 +1568,7 @@ int p2m_init(struct domain *d)
     p2m->get_entry_current = p2m_gfn_to_mfn_current;
     p2m->change_entry_type_global = p2m_change_type_global;
 
-    if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
+    if ( is_hvm_domain(d) && paging_mode_hap(d) &&
          (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(d);
 
@@ -1595,7 +1595,7 @@ int set_p2m_entry(struct domain *d, unsi
 
     while ( todo )
     {
-        if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
+        if ( is_hvm_domain(d) && paging_mode_hap(d) )
            order = (((gfn | mfn_x(mfn) | todo) & (SUPERPAGE_PAGES - 1)) == 0) ?
                9 : 0;
         else

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] x86: use paging_mode_hap() consistently, Xen patchbot-unstable <=