[Xen-devel] [PATCH] Unmap guest's EPT mapping for poisoned memory

To: Tim Deegan <Tim.Deegan@xxxxxxxxxx>, Keir Fraser <keir.fraser@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] Unmap guest's EPT mapping for poisoned memory
From: "Jiang, Yunhong" <yunhong.jiang@xxxxxxxxx>
Date: Wed, 14 Jul 2010 15:41:12 +0800
Cc: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Unmap guest's EPT mapping for poisoned memory

Unmap poisoned memory that is assigned to a guest, so that the guest cannot
access the memory any more. Currently the unmap is done only for EPT guests,
by removing the EPT entry; support for PV guests should be added later. There
is no unmap support for shadow guests, because a system that supports
recoverable MCA should also support EPT.

If we cannot unmap the page, we destroy the guest.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@xxxxxxxxx>
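
For reviewers: the core of the recovery path is a single p2m type change.
A minimal sketch of the intended behaviour, using the same p2m helpers the
patch below calls (d and gfn as in intel_memerr_dhandler; error handling
omitted):

    p2m_type_t pt;

    /* Query the current p2m type of the guest frame. */
    gfn_to_mfn_query(d, gfn, &pt);

    /* Retype ordinary RAM as broken; the EPT entry is removed, so any
     * further guest access to the page faults instead of consuming the
     * poisoned data. */
    if ( pt == p2m_ram_rw )
        p2m_change_type(d, gfn, pt, p2m_ram_broken);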

diff -r bf51b671f269 xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h     Mon Jul 12 13:59:39 2010 +0800
+++ b/xen/arch/x86/cpu/mcheck/mce.h     Mon Jul 12 14:23:05 2010 +0800
@@ -49,6 +49,7 @@ void amd_nonfatal_mcheck_init(struct cpu
 void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
 
 int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
+int unmmap_broken_page(struct domain *d, unsigned long mfn, unsigned long gfn);
 
 u64 mce_cap_init(void);
 extern int firstbank;
diff -r bf51b671f269 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Mon Jul 12 13:59:39 2010 +0800
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Mon Jul 12 17:25:47 2010 +0800
@@ -654,16 +654,22 @@ static void intel_memerr_dhandler(int bn
             BUG_ON( result->owner == DOMID_COW );
             if ( result->owner != DOMID_XEN ) {
                 d = get_domain_by_id(result->owner);
+                ASSERT(d);
+                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
+
                 if ( !is_vmce_ready(bank, d) )
                 {
-                    /* Should not inject vMCE to guest */
-                    if ( d )
-                        put_domain(d);
-                    return;
+                    printk("DOM%d not ready for vMCE\n", d->domain_id);
+                    goto vmce_failed;
                 }
 
-                ASSERT(d);
-                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
+                if ( unmmap_broken_page(d, mfn, gfn) )
+                {
+                    printk("Unmapping broken page %lx for DOM%d failed\n",
+                            mfn, d->domain_id);
+                    goto vmce_failed;
+                }
+
                 bank->mc_addr =  gfn << PAGE_SHIFT |
                   (bank->mc_addr & (PAGE_SIZE -1 ));
                 if ( fill_vmsr_data(bank, d,
@@ -671,18 +677,15 @@ static void intel_memerr_dhandler(int bn
                 {
                     mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
                       "failed\n", result->owner);
-                    put_domain(d);
-                    domain_crash(d);
-                    return;
+                    goto vmce_failed;
                 }
+
                 /* We will inject vMCE to DOMU*/
                 if ( inject_vmce(d) < 0 )
                 {
                     mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
                       " failed\n", d->domain_id);
-                    put_domain(d);
-                    domain_crash(d);
-                    return;
+                    goto vmce_failed;
                 }
                 /* Impacted domain go on with domain's recovery job
                  * if the domain has its own MCA handler.
@@ -691,6 +694,11 @@ static void intel_memerr_dhandler(int bn
                  */
                 result->result = MCA_RECOVERED;
                 put_domain(d);
+
+                return;
+vmce_failed:
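+                /* Cannot recover: drop the domain ref and crash the guest. */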
+                put_domain(d);
+                domain_crash(d);
             }
         }
     }
diff -r bf51b671f269 xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Mon Jul 12 13:59:39 2010 +0800
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Mon Jul 12 14:30:21 2010 +0800
@@ -558,3 +558,28 @@ int is_vmce_ready(struct mcinfo_bank *ba
 
     return 0;
 }
+
+/* For now we only have support for HAP guests */
+int unmmap_broken_page(struct domain *d, unsigned long mfn, unsigned long gfn)
+{
+    /* Always trust dom0 */
+    if ( d == dom0 )
+        return 0;
+
+    if ( is_hvm_domain(d) && paging_mode_hap(d) )
+    {
+        p2m_type_t pt;
+
+        gfn_to_mfn_query(d, gfn, &pt);
+        /* TODO: what should happen if the page is being paged in? */
+        if ( pt == p2m_ram_rw )
+        {
+            p2m_change_type(d, gfn, pt, p2m_ram_broken);
+            return 0;
+        }
+    }
+
+    return -1;
+}


Attachment: vmce_guard.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel