To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 2 of 5] mem_access: p2m and ept function changes to propagate access
From: Joe Epstein <jepstein@xxxxxxxxxxxxxxxxxxxx>
Date: Tue, 28 Dec 2010 23:27:21 -0800
Delivery-date: Tue, 28 Dec 2010 23:31:18 -0800
* Adds mem_access handling.
* Propagates access permissions across many page operations.  The
  interface currently provides only loose consistency: a page type
  change reverts the page's access permissions to the domain's default
  (see the illustrative sketch below).
* Pauses the VCPU if a memory event handler is required but not
  present; otherwise reverts the page's permissions to the most
  lenient setting.
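
A minimal standalone sketch, assuming nothing beyond what the patch
itself shows, of how an access value further restricts the r/w/x bits
derived from the page type (mirroring the second switch added to
ept_p2m_type_to_flags() below).  The enum and struct are simplified
stand-ins, not the real Xen definitions:

#include <stdio.h>

typedef enum {
    access_n, access_r, access_w, access_x,
    access_rx, access_wx, access_rw, access_rwx,
    access_rx2rw                       /* rx until the first write, then rw */
} access_t;

struct flags { int r, w, x; };

/* Clear whichever of r/w/x the access value forbids. */
static void restrict_with_access(struct flags *f, access_t a)
{
    switch ( a )
    {
    case access_n:     f->r = f->w = f->x = 0; break;
    case access_r:     f->w = f->x = 0;        break;
    case access_w:     f->r = f->x = 0;        break;
    case access_x:     f->r = f->w = 0;        break;
    case access_rx:
    case access_rx2rw: f->w = 0;               break;
    case access_wx:    f->r = 0;               break;
    case access_rw:    f->x = 0;               break;
    case access_rwx:                           break;
    }
}

int main(void)
{
    struct flags f = { 1, 1, 1 };               /* e.g. p2m_ram_rw grants rwx */
    restrict_with_access(&f, access_rx);
    printf("r=%d w=%d x=%d\n", f.r, f.w, f.x);  /* prints r=1 w=0 x=1 */
    return 0;
}

The rx2rw value is the one special case: the entry behaves like rx
until the guest's first write, at which point p2m_mem_access_check()
silently promotes it to rw, so a listener can trap the first write to
a page without taking a fault on every later one.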

Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>

diff -r 4e108cf56d07 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile  Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/Makefile  Tue Dec 28 22:35:30 2010 -0800
@@ -9,6 +9,7 @@
 obj-$(x86_64) += mem_event.o
 obj-$(x86_64) += mem_paging.o
 obj-$(x86_64) += mem_sharing.o
+obj-$(x86_64) += mem_access.o

 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r 4e108cf56d07 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Tue Dec 28 22:35:30 2010 -0800
@@ -62,8 +62,9 @@
     return r;
 }

-static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
+static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
+    /* First apply type permissions */
     switch(type)
     {
         case p2m_invalid:
@@ -75,30 +76,61 @@
         case p2m_ram_paging_in_start:
         default:
             entry->r = entry->w = entry->x = 0;
-            return;
+            break;
         case p2m_ram_rw:
             entry->r = entry->w = entry->x = 1;
-            return;
+            break;
         case p2m_mmio_direct:
             entry->r = entry->x = 1;
             entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
                                                     entry->mfn);
-            return;
+            break;
         case p2m_ram_logdirty:
         case p2m_ram_ro:
         case p2m_ram_shared:
             entry->r = entry->x = 1;
             entry->w = 0;
-            return;
+            break;
         case p2m_grant_map_rw:
             entry->r = entry->w = 1;
             entry->x = 0;
-            return;
+            break;
         case p2m_grant_map_ro:
             entry->r = 1;
             entry->w = entry->x = 0;
-            return;
+            break;
     }
+
+
+    /* Then restrict with access permissions */
+    switch (access)
+    {
+        case p2m_access_n:
+            entry->r = entry->w = entry->x = 0;
+            break;
+        case p2m_access_r:
+            entry->w = entry->x = 0;
+            break;
+        case p2m_access_w:
+            entry->r = entry->x = 0;
+            break;
+        case p2m_access_x:
+            entry->r = entry->w = 0;
+            break;
+        case p2m_access_rx:
+        case p2m_access_rx2rw:
+            entry->w = 0;
+            break;
+        case p2m_access_wx:
+            entry->r = 0;
+            break;
+        case p2m_access_rw:
+            entry->x = 0;
+            break;
+        case p2m_access_rwx:
+            break;
+    }
+
 }

 #define GUEST_TABLE_MAP_FAILED  0
@@ -117,6 +149,8 @@

     ept_entry->epte = 0;
     ept_entry->mfn = page_to_mfn(pg);
+    ept_entry->access = p2m->default_access;
+
     ept_entry->r = ept_entry->w = ept_entry->x = 1;

     return 1;
@@ -170,11 +204,12 @@
         epte->emt = ept_entry->emt;
         epte->ipat = ept_entry->ipat;
         epte->sp = (level > 1) ? 1 : 0;
+        epte->access = ept_entry->access;
         epte->sa_p2mt = ept_entry->sa_p2mt;
         epte->mfn = ept_entry->mfn + i * trunk;
         epte->rsvd2_snp = ( iommu_enabled && iommu_snoop ) ? 1 : 0;

-        ept_p2m_type_to_flags(epte, epte->sa_p2mt);
+        ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);

         if ( (level - 1) == target )
             continue;
@@ -260,7 +295,7 @@
  */
 static int
 ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int order, p2m_type_t p2mt)
+              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     ept_entry_t *table, *ept_entry = NULL;
     unsigned long gfn_remainder = gfn;
@@ -334,9 +369,11 @@
             /* Construct the new entry, and then write it once */
             new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
                                                 direct_mmio);
+
             new_entry.ipat = ipat;
             new_entry.sp = order ? 1 : 0;
             new_entry.sa_p2mt = p2mt;
+            new_entry.access = p2ma;
             new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);

             if ( new_entry.mfn == mfn_x(mfn) )
@@ -344,7 +381,7 @@
             else
                 new_entry.mfn = mfn_x(mfn);

-            ept_p2m_type_to_flags(&new_entry, p2mt);
+            ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
         }

         atomic_write_ept_entry(ept_entry, new_entry);
@@ -384,6 +421,7 @@
         new_entry.ipat = ipat;
         new_entry.sp = i ? 1 : 0;
         new_entry.sa_p2mt = p2mt;
+        new_entry.access = p2ma;
         new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);

         if ( new_entry.mfn == mfn_x(mfn) )
@@ -391,7 +429,7 @@
         else /* the caller should take care of the previous page */
             new_entry.mfn = mfn_x(mfn);

-        ept_p2m_type_to_flags(&new_entry, p2mt);
+        ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);

         atomic_write_ept_entry(ept_entry, new_entry);
     }
@@ -447,7 +485,7 @@

 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct p2m_domain *p2m,
-                           unsigned long gfn, p2m_type_t *t,
+                           unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
                            p2m_query_t q)
 {
     struct domain *d = p2m->domain;
@@ -460,6 +498,7 @@
     mfn_t mfn = _mfn(INVALID_MFN);

     *t = p2m_mmio_dm;
+    *a = p2m_access_n;

     /* This pfn is higher than the highest the p2m map currently holds */
     if ( gfn > p2m->max_mapped_pfn )
@@ -519,6 +558,8 @@
     if ( ept_entry->sa_p2mt != p2m_invalid )
     {
         *t = ept_entry->sa_p2mt;
+        *a = ept_entry->access;
+
         mfn = _mfn(ept_entry->mfn);
         if ( i )
         {
@@ -626,10 +667,10 @@
 }

 static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
-                                   unsigned long gfn, p2m_type_t *t,
+                                   unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
                                    p2m_query_t q)
 {
-    return ept_get_entry(p2m, gfn, t, q);
+    return ept_get_entry(p2m, gfn, t, a, q);
 }

 /*
@@ -689,7 +730,7 @@
                     order = level * EPT_TABLE_ORDER;
                     if ( need_modify_ept_entry(p2m, gfn, mfn,
                           e.ipat, e.emt, e.sa_p2mt) )
-                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
                     gfn += trunk;
                     break;
                 }
@@ -699,7 +740,7 @@
         else /* gfn assigned with 4k */
         {
             if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
-                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
         }
     }
     p2m_unlock(p2m);
@@ -730,7 +771,7 @@
                 continue;

             e.sa_p2mt = nt;
-            ept_p2m_type_to_flags(&e, nt);
+            ept_p2m_type_to_flags(&e, nt, e.access);
             atomic_write_ept_entry(&epte[i], e);
         }
     }
diff -r 4e108cf56d07 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Tue Dec 28 22:35:30 2010 -0800
@@ -26,6 +26,7 @@
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
 #include <asm/mem_paging.h>
+#include <asm/mem_access.h>

 /* for public/io/ring.h macros */
 #define xen_mb()   mb()
@@ -67,6 +68,9 @@

     mem_event_ring_lock_init(d);

+    /* Wake any VCPUs paused for memory events */
+    mem_event_unpause_vcpus(d);
+
     return 0;

  err_shared:
@@ -143,12 +147,32 @@
             vcpu_wake(v);
 }

+int mem_event_check_listener(struct domain *d) {
+    struct vcpu *curr = current;
+
+    /* If a listener exists, return */
+    if ( d->mem_event.ring_page )
+        return 1;
+
+    /* Sleep the VCPU */
+    if ( (curr->domain->domain_id == d->domain_id) )
+    {
+        set_bit(_VPF_mem_event, &curr->pause_flags);
+        vcpu_sleep_nosync(curr);
+    }
+
+    return 0;
+}
+
 int mem_event_check_ring(struct domain *d)
 {
     struct vcpu *curr = current;
     int free_requests;
     int ring_full;

+    if ( !d->mem_event.ring_page )
+        return -1;
+
     mem_event_ring_lock(d);

     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
@@ -157,7 +181,7 @@
         gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
         WARN_ON(free_requests == 0);
     }
-    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
+    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;

     if ( (curr->domain->domain_id == d->domain_id) && ring_full )
     {
@@ -203,7 +227,11 @@
         return rc;
 #endif

-    if ( mec->mode == 0 )
+    rc = -ENOSYS;
+
+    switch ( mec->mode )
+    {
+    case 0:
     {
         switch( mec->op )
         {
@@ -268,13 +296,18 @@
             rc = -ENOSYS;
             break;
         }
+        break;
     }
-    else
+    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
-        rc = -ENOSYS;
-
-        if ( mec->mode & XEN_DOMCTL_MEM_EVENT_OP_PAGING )
-            rc = mem_paging_domctl(d, mec, u_domctl);
+        rc = mem_paging_domctl(d, mec, u_domctl);
+        break;
+    }
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
+    {
+        rc = mem_access_domctl(d, mec, u_domctl);
+        break;
+    }
     }

     return rc;
diff -r 4e108cf56d07 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Tue Dec 28 22:35:30 2010 -0800
@@ -304,6 +304,8 @@
     if(page != NULL) return page;

     memset(&req, 0, sizeof(req));
+    req.type = MEM_EVENT_TYPE_SHARED;
+
     if(must_succeed)
     {
         /* We do not support 'must_succeed' any more. External operations such
diff -r 4e108cf56d07 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/p2m.c     Tue Dec 28 22:35:30 2010 -0800
@@ -285,7 +285,7 @@
  */
 static
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-                  unsigned int page_order, p2m_type_t p2mt);
+                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);

 static int
 p2m_pod_cache_add(struct p2m_domain *p2m,
@@ -693,7 +693,7 @@
     {
         /* All PoD: Mark the whole region invalid and tell caller
          * we're done. */
-        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
+        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
         p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
@@ -716,7 +716,7 @@
         mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
         if ( t == p2m_populate_on_demand )
         {
-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
             p2m->pod.entry_count--; /* Lock: p2m */
             BUG_ON(p2m->pod.entry_count < 0);
             pod--;
@@ -729,7 +729,7 @@

             page = mfn_to_page(mfn);

-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
             set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);

             p2m_pod_cache_add(p2m, page, 0);
@@ -844,7 +844,7 @@
     /* Try to remove the page, restoring old mapping if it fails. */
     set_p2m_entry(p2m, gfn,
                   _mfn(POPULATE_ON_DEMAND_MFN), 9,
-                  p2m_populate_on_demand);
+                  p2m_populate_on_demand, p2m->default_access);

     /* Make none of the MFNs are used elsewhere... for example, mapped
      * via the grant table interface, or by qemu.  Allow one refcount for
@@ -899,7 +899,7 @@

 out_reset:
     if ( reset )
-        set_p2m_entry(p2m, gfn, mfn0, 9, type0);
+        set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);

 out:
     return ret;
@@ -957,7 +957,7 @@
         /* Try to remove the page, restoring old mapping if it fails. */
         set_p2m_entry(p2m, gfns[i],
                       _mfn(POPULATE_ON_DEMAND_MFN), 0,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);

         /* See if the page was successfully unmapped.  (Allow one refcount
          * for being allocated to a domain.) */
@@ -966,7 +966,7 @@
             unmap_domain_page(map[i]);
             map[i] = NULL;

-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);

             continue;
         }
@@ -988,7 +988,7 @@
          * check timing.  */
         if ( j < PAGE_SIZE/sizeof(*map[i]) )
         {
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
         }
         else
         {
@@ -1121,7 +1121,7 @@
          * 512 2MB pages. The rest of 511 calls are unnecessary.
          */
         set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
         return 0;
@@ -1158,7 +1158,7 @@

     gfn_aligned = (gfn >> order) << order;

-    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
+    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);

     for( i = 0; i < (1UL << order); i++ )
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
@@ -1198,7 +1198,7 @@
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
         set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
         struct {
@@ -1250,7 +1250,7 @@
 // Returns 0 on error (out of memory)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int page_order, p2m_type_t p2mt)
+              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -1401,7 +1401,7 @@
 }

 static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
                p2m_query_t q)
 {
     mfn_t mfn;
@@ -1416,6 +1416,8 @@
      * XXX Once we start explicitly registering MMIO regions in the p2m
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
+    /* Not implemented except with EPT */
+    *a = p2m_access_rwx;

     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));

@@ -1542,7 +1544,7 @@

 /* Read the current domain's p2m table (through the linear mapping). */
 static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
-                                    unsigned long gfn, p2m_type_t *t,
+                                    unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
                                     p2m_query_t q)
 {
     mfn_t mfn = _mfn(INVALID_MFN);
@@ -1553,6 +1555,9 @@
      * XXX Once we start explicitly registering MMIO regions in the p2m
      * XXX we will return p2m_invalid for unmapped gfns */

+    /* Not currently implemented except for EPT */
+    *a = p2m_access_rwx;
+
     if ( gfn <= p2m->max_mapped_pfn )
     {
         l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
@@ -1726,6 +1731,8 @@
     INIT_PAGE_LIST_HEAD(&p2m->pod.single);

     p2m->domain = d;
+    p2m->default_access = p2m_access_rwx;   /* Dom flags override */
+
     p2m->set_entry = p2m_set_entry;
     p2m->get_entry = p2m_gfn_to_mfn;
     p2m->get_entry_current = p2m_gfn_to_mfn_current;
@@ -1737,7 +1744,7 @@
     return;
 }

-int p2m_init(struct domain *d)
+int p2m_init(struct domain *d, unsigned int domcr_flags)
 {
     struct p2m_domain *p2m;

@@ -1745,6 +1752,9 @@
     if ( p2m == NULL )
         return -ENOMEM;
     p2m_initialise(d, p2m);
+
+    if ( domcr_flags & DOMCRF_access_required )
+        p2m->access_required = 1;

     return 0;
 }
@@ -1759,7 +1769,7 @@

 static
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-                    unsigned int page_order, p2m_type_t p2mt)
+                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct domain *d = p2m->domain;
     unsigned long todo = 1ul << page_order;
@@ -1776,7 +1786,7 @@
         else
             order = 0;

-        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
+        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
             rc = 0;
         gfn += 1ul << order;
         if ( mfn_x(mfn) != INVALID_MFN )
@@ -1837,7 +1847,7 @@

     /* Initialise physmap tables for slot zero. Other code assumes this. */
     if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
-                        p2m_invalid) )
+                        p2m_invalid, p2m->default_access) )
         goto error;

     /* Copy all existing mappings from the page list and m2p */
@@ -1856,7 +1866,7 @@
             (gfn != 0x55555555L)
 #endif
              && gfn != INVALID_M2P_ENTRY
-            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
+            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
             goto error_unlock;
     }
     spin_unlock(&p2m->domain->page_alloc_lock);
@@ -1883,6 +1893,7 @@
 #ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
+    p2m_access_t a;
     mfn_t mfn;
 #endif

@@ -1891,7 +1902,7 @@
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
+        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
             BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
     }
@@ -2188,6 +2199,7 @@
     unsigned long i;
     mfn_t mfn_return;
     p2m_type_t t;
+    p2m_access_t a;

     if ( !paging_mode_translate(p2m->domain) )
     {
@@ -2201,12 +2213,12 @@

     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
+        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
         if ( !p2m_is_grant(t) )
             set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
         ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
     }
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
 }

 void
@@ -2286,7 +2298,7 @@

     /* Now, actually do the two-way mapping */
     if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
-                        p2m_populate_on_demand) )
+                        p2m_populate_on_demand, p2m->default_access) )
         rc = -EINVAL;
     else
     {
@@ -2399,7 +2411,7 @@
     /* Now, actually do the two-way mapping */
     if ( mfn_valid(_mfn(mfn)) )
     {
-        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
+        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
             rc = -EINVAL;
         if ( !p2m_is_grant(t) )
         {
@@ -2412,7 +2424,7 @@
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
         if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
-                            p2m_invalid) )
+                            p2m_invalid, p2m->default_access) )
             rc = -EINVAL;
         else
         {
@@ -2565,7 +2577,7 @@
 }

 /* Modify the p2m type of a single gfn from ot to nt, returning the
- * entry's previous type */
+ * entry's previous type.  Resets the access permissions. */
 p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt)
 {
@@ -2578,7 +2590,7 @@

     mfn = gfn_to_mfn_query(p2m, gfn, &pt);
     if ( pt == ot )
-        set_p2m_entry(p2m, gfn, mfn, 0, nt);
+        set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);

     p2m_unlock(p2m);

@@ -2609,7 +2621,7 @@

     P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
     p2m_lock(p2m);
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
     if ( 0 == rc )
@@ -2639,7 +2651,7 @@
         return 0;
     }
     p2m_lock(p2m);
-    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
+    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);

@@ -2665,7 +2677,7 @@
     set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);

     P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
@@ -2708,7 +2720,7 @@

     /* Fix p2m entry */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);

@@ -2745,7 +2757,7 @@

     /* Remove mapping from p2m table */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);

@@ -2767,6 +2779,7 @@
         return;

     memset(&req, 0, sizeof(req));
+    req.type = MEM_EVENT_TYPE_PAGING;

     /* Fix p2m mapping */
     /* XXX: It seems inefficient to have this here, as it's only needed
@@ -2775,7 +2788,7 @@
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
-        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
     }
@@ -2811,7 +2824,7 @@

     /* Fix p2m mapping */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);

@@ -2831,7 +2844,7 @@
     /* Fix p2m entry */
     mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
     p2m_lock(p2m);
-    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);

@@ -2844,6 +2857,97 @@
 }
 #endif /* __x86_64__ */

+int p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+                         bool_t access_r, bool_t access_w, bool_t access_x)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+    struct domain *d = v->domain;
+    struct p2m_domain* p2m = p2m_get_hostp2m(d);
+    int res;
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    p2m_access_t p2ma;
+
+    /* First, handle rx2rw conversion automatically */
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query);
+
+    if ( access_w && p2ma == p2m_access_rx2rw ) {
+        p2m_lock(p2m);
+        p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rw);
+        p2m_unlock(p2m);
+
+        return 1;  /* handled */
+    }
+
+    /* Otherwise, check if there is a memory event listener, and send the message along */
+    res = mem_event_check_ring(d);
+    if ( res < 0 )
+    {
+        /* No listener */
+        if ( p2m->access_required )
+        {
+            printk(XENLOG_INFO
+                   "Memory access permissions failure, no mem_event listener: pausing VCPU %d, dom %d\n",
+                   v->vcpu_id, d->domain_id);
+
+            /* Will pause the VCPU */
+            (void) mem_event_check_listener(d);
+        }
+        else
+        {
+            /* A listener is not required, so clear the access restrictions */
+            p2m_lock(p2m);
+            p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rwx);
+            p2m_unlock(p2m);
+        }
+
+        return 1;
+    }
+    else if ( res > 0 )
+        return 1;  /* No space in buffer */
+
+    memset(&req, 0, sizeof(req));
+    req.type = MEM_EVENT_TYPE_ACCESS;
+
+    /* Pause the current VCPU unconditionally */
+    vcpu_pause_nosync(v);
+    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+
+    /* Send request to mem event */
+    req.gfn = gfn;
+    req.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+    req.gla_valid = gla_valid;
+    req.gla = gla;
+    req.access_r = access_r;
+    req.access_w = access_w;
+    req.access_x = access_x;
+
+    req.vcpu_id = v->vcpu_id;
+
+    mem_event_put_request(d, &req);
+
+    /* VCPU paused, mem event request sent */
+    return 1;
+}
+
+void p2m_mem_access_resume(struct p2m_domain *p2m)
+{
+    struct domain *d = p2m->domain;
+    mem_event_response_t rsp;
+
+    mem_event_get_response(d, &rsp);
+
+    /* Unpause domain */
+    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+
+    /* Unpause any domains that were paused because the ring was full or no listener
+     * was available */
+    mem_event_unpause_vcpus(d);
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 4e108cf56d07 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/paging.c  Tue Dec 28 22:35:30 2010 -0800
@@ -647,7 +647,7 @@
 {
     int rc;

-    if ( (rc = p2m_init(d)) != 0 )
+    if ( (rc = p2m_init(d, domcr_flags)) != 0 )
         return rc;

     /* The order of the *_init calls below is important, as the later
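
For orientation, a simplified standalone sketch of the policy
p2m_mem_access_check() applies when a guest access violates the
page's permissions.  The rx2rw fast path and the ring-full case are
omitted, and the function and enum names below are made up for
illustration; they are not part of Xen:

#include <stdio.h>

enum outcome {
    PAUSE_UNTIL_LISTENER,   /* access_required set, but no listener yet     */
    DROP_RESTRICTION,       /* no listener and none required: revert to rwx */
    SEND_EVENT_AND_PAUSE    /* listener present: queue event, pause the VCPU */
};

static enum outcome access_fault_policy(int listener_present,
                                        int access_required)
{
    if ( !listener_present )
        return access_required ? PAUSE_UNTIL_LISTENER : DROP_RESTRICTION;
    return SEND_EVENT_AND_PAUSE;
}

int main(void)
{
    printf("%d %d %d\n",
           access_fault_policy(0, 1),    /* 0: pause until a listener attaches */
           access_fault_policy(0, 0),    /* 1: lift restrictions to rwx        */
           access_fault_policy(1, 1));   /* 2: send mem_event, pause the VCPU  */
    return 0;
}

In the patch itself, the "send event and pause" path fills a
mem_event_request_t with the gfn, page offset, guest linear address
and the r/w/x access bits before calling mem_event_put_request();
p2m_mem_access_resume() later unpauses the VCPU once the listener
posts its response.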

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
