To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm/p2m: Make p2m interfaces take struct domain arguments.
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 16 Jun 2011 11:12:31 +0100
Delivery-date: Thu, 16 Jun 2011 03:28:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID de0a051b36ceb01738348426fdd3d52a5776ba1a
# Parent  0d3e0a571fdddc873d1ff3750d15db6fc58fff8e
x86/mm/p2m: Make p2m interfaces take struct domain arguments.

As part of the nested HVM patch series, many p2m functions were changed
to take pointers to p2m tables rather than to domains.  This patch
reverses that for almost all of them, which:
 - gets rid of a lot of "p2m_get_hostp2m(d)" in code which really
   shouldn't have to know anything about how gfns become mfns.
 - ties sharing and paging interfaces to a domain, which is
   what they actually act on, rather than a particular p2m table.

In developing this patch it became clear that memory-sharing and nested
HVM are unlikely to work well together.  I haven't tried to fix that
here beyond adding some assertions around suspect paths (as this patch
is big enough with just the interface changes).

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
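A minimal caller-side sketch of the interface change described above
(illustration only, not part of the applied patch; "d", "gfn" and "p2mt"
stand for a caller's local variables):

    /* Old interface: the caller fetched the host p2m and passed it around. */
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));
    p2m_change_type(p2m, gfn, p2m_ram_logdirty, p2m_ram_rw);

    /* New interface: the caller passes the domain and the p2m layer looks
     * up the host p2m internally; code that operates on a specific (e.g.
     * nested) p2m table uses the explicit gfn_to_mfn_type_p2m() form. */
    mfn = mfn_x(gfn_to_mfn(d, gfn, &p2mt));
    p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);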


diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Thu Jun 02 13:16:52 2011 +0100
@@ -577,7 +577,6 @@
 int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
 {
     mfn_t r_mfn;
-    struct p2m_domain *p2m;
     p2m_type_t pt;
 
     /* Always trust dom0's MCE handler will prevent future access */
@@ -590,18 +589,11 @@
     if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
         return -ENOSYS;
 
-    p2m = p2m_get_hostp2m(d);
-    ASSERT(p2m);
-
-    /* This only happen for PoD memory, which should be handled seperetely */
-    if (gfn > p2m->max_mapped_pfn)
-        return -EINVAL;
-
-    r_mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+    r_mfn = gfn_to_mfn_query(d, gfn, &pt);
     if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
     {
         ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
-        p2m_change_type(p2m, gfn, pt, p2m_ram_broken);
+        p2m_change_type(d, gfn, pt, p2m_ram_broken);
         return 0;
     }
 
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/debug.c
--- a/xen/arch/x86/debug.c      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/debug.c      Thu Jun 02 13:16:52 2011 +0100
@@ -58,7 +58,7 @@
         return INVALID_MFN;
     }
 
-    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(dp), gfn, &gfntype)); 
+    mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype)); 
     if ( p2m_is_readonly(gfntype) && toaddr )
     {
         DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/domain.c     Thu Jun 02 13:16:52 2011 +0100
@@ -164,9 +164,7 @@
     }
 
     if ( is_hvm_domain(d) )
-    {
-        p2m_pod_dump_data(p2m_get_hostp2m(d));
-    }
+        p2m_pod_dump_data(d);
 
     spin_lock(&d->page_alloc_lock);
     page_list_for_each ( page, &d->xenpage_list )
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/domctl.c     Thu Jun 02 13:16:52 2011 +0100
@@ -977,7 +977,7 @@
 
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
             for ( i = 0; i < nr_mfns; i++ )
-                set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
+                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
         }
         else
         {
@@ -986,7 +986,7 @@
                  gfn, mfn, nr_mfns);
 
             for ( i = 0; i < nr_mfns; i++ )
-                clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
+                clear_mmio_p2m_entry(d, gfn+i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         }
 
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Thu Jun 02 13:16:52 2011 +0100
@@ -55,7 +55,6 @@
     paddr_t value = ram_gpa;
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
-    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
     ioreq_t *p = get_ioreq(curr);
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
@@ -63,10 +62,10 @@
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt);
+    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, ram_gfn);
+        p2m_mem_paging_populate(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
@@ -640,7 +639,6 @@
     unsigned long saddr, daddr, bytes;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
-    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
     p2m_type_t p2mt;
     int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
@@ -671,12 +669,12 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    (void)gfn_to_mfn(p2m, sgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
 
-    (void)gfn_to_mfn(p2m, dgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
@@ -347,17 +347,16 @@
     struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
 {
     struct page_info *page;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
     unsigned long mfn;
     void *va;
 
-    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt));
+    mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt));
     if ( !p2m_is_ram(p2mt) )
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gmfn);
+        p2m_mem_paging_populate(d, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1181,7 +1180,7 @@
     p2m_access_t p2ma;
     mfn_t mfn;
     struct vcpu *v = current;
-    struct p2m_domain *p2m = NULL;
+    struct p2m_domain *p2m;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1270,12 +1269,13 @@
 #ifdef __x86_64__
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
-        p2m_mem_paging_populate(p2m, gfn);
+        p2m_mem_paging_populate(v->domain, gfn);
 
     /* Mem sharing: unshare the page and try again */
     if ( p2mt == p2m_ram_shared )
     {
-        mem_sharing_unshare_page(p2m, gfn, 0);
+        ASSERT(!p2m_is_nestedp2m(p2m));
+        mem_sharing_unshare_page(p2m->domain, gfn, 0);
         return 1;
     }
 #endif
@@ -1289,7 +1289,7 @@
          * page.
          */
         paging_mark_dirty(v->domain, mfn_x(mfn));
-        p2m_change_type(p2m, gfn, p2m_ram_logdirty, p2m_ram_rw);
+        p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
         return 1;
     }
 
@@ -1486,7 +1486,6 @@
 {
     struct vcpu *v = current;
     p2m_type_t p2mt;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
     unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -1526,7 +1525,7 @@
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
-            mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));
+            mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt));
             if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
                  !get_page(mfn_to_page(mfn), v->domain))
             {
@@ -1617,8 +1616,7 @@
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
-            value >> PAGE_SHIFT, &p2mt));
+        mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt));
         if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
              !get_page(mfn_to_page(mfn), v->domain) )
               goto bad_cr3;
@@ -1764,23 +1762,23 @@
 {
     unsigned long mfn;
     p2m_type_t p2mt;
-    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+    struct domain *d = current->domain;
 
     mfn = mfn_x(writable
-                ? gfn_to_mfn_unshare(p2m, gfn, &p2mt)
-                : gfn_to_mfn(p2m, gfn, &p2mt));
+                ? gfn_to_mfn_unshare(d, gfn, &p2mt)
+                : gfn_to_mfn(d, gfn, &p2mt));
     if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
         return NULL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn);
+        p2m_mem_paging_populate(d, gfn);
         return NULL;
     }
 
     ASSERT(mfn_valid(mfn));
 
     if ( writable )
-        paging_mark_dirty(current->domain, mfn);
+        paging_mark_dirty(d, mfn);
 
     return map_domain_page(mfn);
 }
@@ -2182,7 +2180,6 @@
     void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
 {
     struct vcpu *curr = current;
-    struct p2m_domain *p2m;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
@@ -2204,8 +2201,6 @@
         return HVMCOPY_unhandleable;
 #endif
 
-    p2m = p2m_get_hostp2m(curr->domain);
-
     while ( todo > 0 )
     {
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
@@ -2229,11 +2224,11 @@
             gfn = addr >> PAGE_SHIFT;
         }
 
-        mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt));
+        mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt));
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            p2m_mem_paging_populate(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3650,7 +3645,6 @@
     {
         struct xen_hvm_modified_memory a;
         struct domain *d;
-        struct p2m_domain *p2m;
         unsigned long pfn;
 
         if ( copy_from_guest(&a, arg, 1) )
@@ -3678,14 +3672,13 @@
         if ( !paging_mode_log_dirty(d) )
             goto param_fail3;
 
-        p2m = p2m_get_hostp2m(d);
         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
         {
             p2m_type_t t;
-            mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
+            mfn_t mfn = gfn_to_mfn(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                p2m_mem_paging_populate(d, pfn);
 
                 rc = -EINVAL;
                 goto param_fail3;
@@ -3724,7 +3717,7 @@
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
-            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t);
+            gfn_to_mfn_unshare(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
             else if ( p2m_is_readonly(t) )
@@ -3743,7 +3736,6 @@
     {
         struct xen_hvm_set_mem_type a;
         struct domain *d;
-        struct p2m_domain *p2m;
         unsigned long pfn;
         
         /* Interface types to internal p2m types */
@@ -3773,17 +3765,15 @@
         if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
             goto param_fail4;
 
-        p2m = p2m_get_hostp2m(d);
         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
         {
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
-            mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
+            mfn = gfn_to_mfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
-
+                p2m_mem_paging_populate(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
             }
@@ -3801,7 +3791,7 @@
             }
             else
             {
-                nt = p2m_change_type(p2m, pfn, t, memtype[a.hvmmem_type]);
+                nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
                 if ( nt != t )
                 {
                     gdprintk(XENLOG_WARNING,
@@ -3877,7 +3867,7 @@
             mfn_t mfn;
             int success;
 
-            mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
+            mfn = gfn_to_mfn_unshare(d, pfn, &t);
 
             p2m_lock(p2m);
             success = p2m->set_entry(p2m, pfn, mfn, 0, t, memaccess[a.hvmmem_access]);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Thu Jun 02 13:16:52 2011 +0100
@@ -390,7 +390,7 @@
     {
         struct domain *d = v->domain;
         p2m_type_t p2mt;
-        gfn_to_mfn(p2m_get_hostp2m(d), paddr_to_pfn(gpaddr), &p2mt);
+        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
         if (p2m_is_ram(p2mt))
             gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/stdvga.c Thu Jun 02 13:16:52 2011 +0100
@@ -469,7 +469,7 @@
     int i;
     int sign = p->df ? -1 : 1;
     p2m_type_t p2mt;
-    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+    struct domain *d = current->domain;
 
     if ( p->data_is_ptr )
     {
@@ -482,7 +482,7 @@
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
+                    (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
@@ -504,7 +504,7 @@
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
+                    (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
                         return 0;
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Jun 02 13:16:52 2011 +0100
@@ -244,7 +244,7 @@
     {
         if ( c->cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(gfn_to_mfn(p2m, c->cr3 >> PAGE_SHIFT, &p2mt));
+            mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
@@ -1127,6 +1127,7 @@
     unsigned long gfn = gpa >> PAGE_SHIFT;
     mfn_t mfn;
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
     struct p2m_domain *p2m = NULL;
 
     ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul, 0, 0, 0, 0);
@@ -1143,7 +1144,7 @@
         p2m = p2m_get_p2m(v);
         _d.gpa = gpa;
         _d.qualification = 0;
-        _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
+        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query));
         
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -1163,7 +1164,7 @@
     if ( p2m == NULL )
         p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
     gdprintk(XENLOG_ERR,
          "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
          gpa, mfn_x(mfn), p2mt);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 02 13:16:52 2011 +0100
@@ -476,8 +476,7 @@
     {
         if ( cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
-                cr3 >> PAGE_SHIFT, &p2mt));
+            mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
@@ -993,8 +992,7 @@
     if ( cr3 & 0x1fUL )
         goto crash;
 
-    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
-        cr3 >> PAGE_SHIFT, &p2mt));
+    mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
     if ( !p2m_is_ram(p2mt) )
         goto crash;
 
@@ -1752,8 +1750,7 @@
     if ( apic_va == NULL )
         return -ENOMEM;
     share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
-    set_mmio_p2m_entry(
-        p2m_get_hostp2m(d), paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
+    set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
         _mfn(virt_to_mfn(apic_va)));
     d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
 
@@ -1959,7 +1956,7 @@
     unsigned long gla, gfn = gpa >> PAGE_SHIFT;
     mfn_t mfn;
     p2m_type_t p2mt;
-    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+    struct domain *d = current->domain;
 
     if ( tb_init_done )
     {
@@ -1972,7 +1969,7 @@
 
         _d.gpa = gpa;
         _d.qualification = qualification;
-        _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
+        _d.mfn = mfn_x(gfn_to_mfn_query(d, gfn, &_d.p2mt));
         
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -1988,7 +1985,7 @@
         return;
 
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
@@ -2000,7 +1997,7 @@
              (qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
              gpa, mfn_x(mfn), p2mt);
 
-    ept_walk_table(current->domain, gfn);
+    ept_walk_table(d, gfn);
 
     if ( qualification & EPT_GLA_VALID )
     {
@@ -2008,7 +2005,7 @@
         gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
     }
 
-    domain_crash(current->domain);
+    domain_crash(d);
 }
 
 static void vmx_failed_vmentry(unsigned int exit_reason,
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm.c Thu Jun 02 13:16:52 2011 +0100
@@ -1808,8 +1808,7 @@
     if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
     {
         /* Translate foreign guest addresses. */
-        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pg_dom),
-            l1e_get_pfn(nl1e), &p2mt));
+        mfn = mfn_x(gfn_to_mfn(pg_dom, l1e_get_pfn(nl1e), &p2mt));
         if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
             return -EINVAL;
         ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
@@ -3482,13 +3481,13 @@
 
             req.ptr -= cmd;
             gmfn = req.ptr >> PAGE_SHIFT;
-            mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pt_owner), gmfn, &p2mt));
+            mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt));
             if ( !p2m_is_valid(p2mt) )
               mfn = INVALID_MFN;
 
             if ( p2m_is_paged(p2mt) )
             {
-                p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn);
+                p2m_mem_paging_populate(pg_owner, gmfn);
 
                 rc = -ENOENT;
                 break;
@@ -3520,13 +3519,11 @@
                 {
                     l1_pgentry_t l1e = l1e_from_intpte(req.val);
                     p2m_type_t l1e_p2mt;
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner),
-                        l1e_get_pfn(l1e), &l1e_p2mt);
+                    gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt);
 
                     if ( p2m_is_paged(l1e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
-                            l1e_get_pfn(l1e));
+                        p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
                         rc = -ENOENT;
                         break;
                     }
@@ -3544,7 +3541,7 @@
                         /* Unshare the page for RW foreign mappings */
                         if ( l1e_get_flags(l1e) & _PAGE_RW )
                         {
-                            rc = mem_sharing_unshare_page(p2m_get_hostp2m(pg_owner), 
+                            rc = mem_sharing_unshare_page(pg_owner, 
                                                           l1e_get_pfn(l1e), 
                                                           0);
                             if ( rc )
@@ -3562,12 +3559,11 @@
                 {
                     l2_pgentry_t l2e = l2e_from_intpte(req.val);
                     p2m_type_t l2e_p2mt;
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt);
+                    gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt);
 
                     if ( p2m_is_paged(l2e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
-                            l2e_get_pfn(l2e));
+                        p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e));
                         rc = -ENOENT;
                         break;
                     }
@@ -3591,12 +3587,11 @@
                 {
                     l3_pgentry_t l3e = l3e_from_intpte(req.val);
                     p2m_type_t l3e_p2mt;
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt);
+                    gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt);
 
                     if ( p2m_is_paged(l3e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
-                            l3e_get_pfn(l3e));
+                        p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e));
                         rc = -ENOENT;
                         break;
                     }
@@ -3620,13 +3615,11 @@
                 {
                     l4_pgentry_t l4e = l4e_from_intpte(req.val);
                     p2m_type_t l4e_p2mt;
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner),
-                        l4e_get_pfn(l4e), &l4e_p2mt);
+                    gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt);
 
                     if ( p2m_is_paged(l4e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
-                            l4e_get_pfn(l4e));
+                        p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e));
                         rc = -ENOENT;
                         break;
                     }
@@ -4003,7 +3996,7 @@
         p2mt = p2m_grant_map_ro;
     else
         p2mt = p2m_grant_map_rw;
-    rc = guest_physmap_add_entry(p2m_get_hostp2m(current->domain),
+    rc = guest_physmap_add_entry(current->domain,
                                  addr >> PAGE_SHIFT, frame, 0, p2mt);
     if ( rc )
         return GNTST_general_error;
@@ -4053,7 +4046,7 @@
     if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
         return GNTST_general_error;
 
-    old_mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &type);
+    old_mfn = gfn_to_mfn(d, gfn, &type);
     if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
     {
         gdprintk(XENLOG_WARNING,
@@ -4652,8 +4645,7 @@
         {
             p2m_type_t p2mt;
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d),
-                                                xatp.idx, &p2mt));
+            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
             /* If the page is still shared, exit early */
             if ( p2m_is_shared(p2mt) )
             {
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Thu Jun 02 13:16:52 2011 +0100
@@ -92,12 +92,14 @@
                                    p2m_type_t *p2mt,
                                    uint32_t *rc) 
 {
+    p2m_access_t a;
+
     /* Translate the gfn, unsharing if shared */
-    *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt);
+    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn_x(gfn));
-
+        ASSERT(!p2m_is_nestedp2m(p2m));
+        p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
         *rc = _PAGE_PAGED;
         return NULL;
     }
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Thu Jun 02 13:16:52 2011 +0100
@@ -54,13 +54,16 @@
     mfn_t top_mfn;
     void *top_map;
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
     walk_t gw;
 
     /* Get the top-level table's MFN */
-    top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt);
+    top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT, 
+                                  &p2mt, &p2ma, p2m_unshare);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
+        ASSERT(!p2m_is_nestedp2m(p2m));
+        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
@@ -89,10 +92,11 @@
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
-        gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt);
+        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn_x(gfn));
+            ASSERT(!p2m_is_nestedp2m(p2m));
+            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jun 02 13:16:52 2011 +0100
@@ -71,7 +71,7 @@
 
     /* set l1e entries of P2M table to be read-only. */
     for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
+        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
@@ -91,7 +91,7 @@
 
     /* set l1e entries of P2M table with normal mode */
     for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);
+        p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
@@ -107,7 +107,7 @@
 
     /* set l1e entries of P2M table to be read-only. */
     for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
+        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
 }
@@ -201,8 +201,7 @@
     hap_unlock(d);
 
     /* set l1e entries of P2M table to be read-only. */
-    p2m_change_entry_type_global(p2m_get_hostp2m(d),
-        p2m_ram_rw, p2m_ram_logdirty);
+    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
 }
@@ -214,16 +213,14 @@
     hap_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
-    p2m_change_entry_type_global(p2m_get_hostp2m(d),
-        p2m_ram_logdirty, p2m_ram_rw);
+    p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
     return 0;
 }
 
 static void hap_clean_dirty_bitmap(struct domain *d)
 {
     /* set l1e entries of P2M table to be read-only. */
-    p2m_change_entry_type_global(p2m_get_hostp2m(d),
-        p2m_ram_rw, p2m_ram_logdirty);
+    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
     flush_tlb_mask(d->domain_dirty_cpumask);
 }
 
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Thu Jun 02 13:16:52 2011 +0100
@@ -123,9 +123,10 @@
 {
     mfn_t mfn;
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
 
-    /* we use gfn_to_mfn_query() function to walk L0 P2M table */
-    mfn = gfn_to_mfn_query(p2m, L1_gpa >> PAGE_SHIFT, &p2mt);
+    /* walk L0 P2M table */
+    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, p2m_query);
 
     if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
         return NESTEDHVM_PAGEFAULT_ERROR;
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_event.c       Thu Jun 02 13:16:52 2011 +0100
@@ -252,7 +252,7 @@
             /* Get MFN of ring page */
             guest_get_eff_l1e(v, ring_addr, &l1e);
             gfn = l1e_get_pfn(l1e);
-            ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
+            ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
 
             rc = -EINVAL;
             if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
@@ -261,7 +261,7 @@
             /* Get MFN of shared page */
             guest_get_eff_l1e(v, shared_addr, &l1e);
             gfn = l1e_get_pfn(l1e);
-            shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
+            shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
 
             rc = -EINVAL;
             if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/mem_paging.c
--- a/xen/arch/x86/mm/mem_paging.c      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_paging.c      Thu Jun 02 13:16:52 2011 +0100
@@ -28,9 +28,6 @@
 int mem_paging_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                       XEN_GUEST_HANDLE(void) u_domctl)
 {
-    int rc;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
     /* Only HAP is supported */
     if ( !hap_enabled(d) )
          return -ENODEV;
@@ -40,37 +37,35 @@
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE:
     {
         unsigned long gfn = mec->gfn;
-        rc = p2m_mem_paging_nominate(p2m, gfn);
+        return p2m_mem_paging_nominate(d, gfn);
     }
     break;
 
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT:
     {
         unsigned long gfn = mec->gfn;
-        rc = p2m_mem_paging_evict(p2m, gfn);
+        return p2m_mem_paging_evict(d, gfn);
     }
     break;
 
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP:
     {
         unsigned long gfn = mec->gfn;
-        rc = p2m_mem_paging_prep(p2m, gfn);
+        return p2m_mem_paging_prep(d, gfn);
     }
     break;
 
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME:
     {
-        p2m_mem_paging_resume(p2m);
-        rc = 0;
+        p2m_mem_paging_resume(d);
+        return 0;
     }
     break;
 
     default:
-        rc = -ENOSYS;
+        return -ENOSYS;
         break;
     }
-
-    return rc;
 }
 
 
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
@@ -252,7 +252,6 @@
             list_for_each(le, &e->gfns)
             {
                 struct domain *d;
-                struct p2m_domain *p2m;
                 p2m_type_t t;
                 mfn_t mfn;
 
@@ -264,8 +263,7 @@
                             g->domain, g->gfn, mfn_x(e->mfn));
                     continue;
                 }
-                p2m = p2m_get_hostp2m(d);
-                mfn = gfn_to_mfn(p2m, g->gfn, &t); 
+                mfn = gfn_to_mfn(d, g->gfn, &t); 
                 if(mfn_x(mfn) != mfn_x(e->mfn))
                     MEM_SHARING_DEBUG("Incorrect P2M for d=%d, PFN=%lx."
                                       "Expecting MFN=%ld, got %ld\n",
@@ -376,7 +374,7 @@
     p2m_type_t p2mt;
     mfn_t mfn;
 
-    mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &p2mt);
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
 
     printk("Debug for domain=%d, gfn=%lx, ", 
             d->domain_id, 
@@ -485,7 +483,7 @@
     return mem_sharing_debug_gfn(d, gfn); 
 }
 
-int mem_sharing_nominate_page(struct p2m_domain *p2m, 
+int mem_sharing_nominate_page(struct domain *d,
                               unsigned long gfn,
                               int expected_refcnt,
                               shr_handle_t *phandle)
@@ -497,12 +495,11 @@
     shr_handle_t handle;
     shr_hash_entry_t *hash_entry;
     struct gfn_info *gfn_info;
-    struct domain *d = p2m->domain;
 
     *phandle = 0UL;
 
     shr_lock(); 
-    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
@@ -540,7 +537,7 @@
     }
 
     /* Change the p2m type */
-    if(p2m_change_type(p2m, gfn, p2mt, p2m_ram_shared) != p2mt) 
+    if(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt) 
     {
         /* This is unlikely, as the type must have changed since we've checked
          * it a few lines above.
@@ -602,7 +599,7 @@
         list_del(&gfn->list);
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
-        BUG_ON(set_shared_p2m_entry(p2m_get_hostp2m(d), gfn->gfn, se->mfn) == 0);
+        BUG_ON(set_shared_p2m_entry(d, gfn->gfn, se->mfn) == 0);
         put_domain(d);
         list_add(&gfn->list, &se->gfns);
         put_page_and_type(cpage);
@@ -621,7 +618,7 @@
     return ret;
 }
 
-int mem_sharing_unshare_page(struct p2m_domain *p2m,
+int mem_sharing_unshare_page(struct domain *d,
                              unsigned long gfn, 
                              uint16_t flags)
 {
@@ -634,13 +631,12 @@
     struct gfn_info *gfn_info = NULL;
     shr_handle_t handle;
     struct list_head *le;
-    struct domain *d = p2m->domain;
 
     mem_sharing_audit();
     /* Remove the gfn_info from the list */
     shr_lock();
     
-    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
     
     /* Has someone already unshared it? */
     if (!p2m_is_shared(p2mt)) {
@@ -706,7 +702,7 @@
     unmap_domain_page(s);
     unmap_domain_page(t);
 
-    BUG_ON(set_shared_p2m_entry(p2m, gfn, page_to_mfn(page)) == 0);
+    BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) == 0);
     put_page_and_type(old_page);
 
 private_page_found:    
@@ -717,7 +713,7 @@
     else
         atomic_dec(&nr_saved_mfns);
 
-    if(p2m_change_type(p2m, gfn, p2m_ram_shared, p2m_ram_rw) != 
+    if(p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) != 
                                                 p2m_ram_shared) 
     {
         printk("Could not change p2m type.\n");
@@ -754,7 +750,7 @@
             shr_handle_t handle;
             if(!mem_sharing_enabled(d))
                 return -EINVAL;
-            rc = mem_sharing_nominate_page(p2m_get_hostp2m(d), gfn, 0, &handle);
+            rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
             mec->u.nominate.handle = handle;
             mem_sharing_audit();
         }
@@ -770,8 +766,7 @@
                 return -EINVAL;
             if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
                 return -EINVAL;
-            rc = mem_sharing_nominate_page(p2m_get_hostp2m(d),
-                gfn, 3, &handle);
+            rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
             mec->u.nominate.handle = handle;
             mem_sharing_audit();
         }
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:52 2011 +0100
@@ -518,7 +518,7 @@
     {
         p2m_type_t t;
 
-        gfn_to_mfn_query(p2m, gpfn + i, &t);
+        gfn_to_mfn_query(d, gpfn + i, &t);
 
         if ( t == p2m_populate_on_demand )
             pod++;
@@ -558,7 +558,7 @@
         mfn_t mfn;
         p2m_type_t t;
 
-        mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
+        mfn = gfn_to_mfn_query(d, gpfn + i, &t);
         if ( t == p2m_populate_on_demand )
         {
             set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
@@ -606,9 +606,9 @@
     return ret;
 }
 
-void
-p2m_pod_dump_data(struct p2m_domain *p2m)
+void p2m_pod_dump_data(struct domain *d)
 {
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     printk("    PoD entries=%d cachesize=%d\n",
            p2m->pod.entry_count, p2m->pod.count);
 }
@@ -639,7 +639,7 @@
     for ( i=0; i<SUPERPAGE_PAGES; i++ )
     {
         
-        mfn = gfn_to_mfn_query(p2m, gfn + i, &type);
+        mfn = gfn_to_mfn_query(d, gfn + i, &type);
 
         if ( i == 0 )
         {
@@ -767,7 +767,7 @@
     /* First, get the gfn list, translate to mfns, and map the pages. */
     for ( i=0; i<count; i++ )
     {
-        mfns[i] = gfn_to_mfn_query(p2m, gfns[i], types + i);
+        mfns[i] = gfn_to_mfn_query(d, gfns[i], types + i);
         /* If this is ram, and not a pagetable or from the xen heap, and probably not mapped
            elsewhere, map it; otherwise, skip. */
         if ( p2m_is_ram(types[i])
@@ -906,7 +906,7 @@
     /* FIXME: Figure out how to avoid superpages */
     for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
     {
-        gfn_to_mfn_query(p2m, i, &t );
+        gfn_to_mfn_query(p2m->domain, i, &t );
         if ( p2m_is_ram(t) )
         {
             gfns[j] = i;
@@ -1095,7 +1095,7 @@
     /* Make sure all gpfns are unused */
     for ( i = 0; i < (1UL << order); i++ )
     {
-        omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
         if ( p2m_is_ram(ot) )
         {
             printk("%s: gfn_to_mfn returned type %d!\n",
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
@@ -677,10 +677,9 @@
     return mfn;
 }
 
-
 static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
-               p2m_query_t q)
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, 
+               p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
 {
     mfn_t mfn;
     paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
@@ -697,8 +696,6 @@
     /* Not implemented except with EPT */
     *a = p2m_access_rwx; 
 
-    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
-
     if ( gfn > p2m->max_mapped_pfn )
         /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
@@ -707,6 +704,8 @@
     if ( p2m == p2m_get_hostp2m(current->domain) )
         return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q);
 
+    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+
 #if CONFIG_PAGING_LEVELS >= 4
     {
         l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
@@ -1059,7 +1058,7 @@
 
         if ( test_linear && (gfn <= p2m->max_mapped_pfn) )
         {
-            lp2mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &type));
+            lp2mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &type, p2m_query));
             if ( lp2mfn != mfn_x(p2mfn) )
             {
                 P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -123,9 +123,10 @@
     return p2m_init_nestedp2m(d);
 }
 
-void p2m_change_entry_type_global(struct p2m_domain *p2m,
+void p2m_change_entry_type_global(struct domain *d,
                                   p2m_type_t ot, p2m_type_t nt)
 {
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_lock(p2m);
     p2m->change_entry_type_global(p2m, ot, nt);
     p2m_unlock(p2m);
@@ -302,7 +303,11 @@
     {
         mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
-            BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
+        {
+            ASSERT(!p2m_is_nestedp2m(p2m));
+            BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
+        }
+
     }
 #endif
 
@@ -369,9 +374,10 @@
 }
 
 void
-guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
+guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                           unsigned long mfn, unsigned int page_order)
 {
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_lock(p2m);
     audit_p2m(p2m, 1);
     p2m_remove_page(p2m, gfn, mfn, page_order);
@@ -380,11 +386,11 @@
 }
 
 int
-guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                         unsigned long mfn, unsigned int page_order, 
                         p2m_type_t t)
 {
-    struct domain *d = p2m->domain;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long i, ogfn;
     p2m_type_t ot;
     mfn_t omfn;
@@ -422,7 +428,7 @@
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
         if ( p2m_is_grant(ot) )
         {
             /* Really shouldn't be unmapping grant maps this way */
@@ -461,7 +467,7 @@
              * address */
             P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                       mfn + i, ogfn, gfn + i);
-            omfn = gfn_to_mfn_query(p2m, ogfn, &ot);
+            omfn = gfn_to_mfn_query(d, ogfn, &ot);
             if ( p2m_is_ram(ot) )
             {
                 ASSERT(mfn_valid(omfn));
@@ -507,17 +513,18 @@
 
 /* Modify the p2m type of a single gfn from ot to nt, returning the 
  * entry's previous type.  Resets the access permissions. */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn, 
+p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, 
                            p2m_type_t ot, p2m_type_t nt)
 {
     p2m_type_t pt;
     mfn_t mfn;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 
     p2m_lock(p2m);
 
-    mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+    mfn = gfn_to_mfn_query(d, gfn, &pt);
     if ( pt == ot )
         set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
 
@@ -527,19 +534,20 @@
 }
 
 int
-set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
     int rc = 0;
     p2m_type_t ot;
     mfn_t omfn;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-    if ( !paging_mode_translate(p2m->domain) )
+    if ( !paging_mode_translate(d) )
         return 0;
 
-    omfn = gfn_to_mfn_query(p2m, gfn, &ot);
+    omfn = gfn_to_mfn_query(d, gfn, &ot);
     if ( p2m_is_grant(ot) )
     {
-        domain_crash(p2m->domain);
+        domain_crash(d);
         return 0;
     }
     else if ( p2m_is_ram(ot) )
@@ -556,21 +564,22 @@
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            mfn_x(gfn_to_mfn(p2m, gfn, &ot)));
+            mfn_x(gfn_to_mfn(d, gfn, &ot)));
     return rc;
 }
 
 int
-clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn)
+clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
 {
     int rc = 0;
     mfn_t mfn;
     p2m_type_t t;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-    if ( !paging_mode_translate(p2m->domain) )
+    if ( !paging_mode_translate(d) )
         return 0;
 
-    mfn = gfn_to_mfn(p2m, gfn, &t);
+    mfn = gfn_to_mfn(d, gfn, &t);
 
     /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
     if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
@@ -588,8 +597,9 @@
 }
 
 int
-set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
+set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc = 0;
     int need_lock = !p2m_locked_by_me(p2m);
     p2m_type_t ot;
@@ -598,7 +608,7 @@
     if ( !paging_mode_translate(p2m->domain) )
         return 0;
 
-    omfn = gfn_to_mfn_query(p2m, gfn, &ot);
+    omfn = gfn_to_mfn_query(p2m->domain, gfn, &ot);
     /* At the moment we only allow p2m change if gfn has already been made
      * sharable first */
     ASSERT(p2m_is_shared(ot));
@@ -620,14 +630,15 @@
 }
 
 #ifdef __x86_64__
-int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
 {
     struct page_info *page;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
     mfn_t mfn;
     int ret;
 
-    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn(p2m->domain, gfn, &p2mt);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
@@ -664,15 +675,15 @@
     return ret;
 }
 
-int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
 {
     struct page_info *page;
     p2m_type_t p2mt;
     mfn_t mfn;
-    struct domain *d = p2m->domain;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Get mfn */
-    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
     if ( unlikely(!mfn_valid(mfn)) )
         return -EINVAL;
 
@@ -702,11 +713,10 @@
     return 0;
 }
 
-void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
     mem_event_request_t req;
-    struct domain *d = p2m->domain;
 
     /* Check that there's space on the ring for this request */
     if ( mem_event_check_ring(d) == 0)
@@ -721,12 +731,12 @@
     }
 }
 
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
     mem_event_request_t req;
     p2m_type_t p2mt;
-    struct domain *d = p2m->domain;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
     if ( mem_event_check_ring(d) )
@@ -738,7 +748,7 @@
     /* Fix p2m mapping */
     /* XXX: It seems inefficient to have this here, as it's only needed
      *      in one case (ept guest accessing paging out page) */
-    gfn_to_mfn(p2m, gfn, &p2mt);
+    gfn_to_mfn(d, gfn, &p2mt);
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
@@ -768,9 +778,10 @@
     mem_event_put_request(d, &req);
 }
 
-int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
 {
     struct page_info *page;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Get a free page */
     page = alloc_domheap_page(p2m->domain, 0);
@@ -786,9 +797,9 @@
     return 0;
 }
 
-void p2m_mem_paging_resume(struct p2m_domain *p2m)
+void p2m_mem_paging_resume(struct domain *d)
 {
-    struct domain *d = p2m->domain;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     mem_event_response_t rsp;
     p2m_type_t p2mt;
     mfn_t mfn;
@@ -799,7 +810,7 @@
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
     {
-        mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
+        mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
         p2m_lock(p2m);
         set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
         set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Jun 02 13:16:52 2011 +0100
@@ -3712,7 +3712,7 @@
 
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
-            mfn_t mfn = gfn_to_mfn(p2m, begin_pfn + i, &t);
+            mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
             struct page_info *page;
             int dirty = 0;
             paddr_t sl1ma = dirty_vram->sl1ma[i];
@@ -3797,7 +3797,7 @@
                 /* was clean for more than two seconds, try to disable guest
                  * write access */
                 for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = gfn_to_mfn(p2m, i, &t);
+                    mfn_t mfn = gfn_to_mfn(d, i, &t);
                     if (mfn_x(mfn) != INVALID_MFN)
                         flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Jun 02 13:16:52 2011 +0100
@@ -2257,7 +2257,6 @@
     shadow_l4e_t *sl4p = se;
     mfn_t sl3mfn = _mfn(INVALID_MFN);
     struct domain *d = v->domain;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
     int result = 0;
 
@@ -2266,7 +2265,7 @@
     if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
     {
         gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
-        mfn_t gl3mfn = gfn_to_mfn_query(p2m, gl3gfn, &p2mt);
+        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
         else if ( p2mt != p2m_populate_on_demand )
@@ -2317,14 +2316,13 @@
     mfn_t sl2mfn = _mfn(INVALID_MFN);
     p2m_type_t p2mt;
     int result = 0;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     perfc_incr(shadow_validate_gl3e_calls);
 
     if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
     {
         gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
-        mfn_t gl2mfn = gfn_to_mfn_query(p2m, gl2gfn, &p2mt);
+        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
         else if ( p2mt != p2m_populate_on_demand )
@@ -2348,7 +2346,6 @@
     guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
     shadow_l2e_t *sl2p = se;
     mfn_t sl1mfn = _mfn(INVALID_MFN);
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
     p2m_type_t p2mt;
     int result = 0;
 
@@ -2374,7 +2371,7 @@
         }
         else
         {
-            mfn_t gl1mfn = gfn_to_mfn_query(p2m, gl1gfn, &p2mt);
+            mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
             if ( p2m_is_ram(p2mt) )
                 sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); 
             else if ( p2mt != p2m_populate_on_demand )
@@ -2435,7 +2432,6 @@
     shadow_l1e_t *sl1p = se;
     gfn_t gfn;
     mfn_t gmfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
     p2m_type_t p2mt;
     int result = 0;
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -2445,7 +2441,7 @@
     perfc_incr(shadow_validate_gl1e_calls);
 
     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = gfn_to_mfn_query(p2m, gfn, &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2505,7 +2501,7 @@
             shadow_l1e_t nsl1e;
 
             gfn = guest_l1e_get_gfn(gl1e);
-            gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+            gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
             l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
             rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn);
 
@@ -2828,7 +2824,7 @@
 
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
@@ -3186,7 +3182,7 @@
 
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.l1e);
-    gmfn = gfn_to_mfn_guest(p2m_get_hostp2m(d), gfn, &p2mt);
+    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
 
     if ( shadow_mode_refcounts(d) && 
          ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
@@ -4296,7 +4292,7 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
@@ -4309,7 +4305,7 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
                                            ? SH_type_l2h_shadow 
@@ -4707,7 +4703,7 @@
     if ( gcr3 == gpa )
         fast_path = 1;
 
-    gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
     if ( !mfn_valid(gmfn) || !p2m_is_ram(p2mt) )
     {
         printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
@@ -4727,7 +4723,7 @@
         {
             /* retrieving the l2s */
             gl2a = guest_l3e_get_paddr(gl3e[i]);
-            gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gl2a >> PAGE_SHIFT), &p2mt);
+            gmfn = gfn_to_mfn_query(v->domain, _gfn(gl2a >> PAGE_SHIFT), &p2mt);
             smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_pae_shadow);
         }
 
@@ -4762,7 +4758,7 @@
 
     shadow_lock(v->domain);
 
-    gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
 #if GUEST_PAGING_LEVELS == 2
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
 #else
@@ -4802,10 +4798,9 @@
     mfn_t mfn;
     p2m_type_t p2mt;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     /* Translate the VA to a GFN */
-    gfn = sh_gva_to_gfn(v, p2m, vaddr, &pfec);
+    gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec);
     if ( gfn == INVALID_GFN ) 
     {
         if ( is_hvm_vcpu(v) )
@@ -4818,9 +4813,9 @@
     /* Translate the GFN to an MFN */
     /* PoD: query only if shadow lock is held (to avoid deadlock) */
     if ( shadow_locked_by_me(v->domain) )
-        mfn = gfn_to_mfn_query(p2m, _gfn(gfn), &p2mt);
+        mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
     else
-        mfn = gfn_to_mfn(p2m, _gfn(gfn), &p2mt);
+        mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
         
     if ( p2m_is_readonly(p2mt) )
         return _mfn(READONLY_GFN);
@@ -5226,7 +5221,7 @@
             {
                 gfn = guest_l1e_get_gfn(*gl1e);
                 mfn = shadow_l1e_get_mfn(*sl1e);
-                gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+                gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
                 if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -5270,7 +5265,6 @@
     shadow_l2e_t *sl2e;
     mfn_t mfn, gmfn, gl2mfn;
     gfn_t gfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
     p2m_type_t p2mt;
     char *s;
     int done = 0;
@@ -5298,7 +5292,7 @@
             mfn = shadow_l2e_get_mfn(*sl2e);
             gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
                 ? get_fl1_shadow_status(v, gfn)
-                : get_shadow_status(v, gfn_to_mfn_query(p2m, gfn, &p2mt), 
+                : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                     SH_type_l1_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
@@ -5306,7 +5300,7 @@
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn), 
                            (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
-                           : mfn_x(gfn_to_mfn_query(p2m,
+                           : mfn_x(gfn_to_mfn_query(v->domain,
                                    gfn, &p2mt)), mfn_x(gmfn), mfn_x(mfn));
         }
     });
@@ -5346,7 +5340,7 @@
         {
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
-            gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt),
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
                                        is_pv_32on64_vcpu(v))
                                       && !shadow_mode_external(v->domain)
@@ -5394,7 +5388,7 @@
         {
             gfn = guest_l4e_get_gfn(*gl4e);
             mfn = shadow_l4e_get_mfn(*sl4e);
-            gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain),
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain,
                                      gfn, &p2mt), 
                                      SH_type_l3_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/shadow/types.h    Thu Jun 02 13:16:52 2011 +0100
@@ -193,9 +193,9 @@
 
  /* Override gfn_to_mfn to work with gfn_t */
 #undef gfn_to_mfn_query
-#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
 #undef gfn_to_mfn_guest
-#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
 
 /* The shadow types needed for the various levels. */
 
diff -r 0d3e0a571fdd -r de0a051b36ce xen/arch/x86/msi.c
--- a/xen/arch/x86/msi.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/msi.c        Thu Jun 02 13:16:52 2011 +0100
@@ -663,7 +663,7 @@
             WARN();
 
         if ( dev->domain )
-            p2m_change_entry_type_global(p2m_get_hostp2m(dev->domain),
+            p2m_change_entry_type_global(dev->domain,
                                          p2m_mmio_direct, p2m_mmio_direct);
         if ( !dev->domain || !paging_mode_translate(dev->domain) )
         {
diff -r 0d3e0a571fdd -r de0a051b36ce xen/common/grant_table.c
--- a/xen/common/grant_table.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/common/grant_table.c  Thu Jun 02 13:16:52 2011 +0100
@@ -110,7 +110,7 @@
 #define gfn_to_mfn_private(_d, _gfn) ({                     \
     p2m_type_t __p2mt;                                      \
     unsigned long __x;                                      \
-    __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt));  \
+    __x = mfn_x(gfn_to_mfn_unshare((_d), (_gfn), &__p2mt)); \
     BUG_ON(p2m_is_shared(__p2mt)); /* XXX fixme */          \
     if ( !p2m_is_valid(__p2mt) )                            \
         __x = INVALID_MFN;                                  \
@@ -146,16 +146,14 @@
 {
     int rc = GNTST_okay;
 #if defined(P2M_PAGED_TYPES) || defined(P2M_SHARED_TYPES)
-    struct p2m_domain *p2m;
     p2m_type_t p2mt;
     mfn_t mfn;
 
-    p2m = p2m_get_hostp2m(rd);
     if ( readonly )
-        mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+        mfn = gfn_to_mfn(rd, gfn, &p2mt);
     else
     {
-        mfn = gfn_to_mfn_unshare(p2m, gfn, &p2mt);
+        mfn = gfn_to_mfn_unshare(rd, gfn, &p2mt);
         BUG_ON(p2m_is_shared(p2mt));
         /* XXX Here, and above in gfn_to_mfn_private, need to handle
          * XXX failure to unshare. */
@@ -165,7 +163,7 @@
         *frame = mfn_x(mfn);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            p2m_mem_paging_populate(rd, gfn);
             rc = GNTST_eagain;
         }
     } else {
diff -r 0d3e0a571fdd -r de0a051b36ce xen/common/memory.c
--- a/xen/common/memory.c       Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/common/memory.c       Thu Jun 02 13:16:52 2011 +0100
@@ -162,11 +162,11 @@
     unsigned long mfn;
 
 #ifdef CONFIG_X86
-    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
+    mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
         guest_physmap_remove_page(d, gmfn, mfn, 0);
-        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
+        p2m_mem_paging_drop_page(d, gmfn);
         return 1;
     }
 #else
@@ -363,7 +363,7 @@
                 p2m_type_t p2mt;
 
                 /* Shared pages cannot be exchanged */
-                mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt));
+                mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt));
                 if ( p2m_is_shared(p2mt) )
                 {
                     rc = -ENOMEM;
diff -r 0d3e0a571fdd -r de0a051b36ce xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/common/tmem_xen.c     Thu Jun 02 13:16:52 2011 +0100
@@ -109,7 +109,7 @@
     struct page_info *page;
     int ret;
 
-    cli_mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(current->domain), cmfn, &t));
+    cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
     if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
             return NULL;
     page = mfn_to_page(cli_mfn);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/guest_pt.h    Thu Jun 02 13:16:52 2011 +0100
@@ -53,7 +53,7 @@
 
 /* Override gfn_to_mfn to work with gfn_t */
 #undef gfn_to_mfn
-#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
+#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
 
 
 /* Types of the guest's page tables and access functions for them */
diff -r 0d3e0a571fdd -r de0a051b36ce xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/mem_sharing.h Thu Jun 02 13:16:52 2011 +0100
@@ -30,16 +30,15 @@
 typedef uint64_t shr_handle_t; 
 
 unsigned int mem_sharing_get_nr_saved_mfns(void);
-int mem_sharing_nominate_page(struct p2m_domain *p2m, 
+int mem_sharing_nominate_page(struct domain *d, 
                               unsigned long gfn,
                               int expected_refcnt,
                               shr_handle_t *phandle);
 #define MEM_SHARING_DESTROY_GFN       (1<<1)
-int mem_sharing_unshare_page(struct p2m_domain *p2m, 
+int mem_sharing_unshare_page(struct domain *d, 
                              unsigned long gfn, 
                              uint16_t flags);
 int mem_sharing_sharing_resume(struct domain *d);
-int mem_sharing_cache_resize(struct p2m_domain *p2m, int new_size);
 int mem_sharing_domctl(struct domain *d, 
                        xen_domctl_mem_sharing_op_t *mec);
 void mem_sharing_init(void);
diff -r 0d3e0a571fdd -r de0a051b36ce xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -359,8 +359,10 @@
     } while (0)
 
 
-/* Read P2M table, mapping pages as we go.
- * Do not populate PoD pages. */
+/* Read a particular P2M table, mapping pages as we go.  Most callers
+ * should _not_ call this directly; use the other gfn_to_mfn_* functions
+ * below unless you know you want to walk a p2m that isn't a domain's
+ * main one. */
 static inline mfn_t
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
@@ -370,7 +372,8 @@
 #ifdef __x86_64__
     if ( q == p2m_unshare && p2m_is_shared(*t) )
     {
-        mem_sharing_unshare_page(p2m, gfn, 0);
+        ASSERT(!p2m_is_nestedp2m(p2m));
+        mem_sharing_unshare_page(p2m->domain, gfn, 0);
         mfn = p2m->get_entry(p2m, gfn, t, a, q);
     }
 #endif
@@ -390,10 +393,11 @@
 
 
 /* General conversion function from gfn to mfn */
-static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
-                                     unsigned long gfn, p2m_type_t *t,
-                                     p2m_query_t q)
+static inline mfn_t gfn_to_mfn_type(struct domain *d,
+                                    unsigned long gfn, p2m_type_t *t,
+                                    p2m_query_t q)
 {
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_access_t a;
 
     if ( !p2m || !paging_mode_translate(p2m->domain) )
@@ -407,17 +411,17 @@
     return gfn_to_mfn_type_p2m(p2m, gfn, t, &a, q);
 }
 
-#define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
-#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
-#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)
-#define gfn_to_mfn_unshare(p, g, t) _gfn_to_mfn_type((p), (g), (t), p2m_unshare)
+#define gfn_to_mfn(d, g, t)         gfn_to_mfn_type((d), (g), (t), p2m_alloc)
+#define gfn_to_mfn_query(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_query)
+#define gfn_to_mfn_guest(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_guest)
+#define gfn_to_mfn_unshare(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_unshare)
 
 /* Compatibility function exporting the old untyped interface */
 static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
 {
     mfn_t mfn;
     p2m_type_t t;
-    mfn = gfn_to_mfn(d->arch.p2m, gpfn, &t);
+    mfn = gfn_to_mfn(d, gpfn, &t);
     if ( p2m_is_valid(t) )
         return mfn_x(mfn);
     return INVALID_MFN;
@@ -445,45 +449,39 @@
 void p2m_final_teardown(struct domain *d);
 
 /* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                             unsigned long mfn, unsigned int page_order, 
                             p2m_type_t t);
 
-/* Remove a page from a domain's p2m table */
-void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
-                            unsigned long mfn, unsigned int page_order);
-
-/* Set a p2m range as populate-on-demand */
-int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
-                                          unsigned int order);
-
 /* Untyped version for RAM only, for compatibility */
 static inline int guest_physmap_add_page(struct domain *d,
                                          unsigned long gfn,
                                          unsigned long mfn,
                                          unsigned int page_order)
 {
-    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
 }
 
 /* Remove a page from a domain's p2m table */
-static inline void guest_physmap_remove_page(struct domain *d,
+void guest_physmap_remove_page(struct domain *d,
                                unsigned long gfn,
-                               unsigned long mfn, unsigned int page_order)
-{
-    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
-}
+                               unsigned long mfn, unsigned int page_order);
+
+/* Set a p2m range as populate-on-demand */
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                          unsigned int order);
 
 /* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
+void p2m_change_entry_type_global(struct domain *d, 
+                                  p2m_type_t ot, p2m_type_t nt);
 
 /* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
+p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt);
 
 /* Set mmio addresses in the p2m table (for pass-through) */
-int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
-int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
+int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 
 
 /* 
@@ -491,7 +489,7 @@
  */
 
 /* Dump PoD information about the domain */
-void p2m_pod_dump_data(struct p2m_domain *p2m);
+void p2m_pod_dump_data(struct domain *d);
 
 /* Move all pages from the populate-on-demand cache to the domain page_list
  * (usually in preparation for domain destruction) */
@@ -508,12 +506,6 @@
                              xen_pfn_t gpfn,
                              unsigned int order);
 
-/* Called by p2m code when demand-populating a PoD page */
-int
-p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
-                        unsigned int order,
-                        p2m_query_t q);
-
 /* Scan pod cache when offline/broken page triggered */
 int
 p2m_pod_offline_or_broken_hit(struct page_info *p);
@@ -522,30 +514,31 @@
 void
 p2m_pod_offline_or_broken_replace(struct page_info *p);
 
+
 /*
  * Paging to disk and page-sharing
  */
 
 #ifdef __x86_64__
 /* Modify p2m table for shared gfn */
-int set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 
 /* Check if a nominated gfn is valid to be paged out */
-int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
 /* Evict a frame */
-int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
 /* Tell xenpaging to drop a paged out frame */
-void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
+void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn);
 /* Start populating a paged out frame */
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
-int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
-void p2m_mem_paging_resume(struct p2m_domain *p2m);
+void p2m_mem_paging_resume(struct domain *d);
 #else
-static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+static inline void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
 { }
-static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 { }
 #endif
 
@@ -563,6 +556,10 @@
 { }
 #endif
 
+/* 
+ * Internal functions, only called by other p2m code
+ */
+
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
@@ -619,6 +616,11 @@
 #define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
 #endif
 
+/* Called by p2m code when demand-populating a PoD page */
+int
+p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
+                        unsigned int order,
+                        p2m_query_t q);
 
 /*
  * Functions specific to the p2m-pt implementation
@@ -642,7 +644,7 @@
 }
 
 /*
- * Nested p2m: shadow p2m tables used for nexted HVM virtualization 
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization 
  */
 
 /* Flushes specified p2m table */
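
For callers, the interface change is mechanical: code that previously fetched the host p2m with p2m_get_hostp2m() and passed it to the lookup helpers now passes the domain itself. A minimal caller-side sketch follows (the example_lookup() wrapper is hypothetical and only illustrative, assuming xen/include/asm-x86/p2m.h is in scope):

    /* Old style (before this change):
     *     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     *     mfn = gfn_to_mfn_query(p2m, gfn, &p2mt);
     */
    static mfn_t example_lookup(struct domain *d, unsigned long gfn)
    {
        p2m_type_t p2mt;

        /* New style: pass the domain; the helper resolves the host p2m
         * internally.  The query variant does not populate PoD entries. */
        return gfn_to_mfn_query(d, gfn, &p2mt);
    }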

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
