To: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Subject: [XenPPC] [xenppc-unstable] [POWERPC][XEN] Handle foreign page mappings correctly
From: Xen patchbot-xenppc-unstable <patchbot-xenppc-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 12 Sep 2006 15:30:33 +0000
Delivery-date: Tue, 12 Sep 2006 08:56:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID fe6690ca97009772f1702c7bf82f55004eb06eae
# Parent  2644e0336cc57aa2acf6009a93c57f2c1e143ff0
[POWERPC][XEN] Handle foreign page mappings correctly

This patch makes the following changes:
  - Add free_rma() to pair with allocate_rma() (usage sketch after the
    diffstat below)
  - Make allocate_rma() return -EINVAL if an RMA is already allocated
  - Rename PFN_TYPE_REMOTE to PFN_TYPE_FOREIGN
  - Check page_info more carefully in pfn2mfn()
  - Stop using RPN and LPN; use PFN and MFN respectively
  - Increase/decrease the foreign domain's refcount when another domain
    maps/unmaps its pages (condensed sketch at the end of the patch)

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 xen/arch/powerpc/domain.c     |    3 -
 xen/arch/powerpc/mm.c         |   96 ++++++++++++++++++++++++++----------------
 xen/arch/powerpc/papr/xlate.c |   60 ++++++++++++++++++++------
 xen/arch/powerpc/usercopy.c   |    4 -
 xen/include/asm-powerpc/mm.h  |    5 +-
 5 files changed, 112 insertions(+), 56 deletions(-)
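
For context, a short usage sketch of the new allocate_rma()/free_rma()
contract; the function names and the -EINVAL return come from the hunks
below, while the construction path around them is hypothetical and only
for illustration:

    /* Hypothetical construction path: */
    int rc = allocate_rma(d, order);
    if (rc == -EINVAL) {
        /* An RMA already exists; the old behaviour of silently freeing
         * and re-allocating it is gone. */
        return rc;
    }

    /* Teardown path, as wired up in domain.c by this patch: */
    void domain_relinquish_resources(struct domain *d)
    {
        free_rma(d);    /* checks d->arch.rma_page, so safe when no RMA */
        free_extents(d);
    }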

diff -r 2644e0336cc5 -r fe6690ca9700 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Tue Sep 12 10:53:46 2006 -0400
+++ b/xen/arch/powerpc/domain.c Tue Sep 12 11:01:04 2006 -0400
@@ -244,8 +244,7 @@ void sync_vcpu_execstate(struct vcpu *v)
 
 void domain_relinquish_resources(struct domain *d)
 {
-    if (d->arch.rma_page)
-        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+    free_rma(d);
     free_extents(d);
 }
 
diff -r 2644e0336cc5 -r fe6690ca9700 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Tue Sep 12 10:53:46 2006 -0400
+++ b/xen/arch/powerpc/mm.c     Tue Sep 12 11:01:04 2006 -0400
@@ -299,7 +299,7 @@ int allocate_rma(struct domain *d, unsig
     ulong rma_sz;
 
     if (d->arch.rma_page)
-        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+        return -EINVAL;
 
     d->arch.rma_page = alloc_domheap_pages(d, order, 0);
     if (d->arch.rma_page == NULL) {
@@ -329,50 +329,74 @@ int allocate_rma(struct domain *d, unsig
     return 0;
 }
 
+void free_rma(struct domain *d)
+{
+    if (d->arch.rma_page) {
+        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+    }
+}
+
 ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
     ulong rma_size_mfn = 1UL << d->arch.rma_order;
     struct page_extents *pe;
-
-    if (type)
-        *type = PFN_TYPE_NONE;
+    ulong mfn = INVALID_MFN;
+    int t = PFN_TYPE_NONE;
 
     /* quick tests first */
-    if (pfn < rma_size_mfn) {
-        if (type)
-            *type = PFN_TYPE_RMA;
-        return pfn + rma_base_mfn;
-    }
-
     if (test_bit(_DOMF_privileged, &d->domain_flags) &&
         cpu_io_mfn(pfn)) {
-        if (type)
-            *type = PFN_TYPE_IO;
-        return pfn;
-    }
-
-    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
-        uint end_pfn = pe->pfn + (1 << pe->order);
-
-        if (pfn >= pe->pfn && pfn < end_pfn) {
-            if (type)
-                *type = PFN_TYPE_LOGICAL;
-            return page_to_mfn(pe->pg) + (pfn - pe->pfn);
-        }
-    }
-
-    /* This hack allows dom0 to map all memory, necessary to
-     * initialize domU state. */
-    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-        pfn < max_page) {
-        if (type)
-            *type = PFN_TYPE_REMOTE;
-        return pfn;
-    }
-
-    BUG();
-    return INVALID_MFN;
+        t = PFN_TYPE_IO;
+        mfn = pfn;
+    } else {
+        if (pfn < rma_size_mfn) {
+            t = PFN_TYPE_RMA;
+            mfn = pfn + rma_base_mfn;
+        } else {
+            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+                uint end_pfn = pe->pfn + (1 << pe->order);
+
+                if (pfn >= pe->pfn && pfn < end_pfn) {
+                    t = PFN_TYPE_LOGICAL;
+                    mfn = page_to_mfn(pe->pg) + (pfn - pe->pfn);
+                    break;
+                }
+            }
+        }
+        BUG_ON(t != PFN_TYPE_NONE && page_get_owner(mfn_to_page(mfn)) != d);
+    }
+
+    if (t == PFN_TYPE_NONE) {
+        /* This hack allows dom0 to map all memory, necessary to
+         * initialize domU state. */
+        if (test_bit(_DOMF_privileged, &d->domain_flags) &&
+            mfn_valid(pfn)) {
+            struct page_info *pg;
+
+            /* the page must be owned by some domain, but not the caller */
+            pg = mfn_to_page(pfn);
+            if (!(pg->count_info & PGC_allocated))
+                panic("Foreign page: 0x%lx is not owned by any domain\n",
+                      pfn);
+            if (page_get_owner(pg) == d)
+                panic("Foreign page: 0x%lx is owned by this domain\n",
+                      pfn);
+
+            t = PFN_TYPE_FOREIGN;
+            mfn = pfn;
+        }
+    }
+
+    if (mfn == INVALID_MFN) {
+        printk("%s: Dom[%d] pfn 0x%lx is not a valid page\n",
+               __func__, d->domain_id, pfn);
+    }
+
+    if (type)
+        *type = t;
+
+    return mfn;
 }
 
 void guest_physmap_add_page(
diff -r 2644e0336cc5 -r fe6690ca9700 xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Tue Sep 12 10:53:46 2006 -0400
+++ b/xen/arch/powerpc/papr/xlate.c     Tue Sep 12 11:01:04 2006 -0400
@@ -118,8 +118,8 @@ static void h_enter(struct cpu_user_regs
     int pgshift = PAGE_SHIFT;
     ulong idx;
     int limit = 0;                /* how many PTEs to examine in the PTEG */
-    ulong lpn;
-    ulong rpn;
+    ulong pfn;
+    ulong mfn;
     struct vcpu *v = get_current();
     struct domain *d = v->domain;
     int mtype;
@@ -160,11 +160,11 @@ static void h_enter(struct cpu_user_regs
     /* get the correct logical RPN in terms of 4K pages; need to mask
      * off lp bits and unused arpn bits if this is a large page */
 
-    lpn = ~0ULL << (pgshift - PAGE_SHIFT);
-    lpn = pte.bits.rpn & lpn;
-
-    rpn = pfn2mfn(d, lpn, &mtype);
-    if (rpn == INVALID_MFN) {
+    pfn = ~0ULL << (pgshift - PAGE_SHIFT);
+    pfn = pte.bits.rpn & pfn;
+
+    mfn = pfn2mfn(d, pfn, &mtype);
+    if (mfn == INVALID_MFN) {
         regs->gprs[3] =  H_Parameter;
         return;
     }
@@ -173,8 +173,8 @@ static void h_enter(struct cpu_user_regs
         /* only a privileged dom can access outside IO space */
         if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
             regs->gprs[3] =  H_Privilege;
-            printk("%s: unprivileged access to logical page: 0x%lx\n",
-                   __func__, lpn);
+            printk("%s: unprivileged access to physical page: 0x%lx\n",
+                   __func__, pfn);
             return;
         }
 
@@ -192,7 +192,7 @@ static void h_enter(struct cpu_user_regs
         }
     }
     /* fixup the RPN field of our local PTE copy */
-    pte.bits.rpn = rpn | lp_bits;
+    pte.bits.rpn = mfn | lp_bits;
 
     /* clear reserved bits in high word */
     pte.bits.lock = 0x0;
@@ -211,12 +211,12 @@ static void h_enter(struct cpu_user_regs
 
         /* data manipulations should be done prior to the pte insertion. */
     if ( flags & H_ZERO_PAGE ) {
-        memset((void *)(rpn << PAGE_SHIFT), 0, 1UL << pgshift);
+        memset((void *)(mfn << PAGE_SHIFT), 0, 1UL << pgshift);
     }
 
     if ( flags & H_ICACHE_INVALIDATE ) {
         ulong k;
-        ulong addr = rpn << PAGE_SHIFT;
+        ulong addr = mfn << PAGE_SHIFT;
 
         for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
             dcbst(addr + k);
@@ -229,7 +229,7 @@ static void h_enter(struct cpu_user_regs
 
     if ( flags & H_ICACHE_SYNCHRONIZE ) {
         ulong k;
-        ulong addr = rpn << PAGE_SHIFT;
+        ulong addr = mfn << PAGE_SHIFT;
         for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
             icbi(addr + k);
             sync();
@@ -251,6 +251,26 @@ static void h_enter(struct cpu_user_regs
 
             regs->gprs[3] = H_Success;
             regs->gprs[4] = idx;
+
+
+            switch (mtype) {
+            case PFN_TYPE_IO:
+                break;
+            case PFN_TYPE_FOREIGN:
+            {
+                struct page_info *pg = mfn_to_page(mfn);
+                struct domain *f = page_get_owner(pg);
+
+                BUG_ON(f == d);
+                get_domain(f);
+            }
+                break;
+            case PFN_TYPE_RMA:
+            case PFN_TYPE_LOGICAL:
+                break;
+            default:
+                BUG();
+            }
 
             return;
         }
@@ -480,9 +500,21 @@ static void h_remove(struct cpu_user_reg
 
     /* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
     /* XXX - I think the spec should be questioned in this case (MFM) */
-    if (pte->bits.v == 0) {
+    if (lpte.bits.v == 0) {
         printk("%s: removing invalid entry\n", __func__);
     }
+
+    if (lpte.bits.v) {
+        ulong mfn = lpte.bits.rpn;
+        if (!cpu_io_mfn(mfn)) {
+            struct page_info *pg = mfn_to_page(mfn);
+            struct domain *f = page_get_owner(pg);
+
+            if (f != d)
+                put_domain(f);
+        }
+    }
+
     asm volatile("eieio; std %1, 0(%0); ptesync"
             :
             : "b" (pte), "r" (0)
diff -r 2644e0336cc5 -r fe6690ca9700 xen/arch/powerpc/usercopy.c
--- a/xen/arch/powerpc/usercopy.c       Tue Sep 12 10:53:46 2006 -0400
+++ b/xen/arch/powerpc/usercopy.c       Tue Sep 12 11:01:04 2006 -0400
@@ -57,10 +57,10 @@ static unsigned long paddr_to_maddr(unsi
     case PFN_TYPE_LOGICAL:
         break;
 
-    case PFN_TYPE_REMOTE:
+    case PFN_TYPE_FOREIGN:
         /* I don't think this should ever happen, but I suppose it
          * could be possible */
-        printk("%s: Dom:%d paddr: 0x%lx type: REMOTE\n",
+        printk("%s: Dom:%d paddr: 0x%lx type: FOREIGN\n",
                __func__, d->domain_id, paddr);
         WARN();
         break;
diff -r 2644e0336cc5 -r fe6690ca9700 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Tue Sep 12 10:53:46 2006 -0400
+++ b/xen/include/asm-powerpc/mm.h      Tue Sep 12 11:01:04 2006 -0400
@@ -154,7 +154,7 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) ) {
-        panic("about to free page\n");
+        panic("about to free page: 0x%lx\n", page_to_mfn(page));
         free_domheap_page(page);
     }
 }
@@ -239,7 +239,7 @@ extern int update_grant_va_mapping(unsig
 #define PFN_TYPE_RMA 1
 #define PFN_TYPE_LOGICAL 2
 #define PFN_TYPE_IO 3
-#define PFN_TYPE_REMOTE 4
+#define PFN_TYPE_FOREIGN 4
 
 extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
 
@@ -259,6 +259,7 @@ static inline unsigned long gmfn_to_mfn(
 #define mfn_to_gmfn(_d, mfn) (mfn)
 
 extern int allocate_rma(struct domain *d, unsigned int order_pages);
+extern void free_rma(struct domain *d);
 extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
 extern void free_extents(struct domain *d);
 

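To summarize the refcounting introduced above, the map/unmap pairing looks
like this in condensed form (taken straight from the h_enter/h_remove hunks
above, with the surrounding code elided):

    /* h_enter: mapping a foreign page pins its owning domain. */
    if (mtype == PFN_TYPE_FOREIGN) {
        struct domain *f = page_get_owner(mfn_to_page(mfn));

        BUG_ON(f == d);     /* pfn2mfn() panics on self-owned foreign pages */
        get_domain(f);      /* owner must not be destroyed while mapped */
    }

    /* h_remove: dropping a valid, non-IO mapping releases the pin. */
    if (lpte.bits.v && !cpu_io_mfn(lpte.bits.rpn)) {
        struct domain *f = page_get_owner(mfn_to_page(lpte.bits.rpn));

        if (f != d)         /* only foreign mappings took a reference */
            put_domain(f);
    }
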
_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
