To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Fix maddr_from_mapped_domain_page().
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 21 Aug 2006 13:50:12 +0000
Delivery-date: Mon, 21 Aug 2006 06:50:55 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx

# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 36220033c67f80d698dfe70f080b5b5316aea982
# Parent  069ac1bb78669c19d1570d32d48be8429ec1e81a
[XEN] Fix maddr_from_mapped_domain_page().
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/shadow2.c            |    2 
 xen/arch/x86/x86_32/domain_page.c |   91 ++++++++++++++++++++------------------
 xen/include/asm-x86/domain.h      |    2 
 xen/include/xen/domain_page.h     |   50 +++++++++-----------
 4 files changed, 75 insertions(+), 70 deletions(-)
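
The patch below renames mapped_domain_page_to_maddr() to maddr_from_mapped_domain_page() and reimplements it so that it accepts pointers obtained from map_domain_page_global() as well as from the per-VCPU mapcache. To illustrate the round trip the function provides (map a machine frame, take a pointer anywhere inside the page, recover the machine address), here is a minimal self-contained C sketch; every toy_* name and constant is invented for the sketch and is not part of the Xen API:

    /* Toy model of the round trip: map a frame, point into the page,
     * recover the machine address.  Not Xen code; all names invented. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12
    #define TOY_PAGE_SIZE  (1UL << TOY_PAGE_SHIFT)
    #define TOY_SLOTS      8

    /* Fake "l1tab": mapping-slot index -> machine frame number (~0UL = free). */
    static unsigned long toy_l1tab[TOY_SLOTS];
    /* Fake VA window: slot i is the page toy_window[i]. */
    static unsigned char toy_window[TOY_SLOTS][TOY_PAGE_SIZE];

    static void *toy_map_frame(unsigned long mfn)
    {
        for (unsigned int i = 0; i < TOY_SLOTS; i++)
            if (toy_l1tab[i] == ~0UL) {
                toy_l1tab[i] = mfn;
                return toy_window[i];
            }
        return NULL; /* window full */
    }

    /* The conversion the patch fixes: pointer inside a mapped page -> maddr. */
    static uint64_t toy_maddr_from_va(void *va)
    {
        uintptr_t off = (uintptr_t)va - (uintptr_t)toy_window[0];
        unsigned int idx = off >> TOY_PAGE_SHIFT;   /* which mapping slot? */
        unsigned long mfn = toy_l1tab[idx];         /* slot -> machine frame */
        assert(mfn != ~0UL);
        return ((uint64_t)mfn << TOY_PAGE_SHIFT) | (off & (TOY_PAGE_SIZE - 1));
    }

    int main(void)
    {
        for (unsigned int i = 0; i < TOY_SLOTS; i++)
            toy_l1tab[i] = ~0UL;
        unsigned char *p = toy_map_frame(0x1234); /* map machine frame 0x1234 */
        printf("maddr = %#llx\n",                 /* prints 0x1234056 */
               (unsigned long long)toy_maddr_from_va(p + 0x56));
        return 0;
    }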

diff -r 069ac1bb7866 -r 36220033c67f xen/arch/x86/shadow2.c
--- a/xen/arch/x86/shadow2.c    Mon Aug 21 10:28:02 2006 +0100
+++ b/xen/arch/x86/shadow2.c    Mon Aug 21 11:39:27 2006 +0100
@@ -2293,7 +2293,7 @@ static void sh2_destroy_l3_subshadow(str
     for ( i = 0; i < GUEST_L3_PAGETABLE_ENTRIES; i++ ) 
         if ( shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT ) 
             sh2_put_ref(v, shadow_l3e_get_mfn(sl3e[i]),
-                        mapped_domain_page_to_maddr(sl3e));
+                        maddr_from_mapped_domain_page(sl3e));
 }
 #endif
 
diff -r 069ac1bb7866 -r 36220033c67f xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Mon Aug 21 10:28:02 2006 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Mon Aug 21 11:39:27 2006 +0100
@@ -41,7 +41,7 @@ static inline struct vcpu *mapcache_curr
     return v;
 }
 
-void *map_domain_page(unsigned long pfn)
+void *map_domain_page(unsigned long mfn)
 {
     unsigned long va;
     unsigned int idx, i, vcpu;
@@ -58,13 +58,14 @@ void *map_domain_page(unsigned long pfn)
     vcpu  = v->vcpu_id;
     cache = &v->domain->arch.mapcache;
 
-    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
-    if ( hashent->pfn == pfn && (idx = hashent->idx) != MAPHASHENT_NOTINUSE )
-    {
+    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(mfn)];
+    if ( hashent->mfn == mfn )
+    {
+        idx = hashent->idx;
         hashent->refcnt++;
         ASSERT(idx < MAPCACHE_ENTRIES);
         ASSERT(hashent->refcnt != 0);
-        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
+        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == mfn);
         goto out;
     }
 
@@ -106,7 +107,7 @@ void *map_domain_page(unsigned long pfn)
 
     spin_unlock(&cache->lock);
 
-    cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+    cache->l1tab[idx] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
 
  out:
     va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
@@ -118,7 +119,7 @@ void unmap_domain_page(void *va)
     unsigned int idx;
     struct vcpu *v;
     struct mapcache *cache;
-    unsigned long pfn;
+    unsigned long mfn;
     struct vcpu_maphash_entry *hashent;
 
     ASSERT(!in_irq());
@@ -131,12 +132,12 @@ void unmap_domain_page(void *va)
     cache = &v->domain->arch.mapcache;
 
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    pfn = l1e_get_pfn(cache->l1tab[idx]);
-    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
+    mfn = l1e_get_pfn(cache->l1tab[idx]);
+    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(mfn)];
 
     if ( hashent->idx == idx )
     {
-        ASSERT(hashent->pfn == pfn);
+        ASSERT(hashent->mfn == mfn);
         ASSERT(hashent->refcnt != 0);
         hashent->refcnt--;
     }
@@ -145,14 +146,14 @@ void unmap_domain_page(void *va)
         if ( hashent->idx != MAPHASHENT_NOTINUSE )
         {
             /* /First/, zap the PTE. */
-            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->pfn);
+            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->mfn);
             cache->l1tab[hashent->idx] = l1e_empty();
             /* /Second/, mark as garbage. */
             set_bit(hashent->idx, cache->garbage);
         }
 
         /* Add newly-freed mapping to the maphash. */
-        hashent->pfn = pfn;
+        hashent->mfn = mfn;
         hashent->idx = idx;
     }
     else
@@ -167,6 +168,7 @@ void mapcache_init(struct domain *d)
 void mapcache_init(struct domain *d)
 {
     unsigned int i, j;
+    struct vcpu_maphash_entry *hashent;
 
     d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
         (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
@@ -174,33 +176,14 @@ void mapcache_init(struct domain *d)
 
     /* Mark all maphash entries as not in use. */
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+    {
         for ( j = 0; j < MAPHASH_ENTRIES; j++ )
-            d->arch.mapcache.vcpu_maphash[i].hash[j].idx =
-                MAPHASHENT_NOTINUSE;
-}
-
-paddr_t mapped_domain_page_to_maddr(void *va) 
-/* Convert a pointer in a mapped domain page to a machine address. 
- * Takes any pointer that's valid for use in unmap_domain_page() */
-{
-    unsigned int idx;
-    struct vcpu *v;
-    struct mapcache *cache;
-    unsigned long pfn;
-
-    ASSERT(!in_irq());
-
-    ASSERT((void *)MAPCACHE_VIRT_START <= va);
-    ASSERT(va < (void *)MAPCACHE_VIRT_END);
-
-    v = mapcache_current_vcpu();
-
-    cache = &v->domain->arch.mapcache;
-
-    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    pfn = l1e_get_pfn(cache->l1tab[idx]);
-    return ((paddr_t) pfn << PAGE_SHIFT 
-            | ((unsigned long) va & ~PAGE_MASK));
+        {
+            hashent = &d->arch.mapcache.vcpu_maphash[i].hash[j];
+            hashent->mfn = ~0UL; /* never valid to map */
+            hashent->idx = MAPHASHENT_NOTINUSE;
+        }
+    }
 }
 
 #define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
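
A note on the mapcache_init() change just above: seeding every maphash entry with mfn = ~0UL, a frame number that is never valid to map, is what lets the hit test in map_domain_page() drop the separate idx != MAPHASHENT_NOTINUSE comparison and test only the mfn. A compilable toy version of that sentinel trick (all names and sizes here are invented for the sketch):

    #include <assert.h>

    #define TOY_HASH_ENTRIES 8
    #define TOY_NOTINUSE     0xffffU
    #define TOY_HASHFN(mfn)  ((mfn) % TOY_HASH_ENTRIES)

    struct toy_hashent { unsigned long mfn; unsigned short idx; };
    static struct toy_hashent toy_hash[TOY_HASH_ENTRIES];

    static void toy_hash_init(void)
    {
        for (int i = 0; i < TOY_HASH_ENTRIES; i++) {
            toy_hash[i].mfn = ~0UL;        /* sentinel: never a valid mfn */
            toy_hash[i].idx = TOY_NOTINUSE;
        }
    }

    /* With the sentinel, a lookup needs only the mfn compare; the old
     * extra "idx != NOTINUSE" test is subsumed. */
    static int toy_hash_lookup(unsigned long mfn, unsigned short *idx)
    {
        struct toy_hashent *e = &toy_hash[TOY_HASHFN(mfn)];
        if (e->mfn == mfn) {
            *idx = e->idx;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned short idx;
        toy_hash_init();
        assert(!toy_hash_lookup(0, &idx));   /* mfn 0 cannot false-hit */
        toy_hash[TOY_HASHFN(42)].mfn = 42;   /* cache frame 42 in slot 3 */
        toy_hash[TOY_HASHFN(42)].idx = 3;
        assert(toy_hash_lookup(42, &idx) && idx == 3);
        return 0;
    }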
@@ -209,7 +192,7 @@ static unsigned int inuse_cursor;
 static unsigned int inuse_cursor;
 static DEFINE_SPINLOCK(globalmap_lock);
 
-void *map_domain_page_global(unsigned long pfn)
+void *map_domain_page_global(unsigned long mfn)
 {
     l2_pgentry_t *pl2e;
     l1_pgentry_t *pl1e;
@@ -246,7 +229,7 @@ void *map_domain_page_global(unsigned lo
 
     pl2e = virt_to_xen_l2e(va);
     pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+    *pl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
 
     return (void *)va;
 }
@@ -258,7 +241,7 @@ void unmap_domain_page_global(void *va)
     l1_pgentry_t *pl1e;
     unsigned int idx;
 
-    ASSERT((__va >= IOREMAP_VIRT_START) && (__va <= (IOREMAP_VIRT_END - 1)));
+    ASSERT((__va >= IOREMAP_VIRT_START) && (__va < IOREMAP_VIRT_END));
 
     /* /First/, we zap the PTE. */
     pl2e = virt_to_xen_l2e(__va);
@@ -269,3 +252,29 @@ void unmap_domain_page_global(void *va)
     idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
     set_bit(idx, garbage);
 }
+
+paddr_t maddr_from_mapped_domain_page(void *va) 
+{
+    unsigned long __va = (unsigned long)va;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned int idx;
+    struct mapcache *cache;
+    unsigned long mfn;
+
+    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
+    {
+        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
+        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
+        mfn = l1e_get_pfn(cache->l1tab[idx]);
+    }
+    else
+    {
+        ASSERT((__va >= IOREMAP_VIRT_START) && (__va < IOREMAP_VIRT_END));
+        pl2e = virt_to_xen_l2e(__va);
+        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
+        mfn = l1e_get_pfn(*pl1e);
+    }
+    
+    return ((paddr_t)mfn << PAGE_SHIFT) | ((unsigned long)va & ~PAGE_MASK);
+}
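
The new maddr_from_mapped_domain_page() above dispatches on the address range: mapcache VAs are resolved through the per-domain l1tab, while anything else is asserted to lie in the ioremap range and is resolved through Xen's own page tables. A self-contained toy version of that two-range dispatch (the ranges, table sizes, and names are invented for the sketch; the real code walks actual page tables rather than flat arrays):

    #include <assert.h>
    #include <stdint.h>

    #define TOY_PAGE_SHIFT  12
    #define TOY_PAGE_MASK   ((1UL << TOY_PAGE_SHIFT) - 1)

    /* Invented ranges standing in for MAPCACHE_* and IOREMAP_*. */
    #define TOY_MAPCACHE_START  0x10000000UL
    #define TOY_MAPCACHE_END    0x10080000UL
    #define TOY_GLOBAL_START    0x20000000UL
    #define TOY_GLOBAL_END      0x20080000UL
    #define TOY_SLOTS           128

    static unsigned long toy_percpu_l1tab[TOY_SLOTS]; /* per-VCPU mapcache */
    static unsigned long toy_global_l1tab[TOY_SLOTS]; /* global map region */

    static uint64_t toy_maddr_from_mapped_va(unsigned long va)
    {
        unsigned long mfn;

        if (va >= TOY_MAPCACHE_START && va < TOY_MAPCACHE_END)
            mfn = toy_percpu_l1tab[(va - TOY_MAPCACHE_START) >> TOY_PAGE_SHIFT];
        else {
            assert(va >= TOY_GLOBAL_START && va < TOY_GLOBAL_END);
            mfn = toy_global_l1tab[(va - TOY_GLOBAL_START) >> TOY_PAGE_SHIFT];
        }
        return ((uint64_t)mfn << TOY_PAGE_SHIFT) | (va & TOY_PAGE_MASK);
    }

    int main(void)
    {
        toy_percpu_l1tab[2] = 0xabc; /* third mapcache slot -> frame 0xabc */
        toy_global_l1tab[5] = 0xdef; /* sixth global slot   -> frame 0xdef */
        assert(toy_maddr_from_mapped_va(TOY_MAPCACHE_START + 2 * 4096 + 0x10)
               == ((0xabcULL << 12) | 0x10));
        assert(toy_maddr_from_mapped_va(TOY_GLOBAL_START + 5 * 4096 + 0x20)
               == ((0xdefULL << 12) | 0x20));
        return 0;
    }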
diff -r 069ac1bb7866 -r 36220033c67f xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Mon Aug 21 10:28:02 2006 +0100
+++ b/xen/include/asm-x86/domain.h      Mon Aug 21 11:39:27 2006 +0100
@@ -18,7 +18,7 @@ struct trap_bounce {
 #define MAPHASHENT_NOTINUSE ((u16)~0U)
 struct vcpu_maphash {
     struct vcpu_maphash_entry {
-        unsigned long pfn;
+        unsigned long mfn;
         uint16_t      idx;
         uint16_t      refcnt;
     } hash[MAPHASH_ENTRIES];
diff -r 069ac1bb7866 -r 36220033c67f xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Mon Aug 21 10:28:02 2006 +0100
+++ b/xen/include/xen/domain_page.h     Mon Aug 21 11:39:27 2006 +0100
@@ -18,34 +18,34 @@
  * Map a given page frame, returning the mapped virtual address. The page is
  * then accessible within the current VCPU until a corresponding unmap call.
  */
-extern void *map_domain_page(unsigned long pfn);
+void *map_domain_page(unsigned long mfn);
 
 /*
  * Pass a VA within a page previously mapped in the context of the
- * currently-executing VCPU via a call to map_domain_pages().
+ * currently-executing VCPU via a call to map_domain_page().
  */
-extern void unmap_domain_page(void *va);
-
-/* 
- * Convert a VA (within a page previously mapped in the context of the
- * currently-executing VCPU via a call to map_domain_pages()) to a machine 
- * address 
- */
-extern paddr_t mapped_domain_page_to_maddr(void *va);
+void unmap_domain_page(void *va);
 
 /*
  * Similar to the above calls, except the mapping is accessible in all
  * address spaces (not just within the VCPU that created the mapping). Global
  * mappings can also be unmapped from any context.
  */
-extern void *map_domain_page_global(unsigned long pfn);
-extern void unmap_domain_page_global(void *va);
+void *map_domain_page_global(unsigned long mfn);
+void unmap_domain_page_global(void *va);
+
+/* 
+ * Convert a VA (within a page previously mapped in the context of the
+ * currently-executing VCPU via a call to map_domain_page(), or via a
+ * previous call to map_domain_page_global()) to the mapped machine address.
+ */
+paddr_t maddr_from_mapped_domain_page(void *va);
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
 
 struct domain_mmap_cache {
-    unsigned long pfn;
+    unsigned long mfn;
     void         *va;
     unsigned int  flags;
 };
@@ -55,12 +55,12 @@ domain_mmap_cache_init(struct domain_mma
 {
     ASSERT(cache != NULL);
     cache->flags = 0;
-    cache->pfn = 0;
+    cache->mfn = 0;
     cache->va = NULL;
 }
 
 static inline void *
-map_domain_page_with_cache(unsigned long pfn, struct domain_mmap_cache *cache)
+map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
 {
     ASSERT(cache != NULL);
     BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
@@ -68,13 +68,13 @@ map_domain_page_with_cache(unsigned long
     if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
     {
         cache->flags |= DMCACHE_ENTRY_HELD;
-        if ( likely(pfn == cache->pfn) )
+        if ( likely(mfn == cache->mfn) )
             goto done;
         unmap_domain_page(cache->va);
     }
 
-    cache->pfn   = pfn;
-    cache->va    = map_domain_page(pfn);
+    cache->mfn   = mfn;
+    cache->va    = map_domain_page(mfn);
     cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
 
  done:
@@ -103,26 +103,22 @@ domain_mmap_cache_destroy(struct domain_
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(pfn)                maddr_to_virt((pfn)<<PAGE_SHIFT)
+#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
 #define unmap_domain_page(va)               ((void)(va))
-#define mapped_domain_page_to_maddr(va)     (virt_to_maddr(va))
 
-#define map_domain_page_global(pfn)         maddr_to_virt((pfn)<<PAGE_SHIFT)
+#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
 #define unmap_domain_page_global(va)        ((void)(va))
+
+#define maddr_from_mapped_domain_page(va)   (virt_to_maddr(va))
 
 struct domain_mmap_cache { 
 };
 
 #define domain_mmap_cache_init(c)           ((void)(c))
-#define map_domain_page_with_cache(pfn,c)   (map_domain_page(pfn))
+#define map_domain_page_with_cache(mfn,c)   (map_domain_page(mfn))
 #define unmap_domain_page_with_cache(va,c)  ((void)(va))
 #define domain_mmap_cache_destroy(c)        ((void)(c))
 
 #endif /* !CONFIG_DOMAIN_PAGE */
 
-#define HERE_I_AM \
-do { \
-    printk("HERE I AM: %s %s %d\n", __func__, __FILE__, __LINE__); \
-} while (0)
-
 #endif /* __XEN_DOMAIN_PAGE_H__ */
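
The header also retains the map_domain_page_with_cache() helper, unchanged apart from the pfn-to-mfn rename: a one-entry cache that skips the unmap/remap cycle when the same frame is requested repeatedly. A compilable toy model of that pattern, with invented toy_* stand-ins for the real map/unmap calls:

    #include <assert.h>
    #include <stddef.h>

    #define TOY_ENTRY_VALID 1U
    #define TOY_ENTRY_HELD  2U

    struct toy_cache { unsigned long mfn; void *va; unsigned int flags; };

    /* Stand-ins for map_domain_page()/unmap_domain_page(). */
    static unsigned long toy_map_calls;
    static void *toy_map(unsigned long mfn)
    {
        toy_map_calls++;
        return (void *)(mfn << 12); /* fake VA, never dereferenced */
    }
    static void toy_unmap(void *va) { (void)va; }

    static void *toy_map_with_cache(unsigned long mfn, struct toy_cache *c)
    {
        assert(!(c->flags & TOY_ENTRY_HELD)); /* one outstanding mapping */
        if (c->flags & TOY_ENTRY_VALID) {
            c->flags |= TOY_ENTRY_HELD;
            if (mfn == c->mfn)
                return c->va;                 /* hit: no remap needed */
            toy_unmap(c->va);
        }
        c->mfn   = mfn;
        c->va    = toy_map(mfn);
        c->flags = TOY_ENTRY_HELD | TOY_ENTRY_VALID;
        return c->va;
    }

    static void toy_unmap_with_cache(void *va, struct toy_cache *c)
    {
        (void)va;
        c->flags &= ~TOY_ENTRY_HELD;          /* keep the mapping cached */
    }

    int main(void)
    {
        struct toy_cache c = { 0, NULL, 0 };
        void *va = toy_map_with_cache(7, &c);
        toy_unmap_with_cache(va, &c);
        va = toy_map_with_cache(7, &c);       /* same frame: served from cache */
        toy_unmap_with_cache(va, &c);
        assert(toy_map_calls == 1);           /* only one real map happened */
        return 0;
    }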

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
