
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Clean up the interface for sharing xen-heap pages with guests.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 09 Mar 2006 03:24:07 +0000
Delivery-date: Thu, 09 Mar 2006 03:24:56 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID a4dc14edd56b2b59fa3f7797b048ce6ee8efc9e9
# Parent  4af3f8bd7e0c67977df60409b13c785c6e451e91
Clean up the interface for sharing xen-heap pages with guests.
Map trace buffer pages as DOMID_XEN pages.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
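
For reference, a minimal sketch (not part of the patch) of how the reworked
sharing interface is meant to be called. share_xen_page_with_guest(),
share_xen_page_with_privileged_guests() and the XENSHARE_* flags come from
the patch below; the wrapper function, its arguments and the exact headers
are illustrative assumptions.

    #include <xen/sched.h>   /* struct domain */
    #include <asm/mm.h>      /* share_xen_page_with_guest(), XENSHARE_* (assumed header) */

    /* Sketch only: hand xen-heap pages to guests via the new interface. */
    static void example_share(struct domain *d, void *xenheap_va)
    {
        /* Share one xen-heap page writably with a specific guest... */
        share_xen_page_with_guest(virt_to_page(xenheap_va), d,
                                  XENSHARE_writable);

        /* ...or read-only with dom_xen, so any sufficiently privileged
         * guest may map it. */
        share_xen_page_with_privileged_guests(virt_to_page(xenheap_va),
                                              XENSHARE_readonly);
    }

On the tools side the same change means xentrace and xenbaked now pass
DOMID_XEN (rather than domain 0) to xc_map_foreign_range(), as the first two
hunks of the diff show.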

diff -r 4af3f8bd7e0c -r a4dc14edd56b tools/xenmon/xenbaked.c
--- a/tools/xenmon/xenbaked.c   Wed Mar  8 23:04:43 2006
+++ b/tools/xenmon/xenbaked.c   Wed Mar  8 23:45:40 2006
@@ -299,7 +299,7 @@
         exit(EXIT_FAILURE);
     }
 
-    tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
+    tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                         size * num, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
diff -r 4af3f8bd7e0c -r a4dc14edd56b tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Wed Mar  8 23:04:43 2006
+++ b/tools/xentrace/xentrace.c Wed Mar  8 23:45:40 2006
@@ -144,7 +144,7 @@
         exit(EXIT_FAILURE);
     }
 
-    tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
+    tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                         size * num, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Mar  8 23:04:43 2006
+++ b/xen/arch/x86/domain.c     Wed Mar  8 23:45:40 2006
@@ -312,7 +312,8 @@
             goto fail_nomem;
 
         memset(d->shared_info, 0, PAGE_SIZE);
-        SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+        share_xen_page_with_guest(
+            virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
     return 0;
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Mar  8 23:04:43 2006
+++ b/xen/arch/x86/mm.c Wed Mar  8 23:45:40 2006
@@ -176,10 +176,9 @@
 
 void arch_init_memory(void)
 {
-    extern void subarch_init_memory(struct domain *);
+    extern void subarch_init_memory(void);
 
     unsigned long i, pfn, rstart_pfn, rend_pfn;
-    struct page_info *page;
 
     memset(percpu_info, 0, sizeof(percpu_info));
 
@@ -189,6 +188,7 @@
      * their domain field set to dom_xen.
      */
     dom_xen = alloc_domain();
+    spin_lock_init(&dom_xen->page_alloc_lock);
     atomic_set(&dom_xen->refcnt, 1);
     dom_xen->domain_id = DOMID_XEN;
 
@@ -198,17 +198,13 @@
      * array. Mappings occur at the priv of the caller.
      */
     dom_io = alloc_domain();
+    spin_lock_init(&dom_io->page_alloc_lock);
     atomic_set(&dom_io->refcnt, 1);
     dom_io->domain_id = DOMID_IO;
 
     /* First 1MB of RAM is historically marked as I/O. */
     for ( i = 0; i < 0x100; i++ )
-    {
-        page = mfn_to_page(i);
-        page->count_info        = PGC_allocated | 1;
-        page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
-        page_set_owner(page, dom_io);
-    }
+        share_xen_page_with_guest(mfn_to_page(i), dom_io, XENSHARE_writable);
  
     /* Any areas not specified as RAM by the e820 map are considered I/O. */
     for ( i = 0, pfn = 0; i < e820.nr_map; i++ )
@@ -221,17 +217,45 @@
         for ( ; pfn < rstart_pfn; pfn++ )
         {
             BUG_ON(!mfn_valid(pfn));
-            page = mfn_to_page(pfn);
-            page->count_info        = PGC_allocated | 1;
-            page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
-            page_set_owner(page, dom_io);
+            share_xen_page_with_guest(
+                mfn_to_page(pfn), dom_io, XENSHARE_writable);
         }
         /* Skip the RAM region. */
         pfn = rend_pfn;
     }
     BUG_ON(pfn != max_page);
 
-    subarch_init_memory(dom_xen);
+    subarch_init_memory();
+}
+
+void share_xen_page_with_guest(
+    struct page_info *page, struct domain *d, int readonly)
+{
+    if ( page_get_owner(page) == d )
+        return;
+
+    spin_lock(&d->page_alloc_lock);
+
+    /* The incremented type count pins as writable or read-only. */
+    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
+    page->u.inuse.type_info |= PGT_validated | 1;
+
+    page_set_owner(page, d);
+    wmb(); /* install valid domain ptr before updating refcnt. */
+    ASSERT(page->count_info == 0);
+    page->count_info |= PGC_allocated | 1;
+
+    if ( unlikely(d->xenheap_pages++ == 0) )
+        get_knownalive_domain(d);
+    list_add_tail(&page->list, &d->xenpage_list);
+
+    spin_unlock(&d->page_alloc_lock);
+}
+
+void share_xen_page_with_privileged_guests(
+    struct page_info *page, int readonly)
+{
+    share_xen_page_with_guest(page, dom_xen, readonly);
 }
 
 void write_ptbase(struct vcpu *v)
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Wed Mar  8 23:04:43 2006
+++ b/xen/arch/x86/x86_32/mm.c  Wed Mar  8 23:45:40 2006
@@ -144,7 +144,7 @@
     flush_tlb_all_pge();
 }
 
-void subarch_init_memory(struct domain *dom_xen)
+void subarch_init_memory(void)
 {
     unsigned long m2p_start_mfn;
     unsigned int i, j;
@@ -175,10 +175,7 @@
         for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
         {
             struct page_info *page = mfn_to_page(m2p_start_mfn + j);
-            page->count_info = PGC_allocated | 1;
-            /* Ensure it's only mapped read-only by domains. */
-            page->u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(page, dom_xen);
+            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
 
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Wed Mar  8 23:04:43 2006
+++ b/xen/arch/x86/x86_64/mm.c  Wed Mar  8 23:45:40 2006
@@ -134,7 +134,7 @@
     flush_tlb_all_pge();
 }
 
-void subarch_init_memory(struct domain *dom_xen)
+void subarch_init_memory(void)
 {
     unsigned long i, v, m2p_start_mfn;
     l3_pgentry_t l3e;
@@ -174,11 +174,7 @@
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             struct page_info *page = mfn_to_page(m2p_start_mfn + i);
-            page->count_info = PGC_allocated | 1;
-            /* gdt to make sure it's only mapped read-only by non-privileged
-               domains. */
-            page->u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(page, dom_xen);
+            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
 }
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/common/trace.c
--- a/xen/common/trace.c        Wed Mar  8 23:04:43 2006
+++ b/xen/common/trace.c        Wed Mar  8 23:45:40 2006
@@ -83,8 +83,9 @@
 
     /* Share pages so that xentrace can map them. */
     for ( i = 0; i < nr_pages; i++ )
-        SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf + i * PAGE_SIZE), dom0);
-    
+        share_xen_page_with_privileged_guests(
+            virt_to_page(rawbuf) + i, XENSHARE_writable);
+
     for_each_online_cpu ( i )
     {
         buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Mar  8 23:04:43 2006
+++ b/xen/include/asm-ia64/mm.h Wed Mar  8 23:45:40 2006
@@ -118,7 +118,8 @@
 #define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
 /* Dummy now */
-#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
+#define share_xen_page_with_guest(p, d, r) do { } while (0)
+#define share_xen_page_with_privileged_guests(p, r) do { } while (0)
 
 extern struct page_info *frame_table;
 extern unsigned long frame_table_size;
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Wed Mar  8 23:04:43 2006
+++ b/xen/include/asm-x86/grant_table.h Wed Mar  8 23:45:40 2006
@@ -23,8 +23,9 @@
 
 #define gnttab_create_shared_page(d, t, i)                               \
     do {                                                                 \
-        SHARE_PFN_WITH_DOMAIN(                                           \
-            virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), (d)); \
+        share_xen_page_with_guest(                                       \
+            virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)),       \
+            (d), XENSHARE_writable);                                     \
         set_gpfn_from_mfn(                                               \
             (virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i),            \
             INVALID_M2P_ENTRY);                                          \
diff -r 4af3f8bd7e0c -r a4dc14edd56b xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Mar  8 23:04:43 2006
+++ b/xen/include/asm-x86/mm.h  Wed Mar  8 23:45:40 2006
@@ -138,21 +138,12 @@
 #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
 #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
-#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
-    do {                                                                    \
-        page_set_owner((_pfn), (_dom));                                     \
-        /* The incremented type count is intended to pin to 'writable'. */  \
-        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
-        wmb(); /* install valid domain ptr before updating refcnt. */       \
-        spin_lock(&(_dom)->page_alloc_lock);                                \
-        /* _dom holds an allocation reference */                            \
-        ASSERT((_pfn)->count_info == 0);                                    \
-        (_pfn)->count_info |= PGC_allocated | 1;                            \
-        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
-            get_knownalive_domain(_dom);                                    \
-        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
-        spin_unlock(&(_dom)->page_alloc_lock);                              \
-    } while ( 0 )
+#define XENSHARE_writable 0
+#define XENSHARE_readonly 1
+extern void share_xen_page_with_guest(
+    struct page_info *page, struct domain *d, int readonly);
+extern void share_xen_page_with_privileged_guests(
+    struct page_info *page, int readonly);
 
 extern struct page_info *frame_table;
 extern unsigned long max_page;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
