WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[XenPPC] [xenppc-unstable] [IA64] GNTMAP_readonly support xen part

To: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Subject: [XenPPC] [xenppc-unstable] [IA64] GNTMAP_readonly support xen part
From: Xen patchbot-xenppc-unstable <patchbot-xenppc-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 02 Jun 2006 17:56:21 +0000
Delivery-date: Fri, 02 Jun 2006 10:58:08 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ppc-devel-request@lists.xensource.com?subject=help>
List-id: Xen PPC development <xen-ppc-devel.lists.xensource.com>
List-post: <mailto:xen-ppc-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ppc-devel>, <mailto:xen-ppc-devel-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-ppc-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 40959bc0a269ee71ba2378e7ffbff31c187a78d2
# Parent  2cab08ac143b73db874fc8691e9d9f8580ca7cc2
[IA64] GNTMAP_readonly support xen part

Add grant table GNTMAP_readonly support.
Introduce an ASSIGN_readonly flag for read-only page assignment to the
pseudo-physical address space.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_init.c   |    2 -
 xen/arch/ia64/xen/domain.c     |   68 +++++++++++++++++++----------------------
 xen/arch/ia64/xen/process.c    |   16 +++++++++
 xen/include/asm-ia64/domain.h  |    4 +-
 xen/include/public/arch-ia64.h |    4 ++
 5 files changed, 55 insertions(+), 39 deletions(-)
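
For readers skimming the diff below: the heart of the change is a pair of flag translations. create_grant_host_mapping() turns GNTMAP_readonly into the new ASSIGN_readonly flag, and the page-assignment helpers turn that flag into ia64 access-rights bits. The following is a minimal, self-contained sketch of just that logic, not the committed code; the _PAGE_AR_* values are assumed to follow the usual ia64 pgtable.h encoding and the GNTMAP_readonly bit position is assumed from the public grant_table.h.

#include <stdio.h>

/* Grant-table map flag; bit position assumed from public grant_table.h. */
#define GNTMAP_readonly   (1U << 2)

/* New assignment flags from the arch-ia64.h hunk of this patch. */
#define _ASSIGN_readonly  0
#define ASSIGN_readonly   (1UL << _ASSIGN_readonly)
#define ASSIGN_writable   (0UL << _ASSIGN_readonly)   /* dummy flag */

/* ia64 access-rights bits, stubbed with the assumed pgtable.h encoding. */
#define _PAGE_AR_R        (0UL << 9)
#define _PAGE_AR_RWX      (3UL << 9)

/* create_grant_host_mapping(): read-only grants become read-only
 * assignments, everything else stays writable. */
static unsigned long grant_to_assign_flags(unsigned int gntflags)
{
    return (gntflags & GNTMAP_readonly) ? ASSIGN_readonly : ASSIGN_writable;
}

/* __assign_domain_page() / assign_domain_page_replace(): the assignment
 * flag picks the access rights stored in the p2m pte. */
static unsigned long assign_to_arflags(unsigned long flags)
{
    return (flags & ASSIGN_readonly) ? _PAGE_AR_R : _PAGE_AR_RWX;
}

int main(void)
{
    unsigned long ro = grant_to_assign_flags(GNTMAP_readonly);
    unsigned long rw = grant_to_assign_flags(0);

    printf("read-only grant -> ar bits 0x%lx\n", assign_to_arflags(ro));
    printf("writable grant  -> ar bits 0x%lx\n", assign_to_arflags(rw));
    return 0;
}

With the assumed encodings the sketch prints 0x0 (_PAGE_AR_R) for a read-only grant and 0x600 (_PAGE_AR_RWX) for a writable one.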

diff -r 2cab08ac143b -r 40959bc0a269 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed May 24 10:39:55 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed May 24 10:56:47 2006 -0600
@@ -346,7 +346,7 @@ int vmx_build_physmap_table(struct domai
            for (j = io_ranges[i].start;
                 j < io_ranges[i].start + io_ranges[i].size;
                 j += PAGE_SIZE)
-               __assign_domain_page(d, j, io_ranges[i].type);
+               __assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable);
        }
 
        /* Map normal memory below 3G */
diff -r 2cab08ac143b -r 40959bc0a269 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed May 24 10:39:55 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Wed May 24 10:56:47 2006 -0600
@@ -834,17 +834,19 @@ assign_new_domain0_page(struct domain *d
 }
 
 /* map a physical address to the specified metaphysical addr */
+// flags: currently only ASSIGN_readonly
 void
 __assign_domain_page(struct domain *d,
-                     unsigned long mpaddr, unsigned long physaddr)
+                     unsigned long mpaddr, unsigned long physaddr,
+                     unsigned long flags)
 {
     pte_t *pte;
+    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
     if (pte_none(*pte)) {
-        set_pte(pte,
-                pfn_pte(physaddr >> PAGE_SHIFT,
-                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+        set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
+                             __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags)));
         mb ();
     } else
         printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
@@ -861,7 +863,7 @@ assign_domain_page(struct domain *d,
     BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
     ret = get_page(page, d);
     BUG_ON(ret == 0);
-    __assign_domain_page(d, mpaddr, physaddr);
+    __assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable);
 
     //XXX CONFIG_XEN_IA64_DOM0_VP
     //    TODO racy
@@ -871,12 +873,12 @@ assign_domain_page(struct domain *d,
 #ifdef CONFIG_XEN_IA64_DOM0_VP
 static void
 assign_domain_same_page(struct domain *d,
-                          unsigned long mpaddr, unsigned long size)
+                        unsigned long mpaddr, unsigned long size)
 {
     //XXX optimization
     unsigned long end = mpaddr + size;
     for (; mpaddr < end; mpaddr += PAGE_SIZE) {
-        __assign_domain_page(d, mpaddr, mpaddr);
+        __assign_domain_page(d, mpaddr, mpaddr, ASSIGN_writable);
     }
 }
 
@@ -1113,15 +1115,14 @@ unsigned long lookup_domain_mpa(struct d
                }
                pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
-               pte = &pteval;
-               return *(unsigned long *)pte;
+               return pte_val(pteval);
        }
 #endif
        pte = lookup_noalloc_domain_pte(d, mpaddr);
        if (pte != NULL) {
                if (pte_present(*pte)) {
 //printk("lookup_domain_page: found mapping for %lx, 
pte=%lx\n",mpaddr,pte_val(*pte));
-                       return *(unsigned long *)pte;
+                       return pte_val(*pte);
                } else if (VMX_DOMAIN(d->vcpu[0]))
                        return GPFN_INV_MASK;
        }
@@ -1135,7 +1136,10 @@ unsigned long lookup_domain_mpa(struct d
                printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
                       mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
        mpafoo(mpaddr);
-       return 0;
+
+       //XXX This is a work around until the emulation memory access to a region
+       //    where memory or device are attached is implemented.
+       return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
 }
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
@@ -1159,19 +1163,21 @@ out:
 
 // caller must get_page(mfn_to_page(mfn)) before
 // caller must call set_gpfn_from_mfn().
+// flags: currently only ASSIGN_readonly
 static void
 assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
-                           unsigned long mfn, unsigned int flags)
+                           unsigned long mfn, unsigned long flags)
 {
     struct mm_struct *mm = &d->arch.mm;
     pte_t* pte;
     pte_t old_pte;
     pte_t npte;
+    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
     // update pte
-    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
+    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
     old_pte = ptep_xchg(mm, mpaddr, pte, npte);
     if (!pte_none(old_pte)) {
         unsigned long old_mfn;
@@ -1200,11 +1206,11 @@ assign_domain_page_replace(struct domain
 
 unsigned long
 dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
-                   unsigned int flags, domid_t domid)
+                   unsigned long flags, domid_t domid)
 {
     int error = 0;
-
     struct domain* rd;
+
     rd = find_domain_by_id(domid);
     if (unlikely(rd == NULL)) {
         switch (domid) {
@@ -1234,7 +1240,7 @@ dom0vp_add_physmap(struct domain* d, uns
         goto out1;
     }
 
-    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* flags:XXX */);
+    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
     //don't update p2m table because this page belongs to rd, not d.
 out1:
     put_domain(rd);
@@ -1254,23 +1260,18 @@ create_grant_host_mapping(unsigned long 
     struct page_info* page;
     int ret;
 
-    if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
+    if (flags & (GNTMAP_device_map | 
+                 GNTMAP_application_map | GNTMAP_contains_pte)) {
         DPRINTK("%s: flags 0x%x\n", __func__, flags);
         return GNTST_general_error;
-    }
-    if (flags & GNTMAP_readonly) {
-#if 0
-        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
-                __func__, flags);
-#endif
-        flags &= ~GNTMAP_readonly;
     }
 
     page = mfn_to_page(mfn);
     ret = get_page(page, page_get_owner(page));
     BUG_ON(ret == 0);
-    assign_domain_page_replace(d, gpaddr, mfn, flags);
-
+
+    assign_domain_page_replace(d, gpaddr, mfn, (flags & GNTMAP_readonly)?
+                                              ASSIGN_readonly: ASSIGN_writable);
     return GNTST_okay;
 }
 
@@ -1289,22 +1290,17 @@ destroy_grant_host_mapping(unsigned long
         DPRINTK("%s: flags 0x%x\n", __func__, flags);
         return GNTST_general_error;
     }
-    if (flags & GNTMAP_readonly) {
-#if 0
-        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
-                __func__, flags);
-#endif
-        flags &= ~GNTMAP_readonly;
-    }
 
     pte = lookup_noalloc_domain_pte(d, gpaddr);
     if (pte == NULL || !pte_present(*pte) || pte_pfn(*pte) != mfn)
-        return GNTST_general_error;//XXX GNTST_bad_pseudo_phys_addr
+        return GNTST_general_error;
 
     // update pte
     old_pte = ptep_get_and_clear(&d->arch.mm, gpaddr, pte);
     if (pte_present(old_pte)) {
-        old_mfn = pte_pfn(old_pte);//XXX
+        old_mfn = pte_pfn(old_pte);
+    } else {
+        return GNTST_general_error;
     }
     domain_page_flush(d, gpaddr, old_mfn, INVALID_MFN);
 
@@ -1405,7 +1401,7 @@ guest_physmap_add_page(struct domain *d,
 
     ret = get_page(mfn_to_page(mfn), d);
     BUG_ON(ret == 0);
-    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* XXX */);
+    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable);
     set_gpfn_from_mfn(mfn, gpfn);//XXX SMP
 
     //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
diff -r 2cab08ac143b -r 40959bc0a269 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Wed May 24 10:39:55 2006 -0600
+++ b/xen/arch/ia64/xen/process.c       Wed May 24 10:56:47 2006 -0600
@@ -85,6 +85,8 @@ u64 translate_domain_pte(u64 pteval, u64
        struct domain *d = current->domain;
        ia64_itir_t itir = {.itir = itir__};
        u64 mask, mpaddr, pteval2;
+       u64 arflags;
+       u64 arflags2;
 
        pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
 
@@ -123,6 +125,20 @@ u64 translate_domain_pte(u64 pteval, u64
        }
 #endif
        pteval2 = lookup_domain_mpa(d,mpaddr);
+       arflags  = pteval  & _PAGE_AR_MASK;
+       arflags2 = pteval2 & _PAGE_AR_MASK;
+       if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
+#if 0
+               DPRINTK("%s:%d "
+                       "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
+                       "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
+                       __func__, __LINE__,
+                       pteval, arflags, address, itir__,
+                       pteval2, arflags2, mpaddr);
+#endif
+               pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
+}
+
        pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
        pteval2 |= (pteval & _PAGE_ED);
        pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
diff -r 2cab08ac143b -r 40959bc0a269 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Wed May 24 10:39:55 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Wed May 24 10:56:47 2006 -0600
@@ -114,7 +114,7 @@ struct arch_vcpu {
 
 struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
-void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
+void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
 void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
 void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
 #ifdef CONFIG_XEN_IA64_DOM0_VP
@@ -123,7 +123,7 @@ unsigned long assign_domain_mach_page(st
 unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size);
 unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
 unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
-unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned int flags, domid_t domid);
+unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
 #else
 #define alloc_dom_xen_and_dom_io()      do { } while (0)
 #endif
diff -r 2cab08ac143b -r 40959bc0a269 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed May 24 10:39:55 2006 -0600
+++ b/xen/include/public/arch-ia64.h    Wed May 24 10:56:47 2006 -0600
@@ -369,6 +369,10 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
 #define IA64_DOM0VP_add_physmap         18      // assigne machine page frane
                                                 // to dom0's pseudo physical
                                                 // address space.
+// flags for page assignement to pseudo physical address space
+#define _ASSIGN_readonly                0
+#define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
+#define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
 
 #endif /* !__ASSEMBLY__ */
 
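
The process.c hunk above is the enforcement side of the same change: when translate_domain_pte() builds the machine pte, a guest mapping that asks for more than read access is clamped to read-only if the backing pseudo-physical assignment is read-only. Below is a small standalone sketch of that check, again not the committed code, with the _PAGE_AR_* encodings assumed from the usual ia64 definitions.

#include <assert.h>
#include <stdio.h>

/* Assumed ia64 access-rights encoding (3-bit "ar" field at bit 9). */
#define _PAGE_AR_SHIFT  9
#define _PAGE_AR_MASK   (7UL << _PAGE_AR_SHIFT)
#define _PAGE_AR_R      (0UL << _PAGE_AR_SHIFT)
#define _PAGE_AR_RWX    (3UL << _PAGE_AR_SHIFT)

/* Mirror of the new check in translate_domain_pte(): pteval is what the
 * guest installed, pteval2 is what lookup_domain_mpa() returned for the
 * backing assignment. */
static unsigned long clamp_access_rights(unsigned long pteval,
                                         unsigned long pteval2)
{
    unsigned long arflags  = pteval  & _PAGE_AR_MASK;
    unsigned long arflags2 = pteval2 & _PAGE_AR_MASK;

    if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R)
        pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
    return pteval;
}

int main(void)
{
    /* Guest wants RWX, page is assigned read-only: clamp to R. */
    assert((clamp_access_rights(_PAGE_AR_RWX, _PAGE_AR_R) & _PAGE_AR_MASK)
           == _PAGE_AR_R);
    /* Writable assignment: the guest's rights pass through unchanged. */
    assert((clamp_access_rights(_PAGE_AR_RWX, _PAGE_AR_RWX) & _PAGE_AR_MASK)
           == _PAGE_AR_RWX);
    puts("read-only clamp behaves as described");
    return 0;
}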

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
