[Xen-changelog] adds necessary put/get_user, copy_from/to_user, etc stuff for VTI

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] adds necessary put/get_user, copy_from/to_user, etc stuff for VTI
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Wed, 22 Jun 2005 04:07:43 +0000
Cc: james@xxxxxxxxxxxxx
Delivery-date: Thu, 23 Jun 2005 11:03:51 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1713.2.21, 2005/06/21 22:07:43-06:00, djm@xxxxxxxxxxxxxxx

        adds necessary put/get_user, copy_from/to_user, etc stuff for VTI 
        
        Signed-off-by: Kevin Tian <Kevin.tian@xxxxxxxxx>



 arch/ia64/patch/linux-2.6.11/uaccess.h |   20 +++-
 arch/ia64/vmmu.c                       |   52 +++++++++++
 include/asm-ia64/vmx_uaccess.h         |  156 +++++++++++++++++++++++++++++++++
 3 files changed, 225 insertions(+), 3 deletions(-)
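
For context, a sketch of the call path these files establish (names taken from the patch below; under CONFIG_VTI, uaccess.h simply includes vmx_uaccess.h in place of the native definitions):

    /* Sketch only: how a single put_user() resolves for a VTI domain.
     * Error handling reduced to the essentials. An aligned int cannot
     * span two vTLB entries (see the TODO note in vmx_uaccess.h), so
     * one translation suffices here.
     */
    long example_put(int *uptr, int val)
    {
        unsigned long ma, len = sizeof(int);

        /* 1. Translate the guest virtual address via the vTLB. */
        if (__domain_va_to_ma((unsigned long)uptr, &ma, &len))
            return -EFAULT;     /* no matching vTLB entry */

        /* 2. Store through the hypervisor's identity mapping. */
        *(int *)__va(ma) = val;
        return 0;
    }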


diff -Nru a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
--- a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h        2005-06-23 07:05:02 -04:00
+++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h        2005-06-23 07:05:02 -04:00
@@ -1,6 +1,17 @@
---- ../../linux-2.6.11/include/asm-ia64/uaccess.h      2005-06-06 10:36:23.000000000 -0600
-+++ include/asm-ia64/uaccess.h 2005-06-10 18:08:06.000000000 -0600
-@@ -60,6 +60,11 @@
+--- ../../linux-2.6.11/include/asm-ia64/uaccess.h      2005-03-02 00:37:53.000000000 -0700
++++ include/asm-ia64/uaccess.h 2005-06-21 21:53:20.000000000 -0600
+@@ -32,6 +32,10 @@
+  *    David Mosberger-Tang <davidm@xxxxxxxxxx>
+  */
+ 
++#ifdef CONFIG_VTI
++#include <asm/vmx_uaccess.h>
++#else // CONFIG_VTI
++
+ #include <linux/compiler.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+@@ -60,6 +64,11 @@
  * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
   * point inside the virtually mapped linear page table.
   */
@@ -12,7 +23,7 @@
 #define __access_ok(addr, size, segment)                                      \
 ({                                                                            \
        __chk_user_ptr(addr);                                                  \
-@@ -67,6 +72,7 @@
+@@ -67,6 +76,7 @@
         && ((segment).seg == KERNEL_DS.seg                                    \
             || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));       \
  })
@@ -20,3 +31,11 @@
  #define access_ok(type, addr, size)   __access_ok((addr), (size), get_fs())
  
  static inline int
+@@ -343,6 +353,7 @@
+       __su_ret;                                               \
+ })
+ 
++#endif // CONFIG_VTI
+ /* Generic code can't deal with the location-relative format that we use for compactness.  */
+ #define ARCH_HAS_SORT_EXTABLE
+ #define ARCH_HAS_SEARCH_EXTABLE
diff -Nru a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
--- a/xen/arch/ia64/vmmu.c      2005-06-23 07:05:02 -04:00
+++ b/xen/arch/ia64/vmmu.c      2005-06-23 07:05:02 -04:00
@@ -792,3 +792,55 @@
     return IA64_NO_FAULT;
 }
 
+/*
+ * [FIXME] Is there any effective way to move this routine
+ * into vmx_uaccess.h? struct exec_domain is an incomplete type
+ * there...
+ *
+ * This is the interface to look up the virtual TLB and return
+ * the corresponding machine address in the 2nd parameter.
+ * The 3rd parameter returns how many bytes are mapped by the
+ * matching vTLB entry, allowing the caller to copy more at once.
+ *
+ * Returns -EFAULT if the lookup fails, 0 otherwise. All
+ * higher-level domain access utilities rely on this routine
+ * to determine the real machine address.
+ *
+ * Yes, put_user and get_user are somewhat slowed down by it.
+ * However, this step is necessary for any VMX domain virtual
+ * address, since that is a different address space from the
+ * HV's. A short-circuit may be added later for special cases.
+ */
+long
+__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
+{
+    unsigned long      mpfn, gpfn, m, n = *len;
+    thash_cb_t         *vtlb;
+    unsigned long      end;    /* end of the area mapped by current entry */
+    thash_data_t       *entry;
+    struct vcpu *v = current;
+    ia64_rr    vrr;
+
+    vtlb = vmx_vcpu_get_vtlb(v); 
+    vrr = vmx_vcpu_rr(v, va);
+    entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
+    if (entry == NULL)
+        return -EFAULT;
+
+    gpfn = (entry->ppn >> (PAGE_SHIFT - 12));
+    gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
+    gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));
+
+    mpfn = __gpfn_to_mfn(v->domain, gpfn);
+    m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
+    /* machine address may be not continuous */
+    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
+    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
+    /* Current entry can't map all requested area */
+    if ((m + n) > end)
+        n = end - m;
+
+    *ma = m;
+    *len = n;
+    return 0;
+}
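
The routine above is a translate-one-chunk primitive: *len is an in/out parameter, clamped to the number of bytes contiguously mapped from va, so callers loop until the request drains. A worked example of the clamping, with hypothetical values:

    /* Hypothetical worked example, assuming PAGE_SHIFT == 14 (16KB
     * pages) and that PAGEALIGN(a, ps) clears the low `ps` bits:
     *
     *   va falls at frame offset 0x3f00, so m = (mpfn << 14) | 0x3f00
     *   caller requests *len = 0x400 bytes
     *   end = PAGEALIGN(m, 14) + 0x4000    (end of this 16KB frame)
     *
     * Only end - m = 0x100 bytes remain in the frame, so m + 0x400 > end
     * and *len comes back as 0x100; a caller such as __copy_from_user()
     * (next file) copies those bytes and loops for the remaining 0x300.
     */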
diff -Nru a/xen/include/asm-ia64/vmx_uaccess.h b/xen/include/asm-ia64/vmx_uaccess.h
--- /dev/null   Wed Dec 31 16:00:00 1969
+++ b/xen/include/asm-ia64/vmx_uaccess.h        2005-06-23 07:05:02 -04:00
@@ -0,0 +1,156 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
+ * across the domain/hypervisor boundary.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Note:  In a VMX-enabled environment, the poor man's policy is
+ * actually useless, since the HV resides in a completely different
+ * address space from the domain. So the only access path is to search
+ * the vTLB first and, on a hit, access the identity-mapped address.
+ *
+ * Copyright (c) 2004, Intel Corporation.
+ *     Kun Tian (Kevin Tian) (kevin.tian@xxxxxxxxx)
+ */
+
+#ifndef __ASM_IA64_VMX_UACCESS_H__
+#define __ASM_IA64_VMX_UACCESS_H__
+
+#include <xen/compiler.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+
+#include <asm/intrinsics.h>
+#include <asm/vmmu.h>
+
+/* Since the HV never accesses domain space directly, most security
+ * checks can be no-ops for now
+ */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+/* For backward compatibility */
+#define __access_ok(addr, size, segment)       1
+#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr)       __put_user((x), (ptr))
+#define get_user(x, ptr)       __get_user((x), (ptr))
+
+#define __put_user(x, ptr)     __do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr)     __do_get_user((x), (ptr), sizeof(*(ptr)))
+
+/* TODO: add specific unaligned access support later. Assuming accesses
+ * are aligned at 1, 2, 4, or 8 bytes for now, an operand cannot span
+ * two vTLB entries
+ */
+extern long
+__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
+
+#define __do_put_user(x, ptr, size)                                    \
+({                                                                     \
+    __typeof__ (x) __pu_x = (x);                                       \
+    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                      \
+    __typeof__ (size) __pu_size = (size);                              \
+    unsigned long __pu_ma;                                             \
+    long __pu_err;                                                     \
+                                                                       \
+    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,              \
+                               &__pu_ma, &__pu_size);                  \
+    __pu_err ? (__pu_err = -EFAULT) :                                  \
+       (*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x);             \
+    __pu_err;                                                          \
+})
+
+#define __do_get_user(x, ptr, size)                                    \
+({                                                                     \
+    __typeof__ (*(ptr)) __gu_x;                                        \
+    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);                      \
+    __typeof__ (size) __gu_size = (size);                              \
+    unsigned long __gu_ma;                                             \
+    long __gu_err;                                                     \
+                                                                       \
+    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,              \
+                               &__gu_ma, &__gu_size);                  \
+    __gu_err ? (__gu_err = -EFAULT) :                                  \
+       ((x) = __gu_x = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));       \
+    __gu_err;                                                          \
+})
+
+/* More complex copy from domain */
+#define copy_from_user(to, from, n)    __copy_from_user((to), (from), (n))
+#define copy_to_user(to, from, n)      __copy_to_user((to), (from), (n))
+#define clear_user(to, n)              __clear_user((to), (n))
+
+static inline unsigned long
+__copy_from_user(void *to, void *from, unsigned long n)
+{
+    unsigned long ma, i;
+
+    i = n;
+    while (!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
+        memcpy(to, (void *)__va(ma), i);
+        n -= i;
+        if (!n)
+            break;
+        from += i;
+        to += i;
+        i = n;
+    }
+    return n;
+}
+
+static inline unsigned long
+__copy_to_user(void *to, void *from, unsigned long n)
+{
+    unsigned long ma, i;
+
+    i = n;
+    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
+        memcpy((void *)__va(ma), from, i);
+        n -= i;
+        if (!n)
+            break;
+        from += i;
+        to += i;
+        i = n;
+    }
+    return n;
+}
+
+static inline unsigned long
+__clear_user(void *to, unsigned long n)
+{
+    unsigned long ma, i;
+
+    i = n;
+    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
+        memset((void *)__va(ma), 0, i);
+        n -= i;
+        if (!n)
+            break;
+        to += i;
+        i = n;
+    }
+    return n;
+}
+
+#endif // __ASM_IA64_VMX_UACCESS_H__
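
As a usage illustration (a hypothetical caller, not part of this changeset): hypervisor code servicing a VTI domain can use the familiar accessors and let the chunked vTLB walk happen underneath:

    /* Hypothetical example only -- not part of this changeset. */
    static long fetch_guest_buffer(void *dst, void *guest_src,
                                   unsigned long size)
    {
        /* copy_from_user() returns the number of bytes NOT copied;
         * any uncopied tail means some part of the range had no
         * vTLB entry. */
        if (copy_from_user(dst, guest_src, size))
            return -EFAULT;
        return 0;
    }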
