I'd like to split the patch into small ones, so that it can be clearer.
Attached is the patch that adds support for copy_to/from_guest.
Signed-off-by: Xiaofeng Ling <xiaofeng.ling@xxxxxxxxx>
arch/x86/x86_32/usercopy.c | 99
+++++++++++++++++++++++++++++++++++++++
arch/x86/x86_64/usercopy.c | 15 +++++
include/asm-x86/x86_32/uaccess.h | 5 +
include/asm-x86/x86_64/uaccess.h | 5 +
4 files changed, 124 insertions(+)
Ling, Xiaofeng wrote:
Keir Fraser <mailto:Keir.Fraser@xxxxxxxxxxxx> wrote:
On 3 Jun 2005, at 03:40, Xiaofeng Ling wrote:
It now uses shadow_mode_external everywhere, and uses a permit bitmap for
hypercalls from a VMX domain. Do you think it's acceptable now?
It's against 1657.
Still messy imo. When I said to split the path by
shadow_mode_external, I meant you should do it within the uaccess
macros/functions; not in their callers.
I've already done that for copy_from/to_user,
but for __copy_from/to_user
I cannot do that, because not all of the callers should call copy_from/to_guest.
===== xen/arch/x86/x86_32/usercopy.c 1.10 vs edited =====
--- 1.10/xen/arch/x86/x86_32/usercopy.c 2005-06-03 03:54:00 +08:00
+++ edited/xen/arch/x86/x86_32/usercopy.c 2005-06-03 10:13:28 +08:00
@@ -9,6 +9,8 @@
#include <xen/config.h>
#include <xen/lib.h>
#include <asm/uaccess.h>
+#include <asm/domain_page.h>
+#include <asm/shadow.h>
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned
long n)
{
@@ -395,6 +397,98 @@
return n;
}
+/*
+ * map_domain_vaddr: map a guest virtual address range into the
+ * hypervisor's address space.
+ *
+ * @guest_vaddr: guest virtual start address
+ * @len:         length of the range, in bytes
+ *
+ * Returns a hypervisor-usable pointer to the mapping, or NULL when
+ * len exceeds PAGE_SIZE or when [guest_vaddr, guest_vaddr+len) crosses
+ * a page boundary (a single map_domain_mem() mapping covers one page).
+ * On success the caller must release the mapping with unmap_domain_mem().
+ *
+ * NOTE(review): the l1 entry returned by gva_to_gpte() is used without
+ * checking its present bit -- confirm callers can never hit a
+ * non-present mapping here, or add an explicit flags check.
+ */
+void* map_domain_vaddr(void * guest_vaddr, unsigned long len)
+{
+ l1_pgentry_t gpte;
+ unsigned long mfn;
+ unsigned long ma;
+ void * vstart;
+
+ if (len > PAGE_SIZE)
+ {
+ return NULL;
+ }
+
+ /* Only proceed when first and last byte live on the same guest page. */
+ if (((unsigned long)guest_vaddr & PAGE_MASK) ==
+ (((unsigned long)guest_vaddr + len -1) & PAGE_MASK))
+ {
+ /* Walk the guest page table, then translate guest pfn -> machine fn. */
+ gpte = gva_to_gpte((unsigned long)guest_vaddr);
+ mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
+ /* Machine address = machine frame base + offset within the page. */
+ ma = (mfn << PAGE_SHIFT) |
+ ((unsigned long)guest_vaddr & (PAGE_SIZE - 1));
+ vstart = (void *)map_domain_mem(ma);
+ }
+ else
+ {
+ return NULL;
+ }
+ return vstart;
+}
+
+/*
+ * copy_from_guest: copy n bytes from a guest virtual address into a
+ * hypervisor buffer, chunked so that no chunk crosses a guest page
+ * boundary (each chunk is mapped via map_domain_vaddr()).
+ *
+ * @to:   destination buffer in hypervisor space
+ * @from: source guest virtual address
+ * @n:    number of bytes to copy
+ *
+ * Returns the number of bytes NOT copied (0 on full success), matching
+ * the copy_from_user() convention.
+ */
+unsigned long
+copy_from_guest(void *to, const void __user *from, unsigned long n)
+{
+    void *hfrom;
+    unsigned long ncopy;
+    unsigned long nleft;  /* was 'int': truncated byte counts > INT_MAX */
+
+    /* First chunk runs only up to the end of the source page. */
+    ncopy = (((unsigned long)from + PAGE_SIZE) & PAGE_MASK) -
+        (unsigned long)from;
+    ncopy = ncopy > n ? n : ncopy;
+
+    for(nleft = n; nleft > 0; ncopy = nleft > PAGE_SIZE ? PAGE_SIZE : nleft)
+    {
+        hfrom = map_domain_vaddr((void*)from, ncopy);
+        if(hfrom)
+        {
+            memcpy(to, hfrom, ncopy);
+            unmap_domain_mem((void*)hfrom);
+        }
+        else
+        {
+            /* %lu: ncopy is unsigned long ('%ld' was mismatched). */
+            printk("error!, copy from guest map error, from:%p, ncopy:%lu\n",
+                   from, ncopy);
+            return nleft;
+        }
+        nleft -= ncopy;
+        from += ncopy;
+        to += ncopy;
+    }
+    return nleft;
+}
+EXPORT_SYMBOL(copy_from_guest);
+
+/*
+ * copy_to_guest: copy n bytes from a hypervisor buffer to a guest
+ * virtual address, chunked so that no chunk crosses a guest page
+ * boundary (each chunk is mapped via map_domain_vaddr()).
+ *
+ * @to:   destination guest virtual address
+ * @from: source buffer in hypervisor space
+ * @n:    number of bytes to copy
+ *
+ * Returns the number of bytes NOT copied (0 on full success), matching
+ * the copy_to_user() convention.
+ */
+unsigned long
+copy_to_guest(void __user *to, const void *from, unsigned long n)
+{
+    void *hto;
+    unsigned long ncopy;
+    unsigned long nleft;  /* was 'int': truncated byte counts > INT_MAX */
+
+    /* First chunk runs only up to the end of the destination page. */
+    ncopy = (((unsigned long)to + PAGE_SIZE) & PAGE_MASK) - (unsigned long)to;
+    ncopy = ncopy > n ? n : ncopy;
+
+    for(nleft = n; nleft > 0; ncopy = nleft > PAGE_SIZE ? PAGE_SIZE : nleft)
+    {
+        hto = map_domain_vaddr((void*)to, ncopy);
+        if(hto)
+        {
+            memcpy(hto, from, ncopy);
+            unmap_domain_mem((void*)hto);
+        }
+        else
+        {
+            /* It is the mapping of 'to' that failed, so report 'to'
+             * (the original message printed 'from'); %lu for unsigned long. */
+            printk("error!, copy to guest map error, to:%p, ncopy:%lu\n",
+                   to, ncopy);
+            return nleft;
+        }
+        nleft -= ncopy;
+        from += ncopy;
+        to += ncopy;
+    }
+    return nleft;
+}
+EXPORT_SYMBOL(copy_to_guest);
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
@@ -411,6 +505,8 @@
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ /* Domains in external shadow mode (e.g. VMX guests) have their own
+  * page tables, so 'to' must be translated via the guest page table:
+  * divert to copy_to_guest() instead of the direct-access path. */
+ if(shadow_mode_external(current->domain))
+ return copy_to_guest(to, from, n);
if (access_ok(to, n))
n = __copy_to_user(to, from, n);
return n;
@@ -435,6 +531,9 @@
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+
+ /* Domains in external shadow mode (e.g. VMX guests) have their own
+  * page tables, so 'from' must be translated via the guest page table:
+  * divert to copy_from_guest() instead of the direct-access path. */
+ if(shadow_mode_external(current->domain))
+ return copy_from_guest(to, from, n);
if (access_ok(from, n))
n = __copy_from_user(to, from, n);
else
===== xen/arch/x86/x86_64/usercopy.c 1.6 vs edited =====
--- 1.6/xen/arch/x86/x86_64/usercopy.c 2005-06-03 03:54:00 +08:00
+++ edited/xen/arch/x86/x86_64/usercopy.c 2005-06-03 10:13:30 +08:00
@@ -135,6 +135,21 @@
return n;
}
+/*
+ * x86_64 stub: returns n, i.e. reports that nothing was copied.
+ * TODO(review): real implementation needed before external-shadow-mode
+ * domains are supported on x86_64.
+ */
+unsigned long
+copy_from_guest(void *to, const void __user *from, unsigned long n)
+{
+ return n;
+}
+EXPORT_SYMBOL(copy_from_guest);
+
+/*
+ * x86_64 stub: returns n, i.e. reports that nothing was copied.
+ * TODO(review): real implementation needed before external-shadow-mode
+ * domains are supported on x86_64.
+ */
+unsigned long
+copy_to_guest(void __user *to, const void *from, unsigned long n)
+{
+ return n;
+}
+EXPORT_SYMBOL(copy_to_guest);
+
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
===== xen/include/asm-x86/x86_32/uaccess.h 1.19 vs edited =====
--- 1.19/xen/include/asm-x86/x86_32/uaccess.h 2005-04-23 00:34:08 +08:00
+++ edited/xen/include/asm-x86/x86_32/uaccess.h 2005-06-03 10:13:40 +08:00
@@ -332,6 +332,11 @@
unsigned long copy_from_user(void *to,
const void __user *from, unsigned long n);
+unsigned long copy_to_guest(void __user *to,
+ const void *from, unsigned long n);
+unsigned long copy_from_guest(void *to,
+ const void __user *from, unsigned long n);
+
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
===== xen/include/asm-x86/x86_64/uaccess.h 1.15 vs edited =====
--- 1.15/xen/include/asm-x86/x86_64/uaccess.h 2005-04-19 21:48:04 +08:00
+++ edited/xen/include/asm-x86/x86_64/uaccess.h 2005-06-03 10:13:41 +08:00
@@ -224,6 +224,11 @@
unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
+unsigned long copy_to_guest(void __user *to,
+ const void *from, unsigned long n);
+unsigned long copy_from_guest(void *to,
+ const void __user *from, unsigned long n);
+
static always_inline int __copy_from_user(void *dst, const void __user *src,
unsigned size)
{
int ret = 0;
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|