ChangeSet 1.1682, 2005/06/06 11:52:53+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Merge x86/32 and x86/64 usercopy routines.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
b/xen/arch/x86/usercopy.c | 139 ++++++++++
b/xen/include/asm-x86/types.h | 4
b/xen/include/asm-x86/uaccess.h | 253 ++++++++++++++++++
b/xen/include/asm-x86/x86_32/uaccess.h | 301 +---------------------
b/xen/include/asm-x86/x86_64/uaccess.h | 248 ------------------
xen/arch/x86/x86_32/usercopy.c | 443 ---------------------------------
xen/arch/x86/x86_64/usercopy.c | 183 -------------
7 files changed, 420 insertions(+), 1151 deletions(-)
diff -Nru a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/xen/arch/x86/usercopy.c 2005-06-06 07:02:13 -04:00
@@ -0,0 +1,139 @@
+/*
+ * User address space access functions.
+ *
+ * Copyright 1997 Andi Kleen <ak@xxxxxx>
+ * Copyright 1997 Linus Torvalds
+ * Copyright 2002 Andi Kleen <ak@xxxxxxx>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <asm/uaccess.h>
+
+unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
+{
+ unsigned long __d0, __d1, __d2, __n = n;
+ __asm__ __volatile__(
+ " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
+ " jbe 1f\n"
+ " mov %1,%0\n"
+ " neg %0\n"
+ " and $"STR(BYTES_PER_LONG-1)",%0\n"
+ " sub %0,%3\n"
+ "4: rep; movsb\n" /* make 'to' address aligned */
+ " mov %3,%0\n"
+ " shr $"STR(LONG_BYTEORDER)",%0\n"
+ " and $"STR(BYTES_PER_LONG-1)",%3\n"
+ " .align 2,0x90\n"
+ "0: rep; movs"__OS"\n" /* as many words as possible... */
+ " mov %3,%0\n"
+ "1: rep; movsb\n" /* ...remainder copied as bytes */
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "5: add %3,%0\n"
+ " jmp 2b\n"
+ "3: lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " "__FIXUP_ALIGN"\n"
+ " "__FIXUP_WORD" 4b,5b\n"
+ " "__FIXUP_WORD" 0b,3b\n"
+ " "__FIXUP_WORD" 1b,2b\n"
+ ".previous"
+ : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+ : "3"(__n), "0"(__n), "1"(to), "2"(from)
+ : "memory");
+ return (unsigned)__n;
+}
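
For readers who prefer not to decode the inline assembly, the routine above
behaves, in spirit, like the C loop below. This is a simplified model only:
it drops the exception-fixup tables and the STR()/__OS word-size macros, and
copy_model is an illustrative name, not part of the patch.

    /* Illustrative C model of the copy loop above (no fault fixup). */
    static unsigned long copy_model(char *to, const char *from, unsigned long n)
    {
        unsigned long head, words;

        if ( n > 2*sizeof(long) - 1 )
        {
            /* Copy bytes until 'to' is word-aligned. */
            head = -(unsigned long)to & (sizeof(long) - 1);
            n -= head;
            while ( head-- )
                *to++ = *from++;
            /* Copy as many whole words as possible ("rep; movsl/movsq"). */
            words = n / sizeof(long);
            n &= sizeof(long) - 1;
            while ( words-- )
            {
                *(unsigned long *)to = *(const unsigned long *)from;
                to += sizeof(long);
                from += sizeof(long);
            }
        }
        /* Remainder copied as bytes ("rep; movsb"). */
        while ( n-- )
            *to++ = *from++;
        return 0; /* the real routine returns the count of bytes NOT copied */
    }
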
+
+unsigned long
+__copy_from_user_ll(void *to, const void __user *from, unsigned n)
+{
+ unsigned long __d0, __d1, __d2, __n = n;
+ __asm__ __volatile__(
+ " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
+ " jbe 1f\n"
+ " mov %1,%0\n"
+ " neg %0\n"
+ " and $"STR(BYTES_PER_LONG-1)",%0\n"
+ " sub %0,%3\n"
+ "4: rep; movsb\n" /* make 'to' address aligned */
+ " mov %3,%0\n"
+ " shr $"STR(LONG_BYTEORDER)",%0\n"
+ " and $"STR(BYTES_PER_LONG-1)",%3\n"
+ " .align 2,0x90\n"
+ "0: rep; movs"__OS"\n" /* as many words as possible... */
+ " mov %3,%0\n"
+ "1: rep; movsb\n" /* ...remainder copied as bytes */
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "5: add %3,%0\n"
+ " jmp 6f\n"
+ "3: lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
+ "6: push %0\n"
+ " push %%"__OP"ax\n"
+ " xor %%eax,%%eax\n"
+ " rep; stosb\n"
+ " pop %%"__OP"ax\n"
+ " pop %0\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " "__FIXUP_ALIGN"\n"
+ " "__FIXUP_WORD" 4b,5b\n"
+ " "__FIXUP_WORD" 0b,3b\n"
+ " "__FIXUP_WORD" 1b,6b\n"
+ ".previous"
+ : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+ : "3"(__n), "0"(__n), "1"(to), "2"(from)
+ : "memory");
+ return (unsigned)__n;
+}
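
The one behavioural difference from __copy_to_user_ll above is the fixup
path: when a fault aborts the copy, the unwritten tail of the destination is
cleared ("xor %eax,%eax; rep stosb") so the caller never sees stale kernel
memory. In C terms (fixup_model and 'done' are illustrative names, not part
of the patch; memset() is available via the xen/lib.h include above):

    /* Illustrative fixup semantics: a fault after 'done' of 'n' bytes. */
    static unsigned long fixup_model(void *to, unsigned long n, unsigned long done)
    {
        memset((char *)to + done, 0, n - done); /* zero the unwritten tail */
        return n - done;                        /* bytes not copied */
    }
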
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+copy_to_user(void __user *to, const void *from, unsigned n)
+{
+ if (access_ok(to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+}
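
A typical caller treats any nonzero return as a fault. A hypothetical example
follows; struct result and put_result are illustrative names, not part of the
patch:

    struct result { unsigned long status; }; /* hypothetical layout */

    static int put_result(void __user *uaddr, const struct result *r)
    {
        if ( copy_to_user(uaddr, r, sizeof(*r)) != 0 )
            return -EFAULT; /* some trailing bytes could not be written */
        return 0;
    }
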
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long
+copy_from_user(void *to, const void __user *from, unsigned n)
+{
+ if (access_ok(from, n))
+ n = __copy_from_user(to, from, n);
+ else
+ memset(to, 0, n);
+ return n;
+}
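
The mirror-image pattern for inbound copies, again hypothetical (struct op
and get_op are illustrative names, not part of the patch):

    struct op { unsigned long cmd, arg; }; /* hypothetical layout */

    static int get_op(struct op *op, const void __user *uaddr)
    {
        if ( copy_from_user(op, uaddr, sizeof(*op)) != 0 )
            return -EFAULT; /* partial copy; the tail of *op is zero-filled */
        return 0;
    }
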
diff -Nru a/xen/arch/x86/x86_32/usercopy.c b/xen/arch/x86/x86_32/usercopy.c
--- a/xen/arch/x86/x86_32/usercopy.c 2005-06-06 07:02:13 -04:00
+++ /dev/null Wed Dec 31 16:00:00 1969
@@ -1,443 +0,0 @@
-/*
- * User address space access functions.
- * The non inlined parts of asm-i386/uaccess.h are here.
- *
- * Copyright 1997 Andi Kleen <ak@xxxxxx>
- * Copyright 1997 Linus Torvalds
- */
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <asm/uaccess.h>
-
-static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
-{
-#ifdef CONFIG_X86_INTEL_USERCOPY
- if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
- return 0;
-#endif
- return 1;
-}
-#define movsl_is_ok(a1,a2,n) \
- __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
-
-
-/*
- * Zero Userspace
- */
-
-#define __do_clear_user(addr,size) \
-do { \
- int __d0; \
- __asm__ __volatile__( \
- "0: rep; stosl\n" \
- " movl %2,%0\n" \
- "1: rep; stosb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%2,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=&c"(size), "=&D" (__d0) \
- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
-} while (0)
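
The macro clears size/4 longs with "rep; stosl" and the remaining size&3
bytes with "rep; stosb"; the fixup label computes how many bytes were left
on a fault. A C model of the fault-free path (clear_model is an illustrative
name, not part of the patch):

    /* Illustrative C model of __do_clear_user (no fault fixup). */
    static void clear_model(char *addr, unsigned long size)
    {
        unsigned long longs = size / 4;

        while ( longs-- )
        {
            *(unsigned int *)addr = 0; /* "rep; stosl" */
            addr += 4;
        }
        size &= 3;
        while ( size-- )
            *addr++ = 0; /* "rep; stosb" */
    }
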
-
-/**
- * clear_user: - Zero a block of memory in user space.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned long
-clear_user(void __user *to, unsigned long n)
-{
- if (access_ok(to, n))
- __do_clear_user(to, n);
- return n;
-}
-
-/**
- * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned long
-__clear_user(void __user *to, unsigned long n)
-{
- __do_clear_user(to, n);
- return n;
-}
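
Because __clear_user() skips the access_ok() check, it is only safe once the
caller has validated the range itself. A hypothetical sketch
(zero_guest_buffer is an illustrative name, not part of the patch):

    /* Hypothetical example: validate once, then use the unchecked variant. */
    static int zero_guest_buffer(void __user *buf, unsigned long len)
    {
        if ( !access_ok(buf, len) )
            return -EFAULT;
        /* ... further validation of 'buf' could go here ... */
        return __clear_user(buf, len) ? -EFAULT : 0; /* range checked above */
    }
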
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
-{
- int d0, d1;
- __asm__ __volatile__(
- " .align 2,0x90\n"
- "1: movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 3f\n"
- "2: movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
- "3: movl 0(%4), %%eax\n"
- "4: movl 4(%4), %%edx\n"
- "5: movl %%eax, 0(%3)\n"
- "6: movl %%edx, 4(%3)\n"
- "7: movl 8(%4), %%eax\n"
- "8: movl 12(%4),%%edx\n"