For the sake of not breaking the ia64 build, the old behavior is
retained when HAVE_ARCH_PRIVCMD_MMAP is defined. Hopefully someone able
to test on ia64 can fix this up in the not-too-distant future.
As usual, this was written against 2.6.32.2 and made to apply to the
2.6.18 tree without further testing.
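The mechanism replacing the one-shot flag is apply_to_page_range():
enforce_singleshot_mapping() now walks every PTE in the requested range
and fails if any slot is already populated, so repeated invocations on
the same VMA remain possible as long as the ranges involved do not
overlap an already established mapping. The extra MMAPBATCH checks
serve related purposes: "addr != m.addr" catches truncation when the
64-bit m.addr gets assigned to an unsigned long (relevant on 32-bit),
and "nr_pages > (-addr >> PAGE_SHIFT)" rejects ranges that would wrap,
since -addr >> PAGE_SHIFT is the number of pages between addr and the
top of the address space. As a minimal standalone sketch of the
PTE-walk pattern (helper names are mine, not the patch's; on 2.6.32 the
callback receives the pgtable_t token as shown):

#include <linux/mm.h>

/* Invoked for each PTE in the range; a non-zero return aborts the walk. */
static int pte_empty_fn(pte_t *pte, pgtable_t token,
                        unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

/* Non-zero iff [addr, addr + (npages << PAGE_SHIFT)) has no populated
 * PTEs; the caller holds mmap_sem for writing, as privcmd does. */
static int range_is_unpopulated(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long npages)
{
	return apply_to_page_range(vma->vm_mm, addr, npages << PAGE_SHIFT,
	                           pte_empty_fn, NULL) == 0;
}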
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- head-2010-01-04.orig/drivers/xen/privcmd/privcmd.c	2010-01-04 13:32:08.000000000 +0100
+++ head-2010-01-04/drivers/xen/privcmd/privcmd.c	2010-01-05 11:21:42.000000000 +0100
@@ -34,7 +34,22 @@ static struct proc_dir_entry *capabiliti
static struct proc_dir_entry *capabilities_intf;
#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
+static int enforce_singleshot_mapping_fn(pte_t *pte, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ return pte_none(*pte) ? 0 : -EBUSY;
+}
+
+static inline int enforce_singleshot_mapping(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long npages)
+{
+ return apply_to_page_range(vma->vm_mm, addr, npages << PAGE_SHIFT,
+ enforce_singleshot_mapping_fn, NULL) == 0;
+}
+#else
+#define enforce_singleshot_mapping(vma, addr, npages) \
+ privcmd_enforce_singleshot_mapping(vma)
#endif
static long privcmd_ioctl(struct file *file,
@@ -88,6 +103,9 @@ static long privcmd_ioctl(struct file *f
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
+ if (mmapcmd.num <= 0)
+ return -EINVAL;
+
p = mmapcmd.entry;
for (i = 0; i < mmapcmd.num;) {
int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
@@ -115,8 +133,7 @@ static long privcmd_ioctl(struct file *f
vma = find_vma(mm, msg->va);
rc = -EINVAL;
- if (!vma || (msg->va != vma->vm_start) ||
- !privcmd_enforce_singleshot_mapping(vma))
+ if (!vma || (msg->va != vma->vm_start))
goto mmap_out;
va = vma->vm_start;
@@ -129,7 +146,6 @@ static long privcmd_ioctl(struct file *f
while (i<nr) {
/* Do not allow range to wrap the address space. */
- rc = -EINVAL;
if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
goto mmap_out;
@@ -139,6 +155,23 @@ static long privcmd_ioctl(struct file *f
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
goto mmap_out;
+ va += msg->npages << PAGE_SHIFT;
+ msg++;
+ i++;
+ }
+ }
+
+ if (!enforce_singleshot_mapping(vma, vma->vm_start,
+ (va - vma->vm_start) >> PAGE_SHIFT))
+ goto mmap_out;
+
+ va = vma->vm_start;
+ i = 0;
+ list_for_each(l, &pagelist) {
+ int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+
+ msg = (privcmd_mmap_entry_t*)(l + 1);
+ while (i < nr) {
if ((rc = direct_remap_pfn_range(
vma,
msg->va & PAGE_MASK,
@@ -184,7 +217,9 @@ static long privcmd_ioctl(struct file *f
return -EFAULT;
nr_pages = m.num;
- if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+ addr = m.addr;
+ if (m.num <= 0 || nr_pages > (LONG_MAX >> PAGE_SHIFT) ||
+ addr != m.addr || nr_pages > (-addr >> PAGE_SHIFT))
return -EINVAL;
p = m.arr;
@@ -209,24 +244,16 @@ static long privcmd_ioctl(struct file *f
down_write(&mm->mmap_sem);
- vma = find_vma(mm, m.addr);
+ vma = find_vma(mm, addr);
ret = -EINVAL;
if (!vma ||
- (m.addr != vma->vm_start) ||
- ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
- !privcmd_enforce_singleshot_mapping(vma)) {
- if (!(vma &&
- (m.addr >= vma->vm_start) &&
- ((m.addr + (nr_pages << PAGE_SHIFT)) <= vma->vm_end) &&
- (nr_pages == 1) &&
- !privcmd_enforce_singleshot_mapping(vma))) {
- up_write(&mm->mmap_sem);
- goto mmapbatch_out;
- }
+ addr < vma->vm_start ||
+ addr + (nr_pages << PAGE_SHIFT) > vma->vm_end ||
+ !enforce_singleshot_mapping(vma, addr, nr_pages)) {
+ up_write(&mm->mmap_sem);
+ goto mmapbatch_out;
}
- p = m.arr;
- addr = m.addr;
i = 0;
ret = 0;
list_for_each(l, &pagelist) {
@@ -309,11 +336,6 @@ static int privcmd_mmap(struct file * fi
return 0;
}
-
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
-{
- return (xchg(&vma->vm_private_data, (void *)1) == NULL);
-}
#endif
static const struct file_operations privcmd_file_ops = {
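To illustrate the resulting semantics from userspace, here is a hedged
sketch (not part of the patch; device path, header location, domid and
MFN values are placeholders): with the old xchg()-based flag the second
MMAPBATCH ioctl on a VMA always failed with -EINVAL, whereas with the
PTE-based check two calls on disjoint subranges of one VMA both pass
validation:

/* Hedged illustration only. Fill in real target-domain MFNs and domid;
 * the header is installed as <xen/sys/privcmd.h> on typical xenlinux
 * setups, but the location may vary by tree. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/sys/privcmd.h>  /* privcmd_mmapbatch_t, IOCTL_PRIVCMD_MMAPBATCH */

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	xen_pfn_t mfns[2] = { 0, 0 };     /* placeholders: real MFNs go here */
	privcmd_mmapbatch_t m;
	void *va;
	int fd = open("/proc/xen/privcmd", O_RDWR);

	if (fd < 0)
		return 1;
	va = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (va == MAP_FAILED)
		return 1;

	m.num  = 1;
	m.dom  = 1;                       /* placeholder domid */
	m.addr = (unsigned long)va;       /* first page of the VMA */
	m.arr  = &mfns[0];
	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m))
		perror("first batch");

	m.addr = (unsigned long)va + psz; /* second, disjoint page */
	m.arr  = &mfns[1];
	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m))
		perror("second batch");

	return 0;
}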