xen-changelog

[Xen-changelog] [linux-2.6.18-xen] privcmd: avoid deadlock due to copy_(to|from)_user with mmap_sem write lock held.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [linux-2.6.18-xen] privcmd: avoid deadlock due to copy_(to|from)_user with mmap_sem write lock held.
From: "Xen patchbot-linux-2.6.18-xen" <patchbot-linux-2.6.18-xen@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 27 Jun 2008 13:00:10 -0700
Delivery-date: Fri, 27 Jun 2008 13:00:02 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1214337767 -3600
# Node ID 043dc7488c1105670a78cd45efa459fc4defa52d
# Parent  5201a184f5131c76f8263aa70c031c00dd094067
privcmd: avoid deadlock due to copy_(to|from)_user with mmap_sem write lock held.

Accessing user memory with the mmap_sem write lock held is illegal: if the pages
are non-present, we deadlock against the lock taken in do_page_fault. Avoid this
for IOCTL_PRIVCMD_MMAP and IOCTL_PRIVCMD_MMAPBATCH by copying the entire input
data structure into the kernel before taking the lock.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 drivers/xen/privcmd/privcmd.c |  162 ++++++++++++++++++++++++++++++------------
 1 files changed, 117 insertions(+), 45 deletions(-)
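For readers skimming the patch, here is a minimal, self-contained sketch of the
two-phase pattern it introduces: snapshot the user buffer into kernel pages
first, then take mmap_sem for writing, so copy_(to|from)_user never runs under
the lock (a fault on a non-present user page would re-enter do_page_fault and
deadlock on mmap_sem). The identifiers entry_t, NR_PER_PAGE and process_entries
below are illustrative assumptions, not names from the driver:

/*
 * Sketch only (assumed names, 2.6.18-era APIs): copy user data into
 * kernel pages chained on a list *before* taking mmap_sem, so the
 * fault handler can never be re-entered while we hold the write lock.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

typedef struct {
	unsigned long va, mfn, npages;	/* hypothetical entry layout */
} entry_t;

/* Each chunk page holds a list_head followed by as many entries as fit. */
#define NR_PER_PAGE \
	(int)((PAGE_SIZE - sizeof(struct list_head)) / sizeof(entry_t))

static long process_entries(entry_t __user *p, int num)
{
	struct mm_struct *mm = current->mm;
	LIST_HEAD(pagelist);
	struct list_head *l, *l2;
	long rc;
	int i;

	/* Phase 1: copy everything in; no locks held, so faulting is safe. */
	for (i = 0; i < num; ) {
		int nr = min(num - i, NR_PER_PAGE);

		rc = -ENOMEM;
		l = (struct list_head *)__get_free_page(GFP_KERNEL);
		if (l == NULL)
			goto out;
		INIT_LIST_HEAD(l);
		list_add_tail(l, &pagelist);

		rc = -EFAULT;
		if (copy_from_user(l + 1, p, nr * sizeof(entry_t)))
			goto out;
		i += nr;
		p += nr;
	}

	/* Phase 2: walk the kernel-side copy under the write lock. */
	down_write(&mm->mmap_sem);
	i = 0;
	list_for_each(l, &pagelist) {
		entry_t *e = (entry_t *)(l + 1);
		int j, nr = min(num - i, NR_PER_PAGE);

		for (j = 0; j < nr; j++, i++)
			(void)e[j];	/* operate on e[j]; never touch user memory here */
	}
	up_write(&mm->mmap_sem);
	rc = 0;

out:
	list_for_each_safe(l, l2, &pagelist)
		free_page((unsigned long)l);
	return rc;
}

The real patch follows this shape for both ioctls; for MMAPBATCH it
additionally copies the (possibly error-marked) mfn array back to user space
only after dropping the lock.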

diff -r 5201a184f513 -r 043dc7488c11 drivers/xen/privcmd/privcmd.c
--- a/drivers/xen/privcmd/privcmd.c     Fri Jun 20 17:43:16 2008 +0100
+++ b/drivers/xen/privcmd/privcmd.c     Tue Jun 24 21:02:47 2008 +0100
@@ -93,13 +93,16 @@ static long privcmd_ioctl(struct file *f
        break;
 
        case IOCTL_PRIVCMD_MMAP: {
+#define MMAP_NR_PER_PAGE (int)((PAGE_SIZE-sizeof(struct list_head))/sizeof(privcmd_mmap_entry_t))
                privcmd_mmap_t mmapcmd;
-               privcmd_mmap_entry_t msg;
+               privcmd_mmap_entry_t *msg;
                privcmd_mmap_entry_t __user *p;
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
                unsigned long va;
                int i, rc;
+               LIST_HEAD(pagelist);
+               struct list_head *l,*l2;
 
                if (!is_initial_xendomain())
                        return -EPERM;
@@ -108,63 +111,92 @@ static long privcmd_ioctl(struct file *f
                        return -EFAULT;
 
                p = mmapcmd.entry;
-               if (copy_from_user(&msg, p, sizeof(msg)))
-                       return -EFAULT;
+               for (i = 0; i < mmapcmd.num;) {
+                       int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+
+                       rc = -ENOMEM;
+                       l = (struct list_head *) __get_free_page(GFP_KERNEL);
+                       if (l == NULL)
+                               goto mmap_out;
+
+                       INIT_LIST_HEAD(l);
+                       list_add_tail(l, &pagelist);
+                       msg = (privcmd_mmap_entry_t*)(l + 1);
+
+                       rc = -EFAULT;
+                       if (copy_from_user(msg, p, nr*sizeof(*msg)))
+                               goto mmap_out;
+                       i += nr;
+                       p += nr;
+               }
+
+               l = pagelist.next;
+               msg = (privcmd_mmap_entry_t*)(l + 1);
 
                down_write(&mm->mmap_sem);
 
-               vma = find_vma(mm, msg.va);
+               vma = find_vma(mm, msg->va);
                rc = -EINVAL;
-               if (!vma || (msg.va != vma->vm_start) ||
+               if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto mmap_out;
 
                va = vma->vm_start;
 
-               for (i = 0; i < mmapcmd.num; i++) {
-                       rc = -EFAULT;
-                       if (copy_from_user(&msg, p, sizeof(msg)))
-                               goto mmap_out;
-
-                       /* Do not allow range to wrap the address space. */
-                       rc = -EINVAL;
-                       if ((msg.npages > (LONG_MAX >> PAGE_SHIFT)) ||
-                           ((unsigned long)(msg.npages << PAGE_SHIFT) >= -va))
-                               goto mmap_out;
-
-                       /* Range chunks must be contiguous in va space. */
-                       if ((msg.va != va) ||
-                           ((msg.va+(msg.npages<<PAGE_SHIFT)) > vma->vm_end))
-                               goto mmap_out;
-
-                       if ((rc = direct_remap_pfn_range(
-                               vma,
-                               msg.va & PAGE_MASK, 
-                               msg.mfn, 
-                               msg.npages << PAGE_SHIFT, 
-                               vma->vm_page_prot,
-                               mmapcmd.dom)) < 0)
-                               goto mmap_out;
-
-                       p++;
-                       va += msg.npages << PAGE_SHIFT;
+               i = 0;
+               list_for_each(l, &pagelist) {
+                       int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
+
+                       msg = (privcmd_mmap_entry_t*)(l + 1);
+                       while (i<nr) {
+
+                               /* Do not allow range to wrap the address space. */
+                               rc = -EINVAL;
+                               if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
+                                   ((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
+                                       goto mmap_out;
+
+                               /* Range chunks must be contiguous in va space. */
+                               if ((msg->va != va) ||
+                                   ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
+                                       goto mmap_out;
+
+                               if ((rc = direct_remap_pfn_range(
+                                            vma,
+                                            msg->va & PAGE_MASK,
+                                            msg->mfn,
+                                            msg->npages << PAGE_SHIFT,
+                                            vma->vm_page_prot,
+                                            mmapcmd.dom)) < 0)
+                                       goto mmap_out;
+
+                               va += msg->npages << PAGE_SHIFT;
+                               msg++;
+                               i++;
+                       }
                }
 
                rc = 0;
 
        mmap_out:
                up_write(&mm->mmap_sem);
+               list_for_each_safe(l,l2,&pagelist)
+                       free_page((unsigned long)l);
                ret = rc;
        }
+#undef MMAP_NR_PER_PAGE
        break;
 
        case IOCTL_PRIVCMD_MMAPBATCH: {
+#define MMAPBATCH_NR_PER_PAGE (unsigned long)((PAGE_SIZE-sizeof(struct list_head))/sizeof(unsigned long))
                privcmd_mmapbatch_t m;
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
                xen_pfn_t __user *p;
-               unsigned long addr, mfn, nr_pages;
+               unsigned long addr, *mfn, nr_pages;
                int i;
+               LIST_HEAD(pagelist);
+               struct list_head *l, *l2;
 
                if (!is_initial_xendomain())
                        return -EPERM;
@@ -176,34 +208,74 @@ static long privcmd_ioctl(struct file *f
                if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                        return -EINVAL;
 
+               p = m.arr;
+               for (i=0; i<nr_pages; ) {
+                       int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+
+                       ret = -ENOMEM;
+                       l = (struct list_head *)__get_free_page(GFP_KERNEL);
+                       if (l == NULL)
+                               goto mmapbatch_out;
+
+                       INIT_LIST_HEAD(l);
+                       list_add_tail(l, &pagelist);
+
+                       mfn = (unsigned long*)(l + 1);
+                       ret = -EFAULT;
+                       if (copy_from_user(mfn, p, nr*sizeof(*mfn)))
+                               goto mmapbatch_out;
+
+                       i += nr; p+= nr;
+               }
+
                down_write(&mm->mmap_sem);
 
                vma = find_vma(mm, m.addr);
+               ret = -EINVAL;
                if (!vma ||
                    (m.addr != vma->vm_start) ||
                    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
                    !privcmd_enforce_singleshot_mapping(vma)) {
                        up_write(&mm->mmap_sem);
-                       return -EINVAL;
+                       goto mmapbatch_out;
                }
 
                p = m.arr;
                addr = m.addr;
-               for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
-                       if (get_user(mfn, p)) {
-                               up_write(&mm->mmap_sem);
-                               return -EFAULT;
+               i = 0;
+               ret = 0;
+               list_for_each(l, &pagelist) {
+                       int nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+                       mfn = (unsigned long *)(l + 1);
+
+                       while (i<nr) {
+                               if(direct_remap_pfn_range(vma, addr & PAGE_MASK,
+                                                         *mfn, PAGE_SIZE,
+                                                         vma->vm_page_prot, m.dom) < 0) {
+                                       *mfn |= 0xf0000000U;
+                                       ret++;
+                               }
+                               mfn++; i++; addr += PAGE_SIZE;
                        }
-
-                       ret = direct_remap_pfn_range(vma, addr & PAGE_MASK,
-                                                    mfn, PAGE_SIZE,
-                                                    vma->vm_page_prot, m.dom);
-                       if (ret < 0)
-                               put_user(0xF0000000 | mfn, p);
                }
 
                up_write(&mm->mmap_sem);
-               ret = 0;
+               if (ret > 0) {
+                       p = m.arr;
+                       i = 0;
+                       ret = 0;
+                       list_for_each(l, &pagelist) {
+                               int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
+                               mfn = (unsigned long *)(l + 1);
+                               if (copy_to_user(p, mfn, nr*sizeof(*mfn)))
+                                       ret = -EFAULT;
+                               i += nr; p += nr;
+                       }
+               }
+       mmapbatch_out:
+               list_for_each_safe(l,l2,&pagelist)
+                       free_page((unsigned long)l);
+#undef MMAPBATCH_NR_PER_PAGE
        }
        break;
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
