This patch adds reference counting to grant_map structures. This allows
userspace to perform mmap() on the gntdev device and then immediately
close the file handle or remove the mapping using the unmap ioctl, with
the mapped area remaining valid until unmapped.
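
For illustration, the usage pattern this enables looks roughly like the
sketch below. This is not part of the patch; it assumes the existing
gntdev ABI from xen/gntdev.h and the usual /dev/xen/gntdev device node,
and the domid/gref values are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

int main(void)
{
	struct ioctl_gntdev_map_grant_ref op;
	long psz = sysconf(_SC_PAGESIZE);
	void *area;
	int fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	memset(&op, 0, sizeof(op));
	op.count = 1;
	op.refs[0].domid = 0;  /* placeholder: granting domain id */
	op.refs[0].ref = 0;    /* placeholder: grant reference */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) < 0) {
		perror("ioctl");
		return 1;
	}

	area = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
	if (area == MAP_FAILED) { perror("mmap"); return 1; }

	/* With this patch the mapping holds its own reference, so the
	 * fd can go away while the mapped area stays valid. */
	close(fd);

	/* ... use area ... */
	munmap(area, psz);
	return 0;
}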
This also fixes an infinite loop in gntdev_release() when a gntdev
device is closed without first unmapping all areas: release now removes
each map from the list and drops its reference unconditionally, instead
of spinning on a map that gntdev_del_map() reported as busy.
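
Internally this is the familiar get/put idiom; a minimal standalone
sketch of the lifecycle follows (simplified types, with C11 atomics
standing in for the kernel's atomic_t; not the actual kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct map {
	atomic_int users;
	/* ... grant bookkeeping elided ... */
};

static struct map *map_alloc(void)
{
	struct map *m = calloc(1, sizeof(*m));
	if (m)
		atomic_init(&m->users, 1); /* creator holds the first ref */
	return m;
}

static void map_get(struct map *m)
{
	atomic_fetch_add(&m->users, 1); /* e.g. taken by mmap() */
}

static void map_put(struct map *m)
{
	/* The last holder (fd close, munmap, or the unmap ioctl) frees
	 * the map, so the release loop always terminates. */
	if (atomic_fetch_sub(&m->users, 1) == 1)
		free(m);
}

In the patch itself these roles are played by gntdev_alloc_map()
(initial reference), gntdev_mmap() (atomic_inc) and gntdev_put_map()
(atomic_dec_and_test).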
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
drivers/xen/gntdev.c | 69 ++++++++++++++++++++-----------------------------
1 files changed, 28 insertions(+), 41 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 6fe3c3c..11876bb 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -62,12 +62,12 @@ struct gntdev_priv {
struct grant_map {
struct list_head next;
- struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
int flags;
int is_mapped;
+ atomic_t users;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
@@ -118,7 +118,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->index = 0;
add->count = count;
- add->priv = priv;
+ atomic_set(&add->users, 1);
return add;
@@ -168,27 +168,17 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
return NULL;
}
-static int gntdev_del_map(struct grant_map *map)
-{
- int i;
-
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
-
- atomic_sub(map->count, &pages_mapped);
- list_del(&map->next);
- return 0;
-}
-
-static void gntdev_free_map(struct grant_map *map)
+static void gntdev_put_map(struct grant_map *map)
{
int i;
if (!map)
return;
+
+ if (!atomic_dec_and_test(&map->users))
+ return;
+
+ atomic_sub(map->count, &pages_mapped);
if (map->pages)
for (i = 0; i < map->count; i++) {
@@ -268,6 +258,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
map->is_mapped = 0;
map->vma = NULL;
vma->vm_private_data = NULL;
+ gntdev_put_map(map);
}
static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -389,17 +380,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
pr_debug("priv %p\n", priv);
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
@@ -426,16 +414,16 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (!map)
return err;
- if (copy_from_user(map->grants, &u->refs,
- sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
- return err;
- }
-
if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
pr_debug("%s: can't map: over limit\n", __FUNCTION__);
- gntdev_free_map(map);
+ gntdev_put_map(map);
+ return err;
+ }
+
+ if (copy_from_user(map->grants, &u->refs,
+ sizeof(map->grants[0]) * op.count) != 0) {
+ gntdev_put_map(map);
return err;
}
@@ -444,13 +432,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.index = map->index << PAGE_SHIFT;
spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+
return 0;
}
@@ -467,11 +451,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ list_del(&map->next);
+ gntdev_put_map(map);
+ err = 0;
+ }
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
return err;
}
@@ -551,6 +536,8 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
}
+ atomic_inc(&map->users);
+
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
--
1.7.3.4