The entire hypercall argument list does not need to be stored with each
mapping; only selected fields from the hypercall need to be tracked
between the ioctl, map, and unmap operations.
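
For reference, the per-page state that remains after this change is the
single structure added below; the union is safe because the grant
ref/domid pair is only needed until the map hypercall succeeds, after
which only the returned handle is needed for the eventual unmap:

    struct granted_page {
        u64 pte_maddr;  /* PTE machine address recorded by find_grant_ptes() */
        union {
            struct ioctl_gntdev_grant_ref target; /* ref/domid from the ioctl */
            grant_handle_t handle;                /* valid once the map succeeds */
        };
    };
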
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
drivers/xen/gntdev.c | 225 ++++++++++++++++++++++++++++++--------------------
1 files changed, 136 insertions(+), 89 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 59e6a51..d0802b5 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -60,18 +60,24 @@ struct gntdev_priv {
struct mmu_notifier mn;
};
+struct granted_page {
+ u64 pte_maddr;
+ union {
+ struct ioctl_gntdev_grant_ref target;
+ grant_handle_t handle;
+ };
+};
+
struct grant_map {
struct list_head next;
struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
- int flags;
- int is_mapped;
- struct ioctl_gntdev_grant_ref *grants;
- struct gnttab_map_grant_ref *map_ops;
- struct gnttab_unmap_grant_ref *unmap_ops;
+ int is_mapped:1;
+ int is_ro:1;
struct page **pages;
+ struct granted_page pginfo[0];
};
/* ------------------------------------------------------------------ */
@@ -91,24 +97,19 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#endif
}
-static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+static struct grant_map *gntdev_alloc_map(int count,
+ struct ioctl_gntdev_grant_ref* grants)
{
struct grant_map *add;
int i;
- add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
- if (NULL == add)
+ add = kzalloc(sizeof(*add) + sizeof(add->pginfo[0])*count, GFP_KERNEL);
+ if (!add)
return NULL;
- add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
- add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
- add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
- add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
- if (NULL == add->grants ||
- NULL == add->map_ops ||
- NULL == add->unmap_ops ||
- NULL == add->pages)
- goto err;
+ add->pages = kzalloc(sizeof(add->pages[0])*count, GFP_KERNEL);
+ if (!add->pages)
+ goto err_nopages;
for (i = 0; i < count; i++) {
add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -118,20 +119,18 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->index = 0;
add->count = count;
- add->priv = priv;
+ for(i = 0; i < count; i++)
+ add->pginfo[i].target = grants[i];
return add;
err:
- if (add->pages)
- for (i = 0; i < count; i++) {
- if (add->pages[i])
- __free_page(add->pages[i]);
- }
+ for (i = 0; i < count; i++) {
+ if (add->pages[i])
+ __free_page(add->pages[i]);
+ }
kfree(add->pages);
- kfree(add->grants);
- kfree(add->map_ops);
- kfree(add->unmap_ops);
+err_nopages:
kfree(add);
return NULL;
}
@@ -140,6 +139,7 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
struct grant_map *map;
+ spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
if (add->index + add->count < map->index) {
list_add_tail(&add->next, &map->next);
@@ -150,7 +150,9 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
+ add->priv = priv;
gntdev_print_maps(priv, "[new]", add->index);
+ spin_unlock(&priv->lock);
}
static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
@@ -191,9 +193,10 @@ static int gntdev_del_map(struct grant_map *map)
if (map->vma)
return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
+ if (map->is_mapped)
+ for (i = 0; i < map->count; i++)
+ if (map->pginfo[i].handle)
+ return -EBUSY;
atomic_sub(map->count, &pages_mapped);
list_del(&map->next);
@@ -207,15 +210,11 @@ static void gntdev_free_map(struct grant_map *map)
if (!map)
return;
- if (map->pages)
- for (i = 0; i < map->count; i++) {
- if (map->pages[i])
- __free_page(map->pages[i]);
- }
+ for (i = 0; i < map->count; i++) {
+ if (map->pages[i])
+ __free_page(map->pages[i]);
+ }
kfree(map->pages);
- kfree(map->grants);
- kfree(map->map_ops);
- kfree(map->unmap_ops);
kfree(map);
}
@@ -229,50 +228,96 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
+
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+ map->pginfo[pgnr].pte_maddr = pte_maddr;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- map->grants[pgnr].ref,
- map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- 0 /* handle */);
return 0;
}
static int map_grant_pages(struct grant_map *map)
{
- int i, err = 0;
+ int i, flags, err = 0;
+ struct gnttab_map_grant_ref* map_ops = NULL;
- pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs(map->map_ops, map->pages, map->count);
- if (err)
- return err;
+ flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
+ if (map->is_ro)
+ flags |= GNTMAP_readonly;
+
+ err = -ENOMEM;
+ map_ops = kzalloc(sizeof(map_ops[0]) * map->count, GFP_TEMPORARY);
+ if (!map_ops)
+ goto out;
+
+ for(i=0; i < map->count; i++) {
+ gnttab_set_map_op(&map_ops[i], map->pginfo[i].pte_maddr, flags,
+ map->pginfo[i].target.ref,
+ map->pginfo[i].target.domid);
+ }
+ pr_debug("%s: map %d+%d\n", __FUNCTION__, map->index, map->count);
+
+ err = gnttab_map_refs(map_ops, map->pages, map->count);
+
+ if (WARN_ON(err))
+ goto out;
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status)
+ if (map_ops[i].status) {
+ __free_page(map->pages[i]);
+ map->pages[i] = NULL;
err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ } else {
+ map->pginfo[i].handle = map_ops[i].handle;
+ }
}
+
+out:
+ kfree(map_ops);
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static void unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
- int i, err = 0;
+ int i, flags, err = 0;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_unmap_grant_ref unmap_single;
+
+ if (pages > 1) {
+ unmap_ops = kzalloc(sizeof(unmap_ops[0]) * pages,
+ GFP_TEMPORARY);
+ if (unlikely(!unmap_ops)) {
+ for(i=0; i < pages; i++)
+ unmap_grant_pages(map, offset + i, 1);
+ return;
+ }
+ } else {
+ unmap_ops = &unmap_single;
+ }
+
+ flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
+ if (map->is_ro)
+ flags |= GNTMAP_readonly;
+ for(i=0; i < pages; i++)
+ gnttab_set_unmap_op(&unmap_ops[i],
+ map->pginfo[offset+i].pte_maddr, flags,
+ map->pginfo[offset+i].handle);
pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+
+ err = gnttab_unmap_refs(unmap_ops, map->pages + offset, pages);
+
if (err)
- return err;
+ goto out;
for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
+ WARN_ON(unmap_ops[i].status);
+ __free_page(map->pages[offset+i]);
+ map->pages[offset+i] = NULL;
+ map->pginfo[offset+i].handle = 0;
}
- return err;
+out:
+ if (unmap_ops != &unmap_single)
+ kfree(unmap_ops);
}
/* ------------------------------------------------------------------ */
@@ -309,7 +354,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct grant_map *map;
unsigned long mstart, mend;
- int err;
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -327,10 +371,9 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
start, end, mstart, mend);
- err = unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
+ unmap_grant_pages(map,
+ (mstart - map->vma->vm_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
}
spin_unlock(&priv->lock);
}
@@ -347,7 +390,6 @@ static void mn_release(struct mmu_notifier *mn,
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct grant_map *map;
- int err;
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -356,8 +398,7 @@ static void mn_release(struct mmu_notifier *mn,
pr_debug("map %d+%d (%lx %lx)\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
+ unmap_grant_pages(map, /* offset */ 0, map->count);
}
spin_unlock(&priv->lock);
}
@@ -430,6 +471,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
{
struct ioctl_gntdev_map_grant_ref op;
struct grant_map *map;
+ struct ioctl_gntdev_grant_ref* grants;
int err;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -438,37 +480,44 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (unlikely(op.count <= 0))
return -EINVAL;
- err = -ENOMEM;
- map = gntdev_alloc_map(priv, op.count);
- if (!map)
- return err;
+ grants = kmalloc(sizeof(grants[0]) * op.count, GFP_TEMPORARY);
+ if (!grants)
+ return -ENOMEM;
- if (copy_from_user(map->grants, &u->refs,
- sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
- return err;
+ if (copy_from_user(grants, u->refs, sizeof(grants[0]) * op.count)) {
+ err = -EFAULT;
+ goto out_free;
}
+ err = -ENOMEM;
+ map = gntdev_alloc_map(op.count, grants);
+ if (!map)
+ goto out_free;
+
if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
pr_debug("%s: can't map: over limit\n", __FUNCTION__);
- gntdev_free_map(map);
- return err;
+ goto out_free_map;
}
- spin_lock(&priv->lock);
gntdev_add_map(priv, map);
op.index = map->index << PAGE_SHIFT;
- spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
+ if (copy_to_user(u, &op, sizeof(op))) {
+ err = -EFAULT;
+ goto out_remove;
}
- return 0;
+ err = 0;
+out_free:
+ kfree(grants);
+ return err;
+out_remove:
+ spin_lock(&priv->lock);
+ gntdev_del_map(map);
+ spin_unlock(&priv->lock);
+out_free_map:
+ gntdev_free_map(map);
+ goto out_free;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
@@ -574,9 +623,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_private_data = map;
map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
+ map->is_ro = !(vma->vm_flags & VM_WRITE);
spin_unlock(&priv->lock);
--
1.7.3.4