This changes the /dev/xen/gntdev device to work in HVM domains, and
also makes mmap() on the device behave more like mmap() on a regular
file: the mapping follows the lifetime of the VMA instead of requiring
an ioctl() call around each mmap()/munmap().
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
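Note (not for the commit message): a minimal userspace sketch of the intended
flow, assuming the device node is /dev/xen/gntdev, that the gntdev.h header
below is visible to userspace as <xen/gntdev.h>, and using the
IOCTL_GNTDEV_MAP_GRANT_REF_2 ioctl and GNTDEV_MAP_WRITABLE flag added in this
patch; the helper name and error handling are illustrative only.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

/* Map a single grant @ref from domain @domid read-write and return its
 * userspace address, or NULL on failure. */
static void *map_one_grant(int fd, uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref op;
	void *addr;

	memset(&op, 0, sizeof(op));
	op.count = 1;
	op.flags = GNTDEV_MAP_WRITABLE;	/* new flag, see gntdev.h below */
	op.refs[0].domid = domid;
	op.refs[0].ref = ref;

	/* The _2 variant performs the map hypercall immediately; the
	 * original ioctl now defers it until mmap() time. */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF_2, &op) < 0)
		return NULL;

	/* The new code requires a shared mapping (MAP_SHARED). */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
	return addr == MAP_FAILED ? NULL : addr;
}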
drivers/xen/Kconfig | 3 +-
drivers/xen/gntdev.c | 612 +++++++++++++++++++++++---------------------------
include/xen/gntdev.h | 9 +-
3 files changed, 295 insertions(+), 329 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index fa9982e..a9f3a8f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -176,9 +176,8 @@ config XEN_XENBUS_FRONTEND
config XEN_GNTDEV
tristate "userspace grant access device driver"
depends on XEN
- select MMU_NOTIFIER
help
- Allows userspace processes use grants.
+ Allows userspace processes to map grants from other domains.
config XEN_S3
def_bool y
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a33e443..15f5c9c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -44,31 +44,37 @@ MODULE_DESCRIPTION("User-space granted page access driver");
static int debug = 0;
module_param(debug, int, 0644);
-static int limit = 1024;
+
+static int limit = 1024*1024;
module_param(limit, int, 0644);
+static atomic_t pages_mapped = ATOMIC_INIT(0);
+
struct gntdev_priv {
- struct list_head maps;
- uint32_t used;
- uint32_t limit;
spinlock_t lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
+ struct list_head maps;
+};
+
+struct granted_page {
+ struct page* page;
+ union {
+ struct ioctl_gntdev_grant_ref target;
+ grant_handle_t handle;
+ };
};
struct grant_map {
- struct list_head next;
- struct gntdev_priv *priv;
- struct vm_area_struct *vma;
- int index;
- int count;
- int flags;
- int is_mapped;
- struct ioctl_gntdev_grant_ref *grants;
- struct gnttab_map_grant_ref *map_ops;
- struct gnttab_unmap_grant_ref *unmap_ops;
+ struct list_head next; /* next in file */
+ int index; /* offset in parent */
+ int count; /* size in pages */
+ atomic_t users; /* reference count */
+ unsigned int is_mapped:1; /* has map hypercall been run? */
+ unsigned int is_ro:1; /* is the map read-only? */
+ struct granted_page pages[0]; /* pages used for mapping */
};
+static struct vm_operations_struct gntdev_vmops;
+
/* ------------------------------------------------------------------ */
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -76,51 +82,46 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
{
struct grant_map *map;
- printk("%s: maps list (priv %p, usage %d/%d)\n",
- __FUNCTION__, priv, priv->used, priv->limit);
- list_for_each_entry(map, &priv->maps, next)
- printk(" index %2d, count %2d %s\n",
- map->index, map->count,
+ printk("%s: maps list (priv %p)\n", __FUNCTION__, priv);
+ list_for_each_entry(map, &priv->maps, next) {
+ printk(" %p: %2d+%2d, r%c, %s %d,%d %s\n", map,
+ map->index, map->count, map->is_ro ? 'o' : 'w',
+ map->is_mapped ? "use,hnd" : "dom,ref",
+ map->is_mapped ? atomic_read(&map->users)
+ : map->pages[0].target.domid,
+ map->is_mapped ? map->pages[0].handle
+ : map->pages[0].target.ref,
map->index == text_index && text ? text : "");
+ }
}
-static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+static struct grant_map *gntdev_alloc_map(int count,
+ struct ioctl_gntdev_grant_ref* grants)
{
struct grant_map *add;
+ int i;
- add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
- if (NULL == add)
+ add = kzalloc(sizeof(struct grant_map) +
+ sizeof(struct granted_page) * count, GFP_KERNEL);
+ if (!add)
return NULL;
- add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
- add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
- add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
- if (NULL == add->grants ||
- NULL == add->map_ops ||
- NULL == add->unmap_ops)
- goto err;
-
- add->index = 0;
+ atomic_set(&add->users, 1);
add->count = count;
- add->priv = priv;
- if (add->count + priv->used > priv->limit)
- goto err;
+ for (i = 0; i < count; i++)
+ add->pages[i].target = grants[i];
return add;
-
-err:
- kfree(add->grants);
- kfree(add->map_ops);
- kfree(add->unmap_ops);
- kfree(add);
- return NULL;
}
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
struct grant_map *map;
+ spin_lock(&priv->lock);
+
+ /* Try to fit in the new mapping as early as possible */
list_for_each_entry(map, &priv->maps, next) {
if (add->index + add->count < map->index) {
list_add_tail(&add->next, &map->next);
@@ -131,225 +132,116 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
- priv->used += add->count;
if (debug)
gntdev_print_maps(priv, "[new]", add->index);
+
+ spin_unlock(&priv->lock);
}
-static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, int index,
- int count)
+static void __gntdev_del_map(struct gntdev_priv *priv, struct grant_map *map)
{
- struct grant_map *map;
+ list_del(&map->next);
+}
- list_for_each_entry(map, &priv->maps, next) {
- if (map->index != index)
- continue;
- if (map->count != count)
- continue;
- return map;
- }
- return NULL;
+static void gntdev_del_map(struct gntdev_priv *priv, struct grant_map *map)
+{
+ spin_lock(&priv->lock);
+ __gntdev_del_map(priv, map);
+ spin_unlock(&priv->lock);
}
-static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
- unsigned long vaddr)
+static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, int index,
+ int count)
{
struct grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (vaddr < map->vma->vm_start)
+ if (map->index != index)
continue;
- if (vaddr >= map->vma->vm_end)
+ if (map->count != count)
continue;
return map;
}
return NULL;
}
-static int gntdev_del_map(struct grant_map *map)
-{
- int i;
-
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
-
- map->priv->used -= map->count;
- list_del(&map->next);
- return 0;
-}
-
-static void gntdev_free_map(struct grant_map *map)
-{
- if (!map)
- return;
- kfree(map->grants);
- kfree(map->map_ops);
- kfree(map->unmap_ops);
- kfree(map);
-}
-
-/* ------------------------------------------------------------------ */
-
-static int find_grant_ptes(pte_t *pte, pgtable_t token, unsigned long addr, void *data)
-{
- struct grant_map *map = data;
- unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
- u64 pte_maddr;
-
- BUG_ON(pgnr >= map->count);
- pte_maddr = (u64)pfn_to_mfn(page_to_pfn(token)) << PAGE_SHIFT;
- pte_maddr += (unsigned long)pte & ~PAGE_MASK;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, map->flags,
- map->grants[pgnr].ref,
- map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, map->flags,
- 0 /* handle */);
- return 0;
-}
-
-static int map_grant_pages(struct grant_map *map)
+static void gntdev_unmap_fast(struct grant_map *map,
+ struct gnttab_unmap_grant_ref *unmap_ops)
{
- int i, err = 0;
+ int err, flags, i, unmap_size = 0;
+ phys_addr_t mfn;
- if (debug)
- printk("%s: map %d+%d\n", __FUNCTION__, map->index, map->count);
- err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
- map->map_ops, map->count);
- if (WARN_ON(err))
- return err;
+ flags = GNTMAP_host_map;
+ if (map->is_ro)
+ flags |= GNTMAP_readonly;
- for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status)
- err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ for (i=0; i < map->count; i++) {
+ if (!map->pages[i].page)
+ continue;
+ mfn = (phys_addr_t)pfn_to_kaddr(page_to_pfn(map->pages[i].page));
+ gnttab_set_unmap_op(&unmap_ops[unmap_size], mfn, flags,
+ map->pages[i].handle);
+ unmap_size++;
}
- return err;
-}
-
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
-{
- int i, err = 0;
- if (debug)
- printk("%s: map %d+%d [%d+%d]\n", __FUNCTION__,
- map->index, map->count, offset, pages);
err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
- map->unmap_ops + offset, pages);
- if (WARN_ON(err))
- return err;
+ unmap_ops, unmap_size);
+ WARN_ON(err);
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
- }
- return err;
+ for (i = 0; i < unmap_size; i++)
+ WARN_ON(unmap_ops[i].status);
}
-/* ------------------------------------------------------------------ */
-
-static void gntdev_vma_close(struct vm_area_struct *vma)
+/* for the out-of-memory case */
+static void gntdev_unmap_slow(struct grant_map *map)
{
- struct grant_map *map = vma->vm_private_data;
+ int err, flags, i;
+ phys_addr_t mfn;
+ struct gnttab_unmap_grant_ref unmap_op;
- if (debug)
- printk("%s\n", __FUNCTION__);
- map->is_mapped = 0;
- map->vma = NULL;
- vma->vm_private_data = NULL;
-}
+ flags = GNTMAP_host_map;
+ if (map->is_ro)
+ flags |= GNTMAP_readonly;
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- if (debug)
- printk("%s: vaddr %p, pgoff %ld (shouldn't happen)\n",
- __FUNCTION__, vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
-}
-
-static struct vm_operations_struct gntdev_vmops = {
- .close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
-};
-
-/* ------------------------------------------------------------------ */
-
-static void mn_invl_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
- unsigned long mstart, mend;
- int err;
-
- spin_lock(&priv->lock);
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (!map->is_mapped)
+ for (i=0; i < map->count; i++) {
+ if (!map->pages[i].page)
continue;
- if (map->vma->vm_start >= end)
- continue;
- if (map->vma->vm_end <= start)
- continue;
- mstart = max(start, map->vma->vm_start);
- mend = min(end, map->vma->vm_end);
- if (debug)
- printk("%s: map %d+%d (%lx %lx), range %lx %lx, mrange
%lx %lx\n",
- __FUNCTION__, map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- start, end, mstart, mend);
- err = unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
+
+ mfn = (phys_addr_t)pfn_to_kaddr(page_to_pfn(map->pages[i].page));
+ gnttab_set_unmap_op(&unmap_op, mfn, flags, map->pages[i].handle);
+ err = HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, &unmap_op, 1);
WARN_ON(err);
+ WARN_ON(unmap_op.status);
}
- spin_unlock(&priv->lock);
-}
-
-static void mn_invl_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}
-static void mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
+static void gntdev_put_map(struct grant_map *map)
{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
- int err;
-
- spin_lock(&priv->lock);
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (debug)
- printk("%s: map %d+%d (%lx %lx)\n",
- __FUNCTION__, map->index, map->count,
- map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, 0, map->count);
- WARN_ON(err);
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ int i;
+ if (!map)
+ return;
+ if (!atomic_dec_and_test(&map->users))
+ return;
+ if (debug)
+ printk("%s: unmap %p (%d pages)\n", __FUNCTION__, map,
map->count);
+ if (map->is_mapped) {
+ unmap_ops = kzalloc(sizeof(unmap_ops[0]) * map->count,
+ GFP_TEMPORARY);
+ if (likely(unmap_ops)) {
+ gntdev_unmap_fast(map, unmap_ops);
+ kfree(unmap_ops);
+ } else {
+ gntdev_unmap_slow(map);
+ }
+ atomic_sub(map->count, &pages_mapped);
}
- spin_unlock(&priv->lock);
+ for (i=0; i < map->count; i++)
+ __free_page(map->pages[i].page);
+ kfree(map);
}
-struct mmu_notifier_ops gntdev_mmu_ops = {
- .release = mn_release,
- .invalidate_page = mn_invl_page,
- .invalidate_range_start = mn_invl_range_start,
-};
-
-/* ------------------------------------------------------------------ */
-
static int gntdev_open(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv;
@@ -360,16 +252,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->limit = limit;
-
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
- }
- priv->mn.ops = &gntdev_mmu_ops;
- mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
flip->private_data = priv;
if (debug)
@@ -382,31 +264,93 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
- if (debug)
+ if (debug) {
printk("%s: priv %p\n", __FUNCTION__, priv);
+ gntdev_print_maps(priv, NULL, 0);
+ }
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
+static int gntdev_do_map(struct grant_map *map)
+{
+ int err, flags, i;
+ struct page* page;
+ phys_addr_t mfn;
+ struct gnttab_map_grant_ref* map_ops;
+
+ flags = GNTMAP_host_map;
+ if (map->is_ro)
+ flags |= GNTMAP_readonly;
+
+ err = -ENOMEM;
+
+ if (unlikely(atomic_add_return(map->count, &pages_mapped) > limit)) {
+ if (debug)
+ printk("%s: maps full\n", __FUNCTION__);
+ goto out;
+ }
+
+ map_ops = kzalloc(sizeof(map_ops[0]) * map->count, GFP_TEMPORARY);
+ if (!map_ops)
+ goto out;
+
+ for (i = 0; i < map->count; i++) {
+ page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO);
+ if (unlikely(!page))
+ goto out_free;
+ map->pages[i].page = page;
+ mfn = (phys_addr_t)pfn_to_kaddr(page_to_pfn(page));
+ gnttab_set_map_op(&map_ops[i], mfn, flags,
+ map->pages[i].target.ref,
+ map->pages[i].target.domid);
+ }
+
+ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ map_ops, map->count);
+ if (WARN_ON(err))
+ goto out_free;
+
+ map->is_mapped = 1;
+
+ for (i = 0; i < map->count; i++) {
+ if (map_ops[i].status) {
+ if (debug)
+ printk("%s: failed map at page %d: stat=%d\n",
+ __FUNCTION__, i, map_ops[i].status);
+ __free_page(map->pages[i].page);
+ map->pages[i].page = NULL;
+ err = -EINVAL;
+ } else {
+ map->pages[i].handle = map_ops[i].handle;
+ }
+ }
+
+out_free:
+ kfree(map_ops);
+out:
+ if (!map->is_mapped)
+ atomic_sub(map->count, &pages_mapped);
+ return err;
+}
+
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
- struct ioctl_gntdev_map_grant_ref __user *u)
+ struct ioctl_gntdev_map_grant_ref __user *u,
+ int delay_map)
{
struct ioctl_gntdev_map_grant_ref op;
struct grant_map *map;
+ struct ioctl_gntdev_grant_ref* grants;
int err;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -416,32 +360,48 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.count);
if (unlikely(op.count <= 0))
return -EINVAL;
- if (unlikely(op.count > priv->limit))
- return -EINVAL;
err = -ENOMEM;
- map = gntdev_alloc_map(priv, op.count);
+ grants = kmalloc(sizeof(grants[0]) * op.count, GFP_TEMPORARY);
+ if (!grants)
+ goto out_fail;
+
+ err = -EFAULT;
+ if (copy_from_user(grants, u->refs, sizeof(grants[0]) * op.count))
+ goto out_free;
+
+ map = gntdev_alloc_map(op.count, grants);
if (!map)
- return err;
- if (copy_from_user(map->grants, &u->refs,
- sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
- return err;
+ goto out_free;
+
+ if (!delay_map) {
+ if (!(op.flags & GNTDEV_MAP_WRITABLE))
+ map->is_ro = 1;
+ err = gntdev_do_map(map);
+ if (err)
+ goto out_unmap;
}
- spin_lock(&priv->lock);
gntdev_add_map(priv, map);
+
op.index = map->index << PAGE_SHIFT;
- spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
- return 0;
+ err = -EFAULT;
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ goto out_remove;
+
+ err = 0;
+
+out_free:
+ kfree(grants);
+out_fail:
+ return err;
+
+out_remove:
+ gntdev_del_map(priv, map);
+out_unmap:
+ gntdev_put_map(map);
+ goto out_free;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
@@ -449,21 +409,24 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
{
struct ioctl_gntdev_unmap_grant_ref op;
struct grant_map *map;
- int err = -EINVAL;
+ int err = 0;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
- if (debug)
- printk("%s: priv %p, del %d+%d\n", __FUNCTION__, priv,
- (int)op.index, (int)op.count);
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ __gntdev_del_map(priv, map);
+ } else
+ err = -EINVAL;
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
+
+ if (debug)
+ printk("%s: priv %p, del %d+%d = %p\n", __FUNCTION__, priv,
+ (int)op.index, (int)op.count, map);
+
+ gntdev_put_map(map);
return err;
}
@@ -471,6 +434,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct vm_area_struct *vma;
struct grant_map *map;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -479,40 +443,22 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
printk("%s: priv %p, offset for vaddr %lx\n", __FUNCTION__, priv,
(unsigned long)op.vaddr);
- spin_lock(&priv->lock);
- map = gntdev_find_map_vaddr(priv, op.vaddr);
- if (map == NULL ||
- map->vma->vm_start != op.vaddr) {
- spin_unlock(&priv->lock);
+ vma = find_vma(current->mm, op.vaddr);
+ if (!vma)
return -EINVAL;
- }
+
+ map = vma->vm_private_data;
+ if (vma->vm_ops != &gntdev_vmops || !map)
+ return -EINVAL;
+
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
- spin_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
return 0;
}
-static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
- struct ioctl_gntdev_set_max_grants __user *u)
-{
- struct ioctl_gntdev_set_max_grants op;
-
- if (copy_from_user(&op, u, sizeof(op)) != 0)
- return -EFAULT;
- if (debug)
- printk("%s: priv %p, limit %d\n", __FUNCTION__, priv, op.count);
- if (op.count > limit)
- return -EINVAL;
-
- spin_lock(&priv->lock);
- priv->limit = op.count;
- spin_unlock(&priv->lock);
- return 0;
-}
-
static long gntdev_ioctl(struct file *flip,
unsigned int cmd, unsigned long arg)
{
@@ -521,7 +467,7 @@ static long gntdev_ioctl(struct file *flip,
switch (cmd) {
case IOCTL_GNTDEV_MAP_GRANT_REF:
- return gntdev_ioctl_map_grant_ref(priv, ptr);
+ return gntdev_ioctl_map_grant_ref(priv, ptr, 1);
case IOCTL_GNTDEV_UNMAP_GRANT_REF:
return gntdev_ioctl_unmap_grant_ref(priv, ptr);
@@ -529,8 +475,8 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
- case IOCTL_GNTDEV_SET_MAX_GRANTS:
- return gntdev_ioctl_set_max_grants(priv, ptr);
+ case IOCTL_GNTDEV_MAP_GRANT_REF_2:
+ return gntdev_ioctl_map_grant_ref(priv, ptr, 0);
default:
if (debug)
@@ -542,6 +488,34 @@ static long gntdev_ioctl(struct file *flip,
return 0;
}
+static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct grant_map *map = vma->vm_private_data;
+ pgoff_t pgoff = vmf->pgoff - vma->vm_pgoff;
+
+ if (!map || !map->is_mapped || pgoff < 0 || pgoff >= map->count) {
+ if (debug)
+ printk("%s: vaddr %p, pgoff %ld (shouldn't happen)\n",
+ __FUNCTION__, vmf->virtual_address, pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vmf->page = map->pages[pgoff].page;
+ get_page(vmf->page);
+ return 0;
+}
+
+static void gntdev_vma_close(struct vm_area_struct *vma)
+{
+ struct grant_map *map = vma->vm_private_data;
+ gntdev_put_map(map);
+}
+
+static struct vm_operations_struct gntdev_vmops = {
+ .fault = gntdev_vma_fault,
+ .close = gntdev_vma_close,
+};
+
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
struct gntdev_priv *priv = flip->private_data;
@@ -550,53 +524,39 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
struct grant_map *map;
int err = -EINVAL;
- if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- if (debug)
- printk("%s: map %d+%d at %lx (pgoff %lx)\n", __FUNCTION__,
- index, count, vma->vm_start, vma->vm_pgoff);
-
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
+
+ if (debug)
+ printk("%s: map %d+%d at %lx (priv %p, map %p)\n", __func__,
+ index, count, vma->vm_start, priv, map);
+
if (!map)
goto unlock_out;
- if (map->vma)
- goto unlock_out;
- if (priv->mm != vma->vm_mm) {
- printk("%s: Huh? Other mm?\n", __FUNCTION__);
- goto unlock_out;
+
+ if (!map->is_mapped) {
+ map->is_ro = !(vma->vm_flags & VM_WRITE);
+ err = gntdev_do_map(map);
+ if (err)
+ goto unlock_out;
}
+ if ((vma->vm_flags & VM_WRITE) && map->is_ro)
+ goto unlock_out;
+
+ err = 0;
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED;
- vma->vm_flags |= VM_DONTCOPY;
vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_FOREIGN;
vma->vm_private_data = map;
- map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
-
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err) {
- goto unlock_out;
- if (debug)
- printk("%s: find_grant_ptes() failure.\n",
__FUNCTION__);
- }
-
- err = map_grant_pages(map);
- if (err) {
- goto unlock_out;
- if (debug)
- printk("%s: map_grant_pages() failure.\n",
__FUNCTION__);
- }
- map->is_mapped = 1;
+ atomic_inc(&map->users);
unlock_out:
spin_unlock(&priv->lock);
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
index 8bd1467..9df1ae3 100644
--- a/include/xen/gntdev.h
+++ b/include/xen/gntdev.h
@@ -47,11 +47,17 @@ struct ioctl_gntdev_grant_ref {
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
+#define IOCTL_GNTDEV_MAP_GRANT_REF_2 \
+_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_gntdev_map_grant_ref))
struct ioctl_gntdev_map_grant_ref {
/* IN parameters */
/* The number of grants to be mapped. */
uint32_t count;
- uint32_t pad;
+ /* Flags for this mapping */
+ union {
+ uint32_t flags;
+ uint32_t pad;
+ };
/* OUT parameters */
/* The offset to be used on a subsequent call to mmap(). */
uint64_t index;
@@ -59,6 +65,7 @@ struct ioctl_gntdev_map_grant_ref {
/* Array of grant references, of size @count. */
struct ioctl_gntdev_grant_ref refs[1];
};
+#define GNTDEV_MAP_WRITABLE 0x1
/*
* Removes the grant references from the mapping table of an instance of
--
1.7.2.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel