HVM domains do not allow direct PTE modification, so instead we request
that Xen change its internal p2m mappings on the allocated pages, and
then map the memory into userspace normally.
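
For illustration, a minimal sketch of the difference between the two
paths (set_map_addr() is a hypothetical helper, not part of this patch;
the identifiers it uses are all taken from the code below):

	/* On PV domains the map op carries the machine address of the
	 * PTE that Xen should rewrite; on auto-translated (HVM) domains
	 * it carries the kernel virtual address of the backing page, so
	 * Xen updates its p2m table instead.
	 */
	static void set_map_addr(struct gnttab_map_grant_ref *op,
				 struct grant_map *map, int i,
				 uint32_t flags, grant_ref_t ref,
				 domid_t domid)
	{
		phys_addr_t addr;

		if (use_ptemod)
			addr = map->pginfo[i].pte_maddr;
		else
			addr = (phys_addr_t)pfn_to_kaddr(
					page_to_pfn(map->pages[i]));
		gnttab_set_map_op(op, addr, flags, ref, domid);
	}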
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
drivers/xen/gntdev.c | 109 +++++++++++++++++++++++++++++++--------------
drivers/xen/grant-table.c | 7 +++
2 files changed, 83 insertions(+), 33 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f1fc8fa..0985577 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -49,6 +50,8 @@ static int limit = 1024*1024;
module_param(limit, int, 0644);
static atomic_t pages_mapped = ATOMIC_INIT(0);
+static int use_ptemod = 0;
+
struct gntdev_priv {
struct list_head maps;
spinlock_t lock;
@@ -209,9 +212,12 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token, unsigned long addr, void
static int map_grant_pages(struct grant_map *map)
{
int i, flags, err = 0;
+ phys_addr_t addr;
struct gnttab_map_grant_ref* map_ops = NULL;
- flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
+ flags = GNTMAP_host_map;
+ if (use_ptemod)
+ flags |= GNTMAP_application_map | GNTMAP_contains_pte;
if (map->is_ro)
flags |= GNTMAP_readonly;
@@ -221,7 +227,11 @@ static int map_grant_pages(struct grant_map *map)
goto out;
for(i=0; i < map->count; i++) {
- gnttab_set_map_op(&map_ops[i], map->pginfo[i].pte_maddr, flags,
+ if (use_ptemod)
+ addr = map->pginfo[i].pte_maddr;
+ else
+ addr = (phys_addr_t)pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ gnttab_set_map_op(&map_ops[i], addr, flags,
map->pginfo[i].target.ref,
map->pginfo[i].target.domid);
}
@@ -253,6 +263,7 @@ static void unmap_grant_pages(struct grant_map *map, int offset, int pages)
int i, flags, err = 0;
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_unmap_grant_ref unmap_single;
+ phys_addr_t addr;
if (pages > 1) {
unmap_ops = kzalloc(sizeof(unmap_ops[0]) * pages,
@@ -266,14 +277,23 @@ static void unmap_grant_pages(struct grant_map *map, int offset, int pages)
unmap_ops = &unmap_single;
}
- flags = GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte;
+ flags = GNTMAP_host_map;
+ if (use_ptemod)
+ flags |= GNTMAP_application_map | GNTMAP_contains_pte;
if (map->is_ro)
flags |= GNTMAP_readonly;
- for(i=0; i < pages; i++)
- gnttab_set_unmap_op(&unmap_ops[i],
- map->pginfo[offset+i].pte_maddr, flags,
+ for(i=0; i < pages; i++) {
+ if (WARN_ON(!map->pages[offset+i]))
+ continue;
+ if (use_ptemod)
+ addr = map->pginfo[offset+i].pte_maddr;
+ else
+ addr = (phys_addr_t)pfn_to_kaddr(page_to_pfn(map->pages[offset+i]));
+ gnttab_set_unmap_op(&unmap_ops[i], addr, flags,
map->pginfo[offset+i].handle);
+ }
+
if (debug)
printk("%s: map %d+%d [%d+%d]\n", __FUNCTION__,
map->index, map->count, offset, pages);
@@ -284,8 +304,25 @@ static void unmap_grant_pages(struct grant_map *map, int offset, int pages)
goto out;
for (i = 0; i < pages; i++) {
+ uint32_t check, *tmp;
WARN_ON(unmap_ops[i].status);
- __free_page(map->pages[offset+i]);
+ if (!map->pages[offset+i])
+ continue;
+ /* XXX When unmapping, Xen will sometimes end up mapping the GFN
+ * to an invalid MFN. In this case, writes will be discarded and
+ * reads will return all 0xFF bytes. Leak these unusable GFNs
+ * until a way to restore them is found.
+ */
+ tmp = kmap(map->pages[offset+i]);
+ tmp[0] = 0xdeaddead;
+ mb();
+ check = tmp[0];
+ kunmap(map->pages[offset+i]);
+ if (check == 0xdeaddead)
+ __free_page(map->pages[offset+i]);
+ else if (debug)
+ printk("%s: Discard page %d=%lu\n", __func__,
+ offset+i, page_to_pfn(map->pages[offset+i]));
map->pages[offset+i] = NULL;
map->pginfo[offset+i].handle = 0;
}
@@ -308,18 +345,8 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
gntdev_put_map(map);
}
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- if (debug)
- printk("%s: vaddr %p, pgoff %ld (shouldn't happen)\n",
- __FUNCTION__, vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
-}
-
static struct vm_operations_struct gntdev_vmops = {
.close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
};
/* ------------------------------------------------------------------ */
@@ -401,14 +428,16 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
+ if (use_ptemod) {
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
}
- priv->mn.ops = &gntdev_mmu_ops;
- mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
flip->private_data = priv;
if (debug)
@@ -433,7 +462,8 @@ static int gntdev_release(struct inode *inode, struct file *flip)
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
@@ -577,7 +607,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct grant_map *map;
- int err = -EINVAL;
+ int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -592,7 +622,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
if (use_ptemod && map->vma)
goto unlock_out;
- if (priv->mm != vma->vm_mm) {
+ if (use_ptemod && priv->mm != vma->vm_mm) {
printk("%s: Huh? Other mm?\n", __FUNCTION__);
goto unlock_out;
}
@@ -615,18 +645,29 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
spin_unlock(&priv->lock);
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err)
- return err;
+ if (use_ptemod) {
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err)
+ return err;
+ }
err = map_grant_pages(map);
if (err)
return err;
-
+
map->is_mapped = 1;
+ if (!use_ptemod) {
+ for(i = 0; i < count; i++) {
+ err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+ map->pages[i]);
+ if (err)
+ return err;
+ }
+ }
+
return 0;
unlock_out:
@@ -657,6 +698,8 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
+ use_ptemod = xen_pv_domain();
+
err = misc_register(&gntdev_miscdev);
if (err != 0) {
printk(KERN_ERR "Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index a5cf820..c8ab76e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -457,6 +457,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
pfn = mfn_to_pfn(map_ops[i].host_addr >> PAGE_SHIFT);
pte = (pte_t *) __va((pfn << PAGE_SHIFT) +
@@ -476,6 +479,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
int i, ret;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops,
count);
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++)
m2p_remove_override(pages[i]);
--
1.7.2.3