On Wed, Mar 28, 2007 at 01:56:16PM +0100, Derek Murray wrote:
> +static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
> +{
...
> + /* In this case, we simply insert the page into the VM
> + * area. */
> + ret = vm_insert_page(vma, user_vaddr, page);
...
> +undo_map_out:
> +
> + /* If we have a mapping failure during the mmap, we rollback all
> + * mappings.
> + */
> +
> + /* First undo the kernel mappings. */
> + for (i = 0; i < undo_count_kmaps; ++i) {
> + struct gnttab_unmap_grant_ref unmap_op;
> + gnttab_set_unmap_op(&unmap_op,
> + get_kernel_vaddr(vma, slot_index + i),
> + GNTMAP_host_map,
> + private_data->grants[slot_index + i]
> + .u.valid.kernel_handle);
> + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
> + &unmap_op, 1);
> + BUG_ON(ret);
> +
> + printk("Kernel unmap status = %d\n", unmap_op.status);
> +
> + /* Return slot to the not-yet-mapped state, so that it may be
> + * mapped again, or removed by a subsequent ioctl.
> + */
> + private_data->grants[slot_index+i].state =
> + GNTDEV_SLOT_NOT_YET_MAPPED;
> +
> + /* Invalidate the physical to machine mapping for this page. */
> + set_phys_to_machine(__pa(get_kernel_vaddr(vma, slot_index + i))
> + >> PAGE_SHIFT, INVALID_P2M_ENTRY);
> + }
This doesn't undo vm_insert_page() in the auto-translated physmap case.
Probably just calling zap_page_range() would be OK instead of the
undo_count_kmaps loop.
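Roughly like this (untested sketch; it assumes the pages were inserted
starting at vma->vm_start, that the VMA's zap_pte hook already points at
gntdev_clear_pte() so the per-page grant teardown happens underneath, and
it uses the 2.6-era zap_page_range() signature):

	/* Let the generic unmap path tear down the user mappings,
	 * including any set up by vm_insert_page(); the zap_pte hook
	 * then takes care of the kernel-side grant unmaps per page. */
	zap_page_range(vma, vma->vm_start,
		       (unsigned long)undo_count_kmaps << PAGE_SHIFT,
		       NULL);

That way the auto-translated and non-auto-translated cases would share a
single rollback path.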
> +static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
> + pte_t *ptep, int is_fullmm)
> +{
> + int slot_index, ret;
> + pte_t copy;
> + struct gnttab_unmap_grant_ref op;
> + gntdev_file_private_data_t *private_data
> + = (gntdev_file_private_data_t *) vma->vm_file->private_data;
> +
> + /* Copy the existing value of the PTE for returning. */
> + copy = *ptep;
> +
> + /* Calculate the grant relating to this PTE. */
> + slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
> +
> + /* First, we unmap the grant from kernel space. */
> + gnttab_set_unmap_op(&op, get_kernel_vaddr(vma, slot_index),
> + GNTMAP_host_map,
> + private_data->grants[slot_index].u.valid
> + .kernel_handle);
> + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
> + BUG_ON(ret);
> + if (op.status)
> + printk("Kernel unmap grant status = %d\n", op.status);
> +
> + /* Next, we clear the user space mapping. */
> + if (!xen_feature(XENFEAT_auto_translated_physmap)) {
> + /* NOT USING SHADOW PAGE TABLES. */
> + gnttab_set_unmap_op(&op, virt_to_machine(ptep),
> + GNTMAP_contains_pte,
> + private_data->grants[slot_index].u.valid
> + .user_handle);
> + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
> + &op, 1);
> + if (op.status)
> + printk("User unmap grant status = %d\n", op.status);
> + BUG_ON(ret);
> + } else {
> + /* USING SHADOW PAGE TABLES. */
> + pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
Please clear the entry before the grant table unmap. If the virtual
address is accessed between the grant table unmap and pte_clear_full(),
something bad could happen. The higher-level exclusion is in place, so I
might be somewhat paranoid, though.
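For this branch that could look like the following (sketch only; it just
reorders the existing calls, using ptep_get_and_clear_full() so the fetch
and the clear are a single atomic step):

	/* Clear the user PTE first, so nothing can reach the page
	 * through this mapping while the grant is being torn down. */
	copy = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);

	/* Only now unmap the grant from kernel space. */
	gnttab_set_unmap_op(&op, get_kernel_vaddr(vma, slot_index),
			    GNTMAP_host_map,
			    private_data->grants[slot_index].u.valid
			    .kernel_handle);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
	BUG_ON(ret);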
> +/* Called when an ioctl is made on the device.
> + */
> +static int gntdev_ioctl(struct inode *inode, struct file *flip,
> + unsigned int cmd, unsigned long arg)
> +{
> + int rc = 0;
> + switch (cmd) {
> + case IOCTL_GNTDEV_MAP_GRANT_REF:
> + {
> + struct ioctl_gntdev_map_grant_ref op;
> + struct ioctl_gntdev_grant_ref ref;
> + down_write(&current->mm->mmap_sem);
> +
> + if ((rc = copy_from_user(&op,
> + (void __user *) arg,
> + sizeof(op)))) {
copy_from_user()/copy_to_user() may result in a page fault, and the page
fault handler may try to acquire mmap_sem, causing a deadlock.
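The usual way around that is to do the user copies outside the lock
(sketch; assumes the argument only needs to be read once up front and
written back once at the end):

	struct ioctl_gntdev_map_grant_ref op;

	/* Copy the argument in before taking mmap_sem, so a fault here
	 * is handled without the semaphore held. */
	if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
		return -EFAULT;

	down_write(&current->mm->mmap_sem);
	/* ... operate on the kernel copy of op ... */
	up_write(&current->mm->mmap_sem);

	/* Likewise, copy results back only after dropping mmap_sem. */
	if (copy_to_user((void __user *)arg, &op, sizeof(op)))
		return -EFAULT;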
--
yamahata