From 6938ff544e7483d7eedb8eac9ccad489cced27e7 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Mon, 30 Nov 2009 21:24:59 -0500
Subject: [PATCH 1/2] [xen-fb] Provide a fb_mmap function.

Provide a fb_mmap function instead of using the fb_deferred_io_mmap
functionality. The reason behind this is that fb_deferred_io_mmap sets
the VM_IO flag, which in a Xen environment is reserved for pages that
have a physical device mapped. For Xen FB our "physical device" is a
2MB vmalloc area. The end result is that the Xen MMU sets the PTEs for
this 2MB area with the wrong MFN because it sees _PAGE_IOMAP set (which
is set whenever VM_IO is set).
---
 drivers/video/xen-fbfront.c |  125 ++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 123 insertions(+), 2 deletions(-)

diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 0c6b1c6..ae83653 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -32,6 +32,13 @@
 #include
 #include
 
+struct xenfb_mapping {
+	struct list_head link;
+	struct vm_area_struct *vma;
+	atomic_t map_refs;
+	struct xenfb_info *info;
+};
+
 struct xenfb_info {
 	unsigned char *fb;
 	struct fb_info *fb_info;
@@ -48,6 +55,9 @@ struct xenfb_info {
 	int resize_dpy; /* ditto */
 	spinlock_t resize_lock;
 
+	struct list_head mappings;
+	spinlock_t mm_lock;
+
 	struct xenbus_device *xbdev;
 };
 
@@ -321,12 +331,118 @@ static int xenfb_set_par(struct fb_info *info)
 	return 0;
 }
 
+static void xenfb_vm_close(struct vm_area_struct *vma)
+{
+	struct xenfb_mapping *map = vma->vm_private_data;
+	struct xenfb_info *info = map->info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->mm_lock, flags);
+	if (atomic_dec_and_test(&map->map_refs)) {
+		list_del(&map->link);
+		kfree(map);
+	}
+	spin_unlock_irqrestore(&info->mm_lock, flags);
+}
+
+static void xenfb_vm_open(struct vm_area_struct *vma)
+{
+	struct xenfb_mapping *map = vma->vm_private_data;
+
+	atomic_inc(&map->map_refs);
+}
+
+/* Find and return the vmalloc-ed fb pages. */
+static int xenfb_vm_fault(struct vm_area_struct *vma,
+			  struct vm_fault *vmf)
+{
+	struct xenfb_mapping *map = vma->vm_private_data;
+	struct xenfb_info *info = map->info;
+	unsigned long offset;
+	struct page *page;
+	int y1, y2;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+	if (offset >= info->fb_info->fix.smem_len)
+		return VM_FAULT_SIGBUS;
+
+	page = vmalloc_to_page(info->fb_info->screen_base + offset);
+	if (!page)
+		return VM_FAULT_SIGBUS;
+
+	get_page(page);
+
+	page->index = vmf->pgoff;
+
+	y1 = vmf->pgoff * PAGE_SIZE / info->fb_info->fix.line_length;
+	y2 = (vmf->pgoff * PAGE_SIZE + PAGE_SIZE - 1) /
+	     info->fb_info->fix.line_length;
+	if (y2 > info->fb_info->var.yres)
+		y2 = info->fb_info->var.yres;
+
+	xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
+
+	vmf->page = page;
+	return 0;
+}
+
+static struct vm_operations_struct xenfb_vm_ops = {
+	.open = xenfb_vm_open,
+	.close = xenfb_vm_close,
+	.fault = xenfb_vm_fault,
+};
+
+static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
+{
+	struct xenfb_info *info = fb_info->par;
+	struct xenfb_mapping *map;
+	int map_pages;
+	unsigned long flags;
+
+	if (!(vma->vm_flags & VM_WRITE))
+		return -EINVAL;
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
+	if (map_pages > info->nr_pages)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL)
+		return -ENOMEM;
+
+	map->vma = vma;
+	map->info = info;
+	atomic_set(&map->map_refs, 1);
+
+	spin_lock_irqsave(&info->mm_lock, flags);
+	list_add(&map->link, &info->mappings);
+	spin_unlock_irqrestore(&info->mm_lock, flags);
+
+	vma->vm_ops = &xenfb_vm_ops;
+	/* It is _extremely_ important that VM_IO is not set here. If it is
+	 * set, xen_set_pte (called later by __do_fault) will assign the PTE
+	 * to DOMID_IO, which is reserved for I/O pages (ioremap and its
+	 * friends), not vmalloc-ed ones. The result is an ugly infinite
+	 * page fault recursion. */
+	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
+	vma->vm_private_data = map;
+
+	return 0;
+}
+
 static struct fb_ops xenfb_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_read = fb_sys_read,
 	.fb_write = xenfb_write,
 	.fb_setcolreg = xenfb_setcolreg,
 	.fb_fillrect = xenfb_fillrect,
+	.fb_mmap = xenfb_mmap,
 	.fb_copyarea = xenfb_copyarea,
 	.fb_imageblit = xenfb_imageblit,
 	.fb_check_var = xenfb_check_var,
@@ -391,6 +507,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 	spin_lock_init(&info->dirty_lock);
 	spin_lock_init(&info->resize_lock);
 
+	spin_lock_init(&info->mm_lock);
+	INIT_LIST_HEAD(&info->mappings);
+
 	info->fb = vmalloc(fb_size);
 	if (info->fb == NULL)
 		goto error_nomem;
@@ -449,8 +568,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 		goto error;
 	}
 
+	/* The xenfb_mmap above replaces this.
 	fb_info->fbdefio = &xenfb_defio;
 	fb_deferred_io_init(fb_info);
+	*/
 
 	xenfb_init_shared_page(info, fb_info);
 
@@ -460,7 +581,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 
 	ret = register_framebuffer(fb_info);
 	if (ret) {
-		fb_deferred_io_cleanup(fb_info);
+		/* fb_deferred_io_cleanup(fb_info); */
 		fb_dealloc_cmap(&fb_info->cmap);
 		framebuffer_release(fb_info);
 		xenbus_dev_fatal(dev, ret, "register_framebuffer");
@@ -516,7 +637,7 @@ static int xenfb_remove(struct xenbus_device *dev)
 
 	xenfb_disconnect_backend(info);
 	if (info->fb_info) {
-		fb_deferred_io_cleanup(info->fb_info);
+		/* fb_deferred_io_cleanup(info->fb_info); */
 		unregister_framebuffer(info->fb_info);
 		fb_dealloc_cmap(&info->fb_info->cmap);
 		framebuffer_release(info->fb_info);
-- 
1.6.2.5
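
A quick way to exercise the new mmap path from userspace is a minimal fbdev
client like the sketch below. It is not part of the patch and assumes the
xen-fbfront framebuffer is registered as /dev/fb0; every page it touches
faults through xenfb_vm_fault(), which also schedules a refresh of the
affected lines:

/* Hypothetical test client, not part of the patch. Assumes /dev/fb0 is the
 * xen-fbfront device. */
#include <fcntl.h>
#include <linux/fb.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return 1;

	/* MAP_SHARED with PROT_WRITE and offset 0 is required; xenfb_mmap
	 * rejects private, read-only, or offset mappings with -EINVAL. */
	uint8_t *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;

	/* Each page written here faults into xenfb_vm_fault(), which maps
	 * the vmalloc-ed page and refreshes the lines it covers. */
	memset(fb, 0xff, fix.smem_len);

	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}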