xen-devel
Re: [Xen-devel] Re: 2.6.37-rc1 mainline domU - BUG: unable to handle ker
 
 
 On Wed, Dec 1, 2010 at 10:33 PM, Jeremy Fitzhardinge  <jeremy@xxxxxxxx> wrote: 
On 12/01/2010 01:32 PM, Bruce Edge wrote: 
> I just checked the recently released 2.6.37-rc4, 
 
 Do you mean mainline 2.6.37-rc4, or the one in xen/next-2.6.37?
   mainline  
 
 
>  and while the BUG 
> signature is different, it still fails under NFS accesses. This is 
> 100% recreatable. 
 
 Please try this patch which is queued up for mainline.  It's already in 
xen/next-2.6.37.
  After testing the mainline .37 I also tried xen/next-2.6.37, with the same result: I still hit a BUG after accessing an NFS mount. I didn't compare the stack traces, but the BUG condition looked the same. 
I'll reconfirm the location and traceback from the xen/next kernel. -Bruce  
  
 
     J 
 
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c 
index 21ed8d7..0e4ecac 100644 
--- a/arch/x86/xen/mmu.c 
+++ b/arch/x86/xen/mmu.c 
@@ -2358,8 +2358,6 @@ void __init xen_init_mmu_ops(void) 
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 
        pv_mmu_ops = xen_mmu_ops; 
 
-       vmap_lazy_unmap = false; 
- 
        memset(dummy_mapping, 0xff, PAGE_SIZE); 
 } 
 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h 
index a03dcf6..44b54f6 100644 
--- a/include/linux/vmalloc.h 
+++ b/include/linux/vmalloc.h 
@@ -7,8 +7,6 @@ 
 
 struct vm_area_struct;         /* vma defining user mapping in mm_types.h */ 
 
-extern bool vmap_lazy_unmap; 
- 
 /* bits in flags of vmalloc's vm_struct below */ 
 #define VM_IOREMAP     0x00000001      /* ioremap() and friends */ 
 #define VM_ALLOC       0x00000002      /* vmalloc() */ 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c 
index a3d66b3..eb5cc7d 100644 
--- a/mm/vmalloc.c 
+++ b/mm/vmalloc.c 
@@ -31,8 +31,6 @@ 
 #include <asm/tlbflush.h> 
 #include <asm/shmparam.h> 
 
-bool vmap_lazy_unmap __read_mostly = true; 
- 
 /*** Page table manipulation functions ***/ 
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) 
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void) 
 { 
        unsigned int log; 
 
-       if (!vmap_lazy_unmap) 
-               return 0; 
- 
        log = fls(num_online_cpus()); 
 
        return log * (32UL * 1024 * 1024 / PAGE_SIZE); 
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, 
                        if (va->va_end > *end) 
                                *end = va->va_end; 
                        nr += (va->va_end - va->va_start) >> PAGE_SHIFT; 
-                       unmap_vmap_area(va); 
                        list_add_tail(&va->purge_list, &valist); 
                        va->flags |= VM_LAZY_FREEING; 
                        va->flags &= ~VM_LAZY_FREE; 
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void) 
 } 
 
 /* 
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been 
- * called for the correct range previously. 
+ * Free a vmap area, caller ensuring that the area has been unmapped 
+ * and flush_cache_vunmap had been called for the correct range 
+ * previously. 
  */ 
-static void free_unmap_vmap_area_noflush(struct vmap_area *va) 
+static void free_vmap_area_noflush(struct vmap_area *va) 
 { 
        va->flags |= VM_LAZY_FREE; 
        atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); 
@@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va) 
 } 
 
 /* 
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been 
+ * called for the correct range previously. 
+ */ 
+static void free_unmap_vmap_area_noflush(struct vmap_area *va) 
+{ 
+       unmap_vmap_area(va); 
+       free_vmap_area_noflush(va); 
+} 
+ 
+/* 
  * Free and unmap a vmap area 
  */ 
 static void free_unmap_vmap_area(struct vmap_area *va) 
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb) 
        spin_unlock(&vmap_block_tree_lock); 
        BUG_ON(tmp != vb); 
 
-       free_unmap_vmap_area_noflush(vb->va); 
+       free_vmap_area_noflush(vb->va); 
        call_rcu(&vb->rcu_head, rcu_free_vb); 
 } 
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size) 
        rcu_read_unlock(); 
        BUG_ON(!vb); 
 
+       vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); 
+ 
        spin_lock(&vb->lock); 
        BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order)); 
 
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void) 
 
                                s = vb->va->va_start + (i << PAGE_SHIFT); 
                                e = vb->va->va_start + (j << PAGE_SHIFT); 
-                               vunmap_page_range(s, e); 
                                flush = 1; 
 
                                if (s < start) 
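
For readers following along, the shape of the change is visible in the diff itself: the PTE teardown (unmap_vmap_area()/vunmap_page_range()) moves out of the deferred purge path and into the free paths, so only the TLB flush stays lazy, and the Xen-only vmap_lazy_unmap switch goes away. Below is a minimal userspace sketch of that ordering, not kernel code; the struct and function names (toy_area, map_one, free_one, purge_all) are invented for illustration, and the tie-in to the Xen domU crash is my reading of the patch, not something stated in this thread.

/*
 * Toy userspace model (not kernel code) of the ordering the patch
 * establishes: freeing a vmap area clears its page-table entries
 * immediately, and only the TLB flush is batched for the lazy purge.
 * Before the patch the PTE teardown was deferred as well, which is
 * presumably what left stale kernel mappings around on a Xen domU.
 */
#include <stdbool.h>
#include <stdio.h>

#define NAREAS 4

struct toy_area {
	bool mapped;         /* stands in for the kernel page tables     */
	bool flush_pending;  /* stands in for the lazy TLB-flush backlog */
};

static struct toy_area areas[NAREAS];

static void map_one(int i)
{
	areas[i].mapped = true;
}

/* Like the new free_unmap_vmap_area_noflush(): unmap now, flush later. */
static void free_one(int i)
{
	areas[i].mapped = false;       /* eager, as unmap_vmap_area() now is */
	areas[i].flush_pending = true; /* TLB flush still waits for a purge  */
}

/* Like __purge_vmap_area_lazy() after the patch: nothing left to unmap. */
static void purge_all(void)
{
	for (int i = 0; i < NAREAS; i++)
		areas[i].flush_pending = false;
}

int main(void)
{
	map_one(0);
	free_one(0);
	printf("before purge: mapped=%d flush_pending=%d\n",
	       areas[0].mapped, areas[0].flush_pending);   /* prints 0 1 */
	purge_all();
	printf("after purge:  mapped=%d flush_pending=%d\n",
	       areas[0].mapped, areas[0].flush_pending);   /* prints 0 0 */
	return 0;
}

The point is the first printf: by the time a freed area could be reused, its mapping is already gone, no matter when the purge eventually runs.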
 
 
  
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
 