[Xen-changelog] Merge.

# HG changeset patch
# User adsharma@xxxxxxxxxxxxxxxxxxxx
# Node ID 99914b54f7bffc8c27757a1ac2bc7a0d97597ac8
# Parent  84ee014ebd41cc93ca02b3a55f58b27575fdfcf9
# Parent  978ce7f6a3eb82c126b83a1b8e8665f16ea90de4
Merge.

diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Thu Aug 18 18:40:02 2005
@@ -541,7 +541,7 @@
 # CONFIG_IP_NF_MATCH_STATE is not set
 # CONFIG_IP_NF_MATCH_CONNTRACK is not set
 # CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+CONFIG_IP_NF_MATCH_PHYSDEV=y
 # CONFIG_IP_NF_MATCH_ADDRTYPE is not set
 # CONFIG_IP_NF_MATCH_REALM is not set
 # CONFIG_IP_NF_MATCH_SCTP is not set
@@ -689,7 +689,7 @@
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
 # CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
+CONFIG_SK98LIN=y
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
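The CONFIG_IP_NF_MATCH_PHYSDEV=y flips in these dom0 configs build the iptables physdev match, which lets a rule key on the bridge port a frame arrived through; Xen's per-vif antispoofing rules need exactly that on a bridged network (see the network-bridge change later in this patch). A sketch of the kind of rule this enables, with a made-up vif name and address:

    iptables -A FORWARD -m physdev --physdev-in vif1.0 -s 192.168.1.2 -j ACCEPT
    iptables -A FORWARD -m physdev --physdev-in vif1.0 -j DROP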
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Thu Aug 18 18:40:02 2005
@@ -480,7 +480,7 @@
 # CONFIG_IP_NF_MATCH_STATE is not set
 # CONFIG_IP_NF_MATCH_CONNTRACK is not set
 # CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+CONFIG_IP_NF_MATCH_PHYSDEV=y
 # CONFIG_IP_NF_MATCH_ADDRTYPE is not set
 # CONFIG_IP_NF_MATCH_REALM is not set
 # CONFIG_IP_NF_MATCH_SCTP is not set
@@ -611,7 +611,7 @@
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
 # CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
+CONFIG_SK98LIN=y
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Thu Aug 18 18:40:02 2005
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12.4-xenU
-# Mon Aug 15 19:25:22 2005
+# Linux kernel version: 2.6.12-xenU
+# Thu Aug 18 11:15:14 2005
 #
 CONFIG_XEN=y
 CONFIG_ARCH_XEN=y
@@ -270,7 +270,10 @@
 CONFIG_IP_ROUTE_MULTIPATH=y
 # CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
 CONFIG_IP_ROUTE_VERBOSE=y
-# CONFIG_IP_PNP is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
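Turning on CONFIG_IP_PNP and its DHCP/BOOTP/RARP sub-options lets an unprivileged (xenU) kernel autoconfigure its IP address at boot, which is what an NFS-root guest needs before userspace exists. A typical guest kernel command line using it (addresses are illustrative):

    ip=dhcp root=/dev/nfs nfsroot=192.168.1.1:/export/guest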
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c       Thu Aug 18 18:40:02 2005
@@ -49,13 +49,14 @@
  * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
-static char *io_tlb_start, *io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
+static char *iotlb_virt_start, *iotlb_virt_end;
+static dma_addr_t iotlb_bus_start, iotlb_bus_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between iotlb_virt_start and
+ * iotlb_virt_end.  This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long iotlb_nslabs;
 
 /*
  * When the IOMMU overflows we return a fallback buffer. This sets the size.
@@ -88,11 +89,14 @@
 static int __init
 setup_io_tlb_npages(char *str)
 {
+       /* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
        if (isdigit(*str)) {
-               io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
-                       (PAGE_SHIFT - IO_TLB_SHIFT);
-               /* avoid tail segment of size < IO_TLB_SEGSIZE */
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+               iotlb_nslabs = simple_strtoul(str, &str, 0) <<
+                       (20 - IO_TLB_SHIFT);
+               iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+               /* Round up to power of two (xen_create_contiguous_region). */
+               while (iotlb_nslabs & (iotlb_nslabs-1))
+                       iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
        }
        if (*str == ',')
                ++str;
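The loop added here rounds iotlb_nslabs up to a power of two, since xen_create_contiguous_region() takes an order, i.e. a power-of-two page count. Adding the lowest set bit (n & ~(n-1)) clears that bit and carries upward until only one bit remains; the same loop reappears in swiotlb_init_with_default_size() below. A minimal standalone sketch of the trick:

    /* Round n up to the next power of two; each step adds the lowest
     * set bit, clearing it and rippling a carry into a higher bit. */
    unsigned long roundup_pow2(unsigned long n)
    {
            while (n & (n - 1))
                    n += n & ~(n - 1);
            return n;            /* e.g. 10 -> 16, 64 -> 64 */
    }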
@@ -114,45 +118,55 @@
 void
 swiotlb_init_with_default_size (size_t default_size)
 {
-       unsigned long i;
-
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> PAGE_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
+       unsigned long i, bytes;
+
+       if (!iotlb_nslabs) {
+               iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+               /* Round up to power of two (xen_create_contiguous_region). */
+               while (iotlb_nslabs & (iotlb_nslabs-1))
+                       iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
+       }
+
+       bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
 
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-                                              (1 << IO_TLB_SHIFT));
-       if (!io_tlb_start)
+       iotlb_virt_start = alloc_bootmem_low_pages(bytes);
+       if (!iotlb_virt_start)
                panic("Cannot allocate SWIOTLB buffer");
 
        xen_create_contiguous_region(
-               (unsigned long)io_tlb_start, 
-               get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
-
-       io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+               (unsigned long)iotlb_virt_start, get_order(bytes));
+
+       iotlb_virt_end = iotlb_virt_start + bytes;
 
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
-       io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-       for (i = 0; i < io_tlb_nslabs; i++)
+        * between iotlb_virt_start and iotlb_virt_end.
+        */
+       io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
+       for (i = 0; i < iotlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(
-               io_tlb_nslabs * sizeof(*io_tlb_orig_addr));
+               iotlb_nslabs * sizeof(*io_tlb_orig_addr));
 
        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-       printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-              virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end-1));
+       iotlb_bus_start = virt_to_bus(iotlb_virt_start);
+       iotlb_bus_end   = iotlb_bus_start + bytes;
+       printk(KERN_INFO "Software IO TLB enabled: \n"
+              " Aperture:     %lu megabytes\n"
+              " Bus range:    0x%016lx - 0x%016lx\n"
+              " Kernel range: 0x%016lx - 0x%016lx\n",
+              bytes >> 20,
+              (unsigned long)iotlb_bus_start, (unsigned long)iotlb_bus_end,
+              (unsigned long)iotlb_virt_start, (unsigned long)iotlb_virt_end);
 }
 
 void
@@ -240,7 +254,7 @@
        {
                wrap = index = ALIGN(io_tlb_index, stride);
 
-               if (index >= io_tlb_nslabs)
+               if (index >= iotlb_nslabs)
                        wrap = index = 0;
 
                do {
@@ -260,7 +274,7 @@
                                      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
                                     i--)
                                        io_tlb_list[i] = ++count;
-                               dma_addr = io_tlb_start +
+                               dma_addr = iotlb_virt_start +
                                        (index << IO_TLB_SHIFT);
 
                                /*
@@ -268,13 +282,13 @@
                                 * the next round.
                                 */
                                io_tlb_index = 
-                                       ((index + nslots) < io_tlb_nslabs
+                                       ((index + nslots) < iotlb_nslabs
                                         ? (index + nslots) : 0);
 
                                goto found;
                        }
                        index += stride;
-                       if (index >= io_tlb_nslabs)
+                       if (index >= iotlb_nslabs)
                                index = 0;
                } while (index != wrap);
 
@@ -304,7 +318,7 @@
 {
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];
 
        /*
@@ -345,7 +359,7 @@
 static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];
        BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
        __sync_single(buffer, dma_addr, size, dir);
@@ -431,11 +445,9 @@
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                     int dir)
 {
-       char *dma_addr = bus_to_virt(dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               unmap_single(hwdev, dma_addr, size, dir);
+       BUG_ON(dir == DMA_NONE);
+       if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+               unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -452,22 +464,18 @@
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, int dir)
 {
-       char *dma_addr = bus_to_virt(dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               sync_single(hwdev, dma_addr, size, dir);
+       BUG_ON(dir == DMA_NONE);
+       if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+               sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, int dir)
 {
-       char *dma_addr = bus_to_virt(dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               sync_single(hwdev, dma_addr, size, dir);
+       BUG_ON(dir == DMA_NONE);
+       if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+               sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -603,11 +611,9 @@
 swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
                   size_t size, enum dma_data_direction direction)
 {
-       char *dma_addr = bus_to_virt(dma_address);
-
        BUG_ON(direction == DMA_NONE);
-       if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               unmap_single(hwdev, dma_addr, size, direction);
+       if ((dma_address >= iotlb_bus_start) && (dma_address < iotlb_bus_end))
+               unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
 }
 
 int
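Throughout this file the unmap/sync paths now test the device (bus) address against the aperture's bus range before converting it, instead of calling bus_to_virt() first and comparing virtual addresses; under Xen a bus address outside the aperture need not correspond to any sensible kernel virtual address. A sketch of the shape of the new test, using the iotlb_bus_* globals introduced at the top of the file:

    /* sketch only: mirrors the open-coded range checks above */
    static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
    {
            return (dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end);
    }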
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Thu Aug 18 18:40:02 2005
@@ -59,124 +59,124 @@
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_l1_entry_update(pte_t *ptr, pte_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pte_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pte_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pmd_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pmd_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pud_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pud_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif
 
 #ifdef CONFIG_X86_64
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pud;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = val.pud;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pgd;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = val.pgd;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
-    mmu_update_t u;
-    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    u.val = pfn;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+       u.val = pfn;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pt_switch(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_NEW_BASEPTR;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_new_user_pt(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_USER_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_NEW_USER_BASEPTR;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_LOCAL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_INVLPG_LOCAL;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_SMP
 
 void xen_tlb_flush_all(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_ALL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_TLB_FLUSH_ALL;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush_mask(cpumask_t *mask)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-    op.vcpumask = mask->bits;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       if ( cpus_empty(*mask) )
+               return;
+       op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+       op.vcpumask = mask->bits;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_all(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_ALL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_INVLPG_ALL;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_INVLPG_MULTI;
-    op.vcpumask = mask->bits;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       if ( cpus_empty(*mask) )
+               return;
+       op.cmd = MMUEXT_INVLPG_MULTI;
+       op.vcpumask = mask->bits;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #endif /* CONFIG_SMP */
@@ -184,221 +184,281 @@
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_pgd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
+       struct mmuext_op op;
 #ifdef CONFIG_X86_64
-    op.cmd = MMUEXT_PIN_L4_TABLE;
+       op.cmd = MMUEXT_PIN_L4_TABLE;
 #elif defined(CONFIG_X86_PAE)
-    op.cmd = MMUEXT_PIN_L3_TABLE;
+       op.cmd = MMUEXT_PIN_L3_TABLE;
 #else
-    op.cmd = MMUEXT_PIN_L2_TABLE;
-#endif
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       op.cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pgd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L1_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L1_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_64
 void xen_pud_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L3_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L3_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pud_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L2_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L2_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_SET_LDT;
-    op.linear_addr = ptr;
-    op.nr_ents = len;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_SET_LDT;
+       op.linear_addr = ptr;
+       op.nr_ents = len;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+/*
+ * Bitmap is indexed by page number. If bit is set, the page is part of a
+ * xen_create_contiguous_region() area of memory.
+ */
+unsigned long *contiguous_bitmap;
+
+static void contiguous_bitmap_set(
+       unsigned long first_page, unsigned long nr_pages)
+{
+       unsigned long start_off, end_off, curr_idx, end_idx;
+
+       curr_idx  = first_page / BITS_PER_LONG;
+       start_off = first_page & (BITS_PER_LONG-1);
+       end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+       end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+       if (curr_idx == end_idx) {
+               contiguous_bitmap[curr_idx] |=
+                       ((1UL<<end_off)-1) & -(1UL<<start_off);
+       } else {
+               contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
+               while ( ++curr_idx < end_idx )
+                       contiguous_bitmap[curr_idx] = ~0UL;
+               contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
+       }
+}
+
+static void contiguous_bitmap_clear(
+       unsigned long first_page, unsigned long nr_pages)
+{
+       unsigned long start_off, end_off, curr_idx, end_idx;
+
+       curr_idx  = first_page / BITS_PER_LONG;
+       start_off = first_page & (BITS_PER_LONG-1);
+       end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+       end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+       if (curr_idx == end_idx) {
+               contiguous_bitmap[curr_idx] &=
+                       -(1UL<<end_off) | ((1UL<<start_off)-1);
+       } else {
+               contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
+               while ( ++curr_idx != end_idx )
+                       contiguous_bitmap[curr_idx] = 0;
+               contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
+       }
 }
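The mask arithmetic in these two helpers selects the bit range [start_off, end_off) within a single word: -(1UL<<s) has bits s and up set, while (1UL<<e)-1 has the bits below e set, so their AND covers exactly s..e-1. A worked example:

    #include <stdio.h>
    int main(void)
    {
            /* bits [3,7) of a word: binary 01111000 */
            unsigned long mask = ((1UL << 7) - 1) & -(1UL << 3);
            printf("0x%lx\n", mask);        /* prints 0x78 */
            return 0;
    }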
 
 /* Ensure multi-page extents are contiguous in machine memory. */
 void xen_create_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t         *pgd; 
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long  mfn, i, flags;
-
-    scrub_pages(vstart, 1 << order);
-
-    balloon_lock(flags);
-
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
-
-    /* 2. Get a new contiguous memory extent. */
-    BUG_ON(HYPERVISOR_dom_mem_op(
-              MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
-
-    /* 3. Map the new extent in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
-    }
-
-    flush_tlb_all();
-
-    balloon_unlock(flags);
+       pgd_t         *pgd; 
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long  mfn, i, flags;
+
+       scrub_pages(vstart, 1 << order);
+
+       balloon_lock(flags);
+
+       /* 1. Zap current PTEs, giving away the underlying pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               mfn = pte_mfn(*pte);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+       }
+
+       /* 2. Get a new contiguous memory extent. */
+       BUG_ON(HYPERVISOR_dom_mem_op(
+               MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
+
+       /* 3. Map the new extent in place of old pages. */
+       for (i = 0; i < (1<<order); i++) {
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE),
+                       pfn_pte_ma(mfn+i, PAGE_KERNEL), 0));
+               xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
+       }
+
+       flush_tlb_all();
+
+       contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
+
+       balloon_unlock(flags);
 }
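In step 2 the last argument to HYPERVISOR_dom_mem_op() packs two values into one word: the low byte is the extent order, and the next byte appears to encode a required address width, so order | (32<<8) asks Xen for one machine-contiguous extent addressable in 32 bits (below 4GB), which is what the swiotlb needs for device DMA. Expressed as a hypothetical helper (the encoding is an assumption read off the call site):

    /* hypothetical: encodes the dom_mem_op extent argument used above */
    #define MEMOP_EXTENT_ARG(order, addr_bits) ((order) | ((addr_bits) << 8))
    /* MEMOP_EXTENT_ARG(4, 32) == 0x2004: 16 pages, below 2^32 */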
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t         *pgd; 
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long  mfn, i, flags;
-
-    scrub_pages(vstart, 1 << order);
-
-    balloon_lock(flags);
-
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
-
-    /* 2. Map new pages in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_increase_reservation, &mfn, 1, 0) != 1);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
-    }
-
-    flush_tlb_all();
-
-    balloon_unlock(flags);
+       pgd_t         *pgd; 
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long  mfn, i, flags;
+
+       scrub_pages(vstart, 1 << order);
+
+       balloon_lock(flags);
+
+       contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
+
+       /* 1. Zap current PTEs, giving away the underlying pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               mfn = pte_mfn(*pte);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+       }
+
+       /* 2. Map new pages in place of old pages. */
+       for (i = 0; i < (1<<order); i++) {
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_increase_reservation, &mfn, 1, 0) != 1);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE),
+                       pfn_pte_ma(mfn, PAGE_KERNEL), 0));
+               xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
+       }
+
+       flush_tlb_all();
+
+       balloon_unlock(flags);
 }
 
 
 unsigned long allocate_empty_lowmem_region(unsigned long pages)
 {
-    pgd_t         *pgd;
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long *pfn_array;
-    unsigned long  vstart;
-    unsigned long  i;
-    unsigned int   order = get_order(pages*PAGE_SIZE);
-
-    vstart = __get_free_pages(GFP_KERNEL, order);
-    if ( vstart == 0 )
-        return 0UL;
-
-    scrub_pages(vstart, 1 << order);
-
-    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-    if ( pfn_array == NULL )
-        BUG();
-
-    for ( i = 0; i < (1<<order); i++ )
-    {
-        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
-        pfn_array[i] = pte_mfn(*pte);
+       pgd_t         *pgd;
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long *pfn_array;
+       unsigned long  vstart;
+       unsigned long  i;
+       unsigned int   order = get_order(pages*PAGE_SIZE);
+
+       vstart = __get_free_pages(GFP_KERNEL, order);
+       if (vstart == 0)
+               return 0UL;
+
+       scrub_pages(vstart, 1 << order);
+
+       pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+       BUG_ON(pfn_array == NULL);
+
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
+               pfn_array[i] = pte_mfn(*pte);
 #ifdef CONFIG_X86_64
-        xen_l1_entry_update(pte, __pte(0));
+               xen_l1_entry_update(pte, __pte(0));
 #else
-        BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
-                                           __pte_ma(0), 0));
-#endif
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-    }
-
-    flush_tlb_all();
-
-    balloon_put_pages(pfn_array, 1 << order);
-
-    vfree(pfn_array);
-
-    return vstart;
+               BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
+                                                   __pte_ma(0), 0));
+#endif
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+       }
+
+       flush_tlb_all();
+
+       balloon_put_pages(pfn_array, 1 << order);
+
+       vfree(pfn_array);
+
+       return vstart;
 }
 
 EXPORT_SYMBOL(allocate_empty_lowmem_region);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Thu Aug 18 18:40:02 2005
@@ -41,6 +41,8 @@
 #include <asm/sections.h>
 #include <asm-xen/hypervisor.h>
 
+extern unsigned long *contiguous_bitmap;
+
 #if defined(CONFIG_SWIOTLB)
 extern void swiotlb_init(void);
 int swiotlb;
@@ -636,6 +638,11 @@
        int tmp;
        int bad_ppro;
        unsigned long pfn;
+
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (max_low_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
 
 #if defined(CONFIG_SWIOTLB)
        swiotlb_init(); 
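The bitmap is sized at one bit per low-memory page frame, rounded up to whole words (the 2*BITS_PER_LONG slack absorbs rounding at both ends), with >> 3 converting bits to bytes. A worked example, assuming the usual 896MB of i386 lowmem:

    #include <stdio.h>
    int main(void)
    {
            unsigned long max_low_pfn = 229376;  /* 896MB / 4KB pages */
            unsigned long bits_per_long = 32;    /* i386 */
            printf("%lu bytes\n", (max_low_pfn + 2*bits_per_long) >> 3);
            return 0;                            /* ~28KB of bitmap */
    }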
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Thu Aug 18 18:40:02 2005
@@ -300,17 +300,17 @@
 
 
 static int direct_remap_area_pte_fn(pte_t *pte, 
-                                    struct page *pte_page,
-                                    unsigned long address, 
-                                    void *data)
-{
-        mmu_update_t **v = (mmu_update_t **)data;
-
-        (*v)->ptr = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
-                    | ((unsigned long)pte & ~PAGE_MASK);
-        (*v)++;
-
-        return 0;
+                                   struct page *pte_page,
+                                   unsigned long address, 
+                                   void *data)
+{
+       mmu_update_t **v = (mmu_update_t **)data;
+
+       (*v)->ptr = ((physaddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
+                    PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
+       (*v)++;
+
+       return 0;
 }
 
 int direct_remap_area_pages(struct mm_struct *mm,
@@ -397,6 +397,16 @@
        }
 
        return generic_page_range(mm, address, size, f, NULL);
-}                 
+} 
 
 EXPORT_SYMBOL(touch_pte_range);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Thu Aug 18 18:40:02 2005
@@ -426,16 +426,10 @@
 #ifdef CONFIG_XEN
 static void __init contig_initmem_init(void)
 {
-        unsigned long bootmap_size, bootmap; 
-
-        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-        bootmap = start_pfn;
-        bootmap_size = init_bootmem(bootmap, end_pfn);
-        reserve_bootmem(bootmap, bootmap_size);
-        
-        free_bootmem(start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
-        reserve_bootmem(0, (PFN_PHYS(start_pfn) +
-                            bootmap_size + PAGE_SIZE-1));
+        unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
+        free_bootmem(0, end_pfn << PAGE_SHIFT);   
+        /* XXX KAF: Why can't we leave low 1MB of memory free? */
+        reserve_bootmem(0, (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1));
 }
 #else
 static void __init contig_initmem_init(void)
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Thu Aug 18 18:40:02 2005
@@ -39,6 +39,12 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/smp.h>
+
+extern unsigned long *contiguous_bitmap;
+
+#if defined(CONFIG_SWIOTLB)
+extern void swiotlb_init(void);
+#endif
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -794,8 +800,12 @@
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (end_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
+
 #if defined(CONFIG_SWIOTLB)
-       extern void swiotlb_init(void);
        swiotlb_init(); 
 #endif
 
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Thu Aug 18 18:40:02 2005
@@ -213,9 +213,7 @@
             {
                 BUG_ON(HYPERVISOR_update_va_mapping(
                     (unsigned long)__va(pfn << PAGE_SHIFT),
-                    __pte_ma((mfn_list[i] << PAGE_SHIFT) |
-                             pgprot_val(PAGE_KERNEL)),
-                    0));
+                    pfn_pte_ma(mfn_list[i], PAGE_KERNEL), 0));
             }
 
             /* Finally, relinquish the memory back to the system allocator. */
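pfn_pte_ma() builds the PTE in the PTE's own type. The open-coded (mfn_list[i] << PAGE_SHIFT) form it replaces shifts in unsigned long, which on a 32-bit PAE kernel truncates machine addresses at 4GB; presumably that is the motivation for this and the similar conversions elsewhere in this changeset. A standalone illustration of the truncation:

    #include <stdint.h>
    #include <stdio.h>
    int main(void)
    {
            uint32_t mfn = 0x00200000;                  /* frame at 8GB */
            uint64_t bad  = (uint32_t)(mfn << 12);      /* wraps to 0 */
            uint64_t good = (uint64_t)mfn << 12;        /* 0x200000000 */
            printf("0x%llx vs 0x%llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }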
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Thu Aug 18 18:40:02 2005
@@ -406,21 +406,15 @@
 #endif
 
 
-#ifdef CONFIG_XEN_BLKDEV_TAP_BE
     if ( HYPERVISOR_update_va_mapping_otherdomain(
         MMAP_VADDR(pending_idx, 0),
-        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+        pfn_pte_ma(req->frame_and_sects[0] >> PAGE_SHIFT, PAGE_KERNEL),
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
         0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
-        
+#else
+        0, blkif->domid) )
+#endif
         goto out;
-#else
-    if ( HYPERVISOR_update_va_mapping_otherdomain(
-        MMAP_VADDR(pending_idx, 0),
-        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
-        0, blkif->domid) ) 
-        
-        goto out;
-#endif
 #endif /* endif CONFIG_XEN_BLKDEV_GRANT */
    
     rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0), 
diff -r 84ee014ebd41 -r 99914b54f7bf linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h       Wed Aug 17 20:34:38 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h       Thu Aug 18 18:40:02 2005
@@ -26,7 +26,9 @@
 static inline int
 range_straddles_page_boundary(void *p, size_t size)
 {
-       return ((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE);
+       extern unsigned long *contiguous_bitmap;
+       return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+               !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
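With the contiguous_bitmap consulted, a buffer that crosses a page boundary no longer counts as straddling when it sits inside an area that xen_create_contiguous_region() made machine-contiguous, so it can be handed to the device without bouncing through the swiotlb. A standalone sketch of the predicate's logic (names and constants assumed):

    #include <stddef.h>
    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    /* crossing a page boundary only matters when the underlying
     * machine frames are not known to be contiguous */
    static int needs_bounce(unsigned long vaddr, size_t size,
                            int machine_contiguous)
    {
            return (((vaddr & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                   !machine_contiguous;
    }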
diff -r 84ee014ebd41 -r 99914b54f7bf tools/examples/network-bridge
--- a/tools/examples/network-bridge     Wed Aug 17 20:34:38 2005
+++ b/tools/examples/network-bridge     Thu Aug 18 18:40:02 2005
@@ -51,7 +51,7 @@
 
 bridge=${bridge:-xen-br0}
 netdev=${netdev:-eth0}
-antispoof=${antispoof:-yes}
+antispoof=${antispoof:-no}
 
 echo "*network $OP bridge=$bridge netdev=$netdev antispoof=$antispoof" >&2
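antispoof now defaults to off (it needs the physdev match enabled in the defconfigs earlier in this patch to be useful). Thanks to the ${antispoof:-no} expansion the old behaviour remains one variable away, e.g. (script path assumed):

    antispoof=yes /etc/xen/scripts/network-bridge start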
 
diff -r 84ee014ebd41 -r 99914b54f7bf tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Wed Aug 17 20:34:38 2005
+++ b/tools/python/xen/xm/create.py     Thu Aug 18 18:40:02 2005
@@ -23,6 +23,7 @@
 import sys
 import socket
 import commands
+import time
 
 import xen.lowlevel.xc
 
@@ -674,18 +675,33 @@
     return 0
 
 def balloon_out(dom0_min_mem, opts):
-    """Balloon out to get memory for domU, if necessarily"""
+    """Balloon out memory from dom0 if necessary"""
     SLACK = 4
+    timeout = 20 # 2s
+    ret = 0
 
     xc = xen.lowlevel.xc.new()
     pinfo = xc.physinfo()
-    free_mem = pinfo['free_pages']/256
-    if free_mem < opts.vals.memory + SLACK:
-        need_mem = opts.vals.memory + SLACK - free_mem
-        cur_alloc = get_dom0_alloc()
-        if cur_alloc - need_mem >= dom0_min_mem:
-            server.xend_domain_mem_target_set(0, cur_alloc - need_mem)
+    free_mem = pinfo['free_pages'] / 256
+    domU_need_mem = opts.vals.memory + SLACK 
+
+    dom0_cur_alloc = get_dom0_alloc()
+    dom0_new_alloc = dom0_cur_alloc - (domU_need_mem - free_mem)
+
+    if free_mem < domU_need_mem and dom0_new_alloc >= dom0_min_mem:
+
+        server.xend_domain_mem_target_set(0, dom0_new_alloc)
+
+        while dom0_cur_alloc > dom0_new_alloc and timeout > 0:
+            time.sleep(0.1) # sleep 100ms
+            dom0_cur_alloc = get_dom0_alloc()
+            timeout -= 1
+        
+        if dom0_cur_alloc > dom0_new_alloc:
+            ret = 1
+    
     del xc
+    return ret
 
 def main(argv):
     random.seed()
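balloon_out() now verifies that dom0 actually shrank instead of assuming the new target takes effect at once: it polls get_dom0_alloc() up to 20 times at 100ms intervals (the 2s timeout) and returns 1 on failure, which the next hunk uses to abort domain creation. The generic pattern, as a small sketch:

    import time

    def wait_until(pred, attempts=20, interval=0.1):
        """Poll pred() every interval seconds; True iff it held in time."""
        for _ in range(attempts):
            if pred():
                return True
            time.sleep(interval)
        return pred()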
@@ -717,7 +733,8 @@
     else:
         dom0_min_mem = xroot.get_dom0_min_mem()
         if dom0_min_mem != 0:
-            balloon_out(dom0_min_mem, opts)
+            if balloon_out(dom0_min_mem, opts):
+                return
 
         dom = make_domain(opts, config)
         if opts.vals.console_autoconnect:
diff -r 84ee014ebd41 -r 99914b54f7bf tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Wed Aug 17 20:34:38 2005
+++ b/tools/python/xen/xm/main.py       Thu Aug 18 18:40:02 2005
@@ -200,7 +200,11 @@
 def xm_list(args):
     use_long = 0
     show_vcpus = 0
-    (options, params) = getopt(args, 'lv', ['long','vcpus'])
+    try:
+        (options, params) = getopt(args, 'lv', ['long','vcpus'])
+    except GetoptError, opterr:
+        err(opterr)
+        sys.exit(1)
     
     n = len(params)
     for (k, v) in options:
diff -r 84ee014ebd41 -r 99914b54f7bf xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Aug 17 20:34:38 2005
+++ b/xen/arch/x86/mm.c Thu Aug 18 18:40:02 2005
@@ -3059,7 +3059,7 @@
     }
 
     /* Turn a sub-word access into a full-word access. */
-    if (bytes != sizeof(physaddr_t))
+    if ( bytes != sizeof(physaddr_t) )
     {
         int           rc;
         physaddr_t    full;
@@ -3076,6 +3076,10 @@
         val  &= (((physaddr_t)1 << (bytes*8)) - 1);
         val <<= (offset)*8;
         val  |= full;
+        /* Also fill in missing parts of the cmpxchg old value. */
+        old  &= (((physaddr_t)1 << (bytes*8)) - 1);
+        old <<= (offset)*8;
+        old  |= full;
     }
 
     /* Read the PTE that maps the page being updated. */
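When a sub-word write is emulated as a full-word cmpxchg, the bytes outside the written region must agree between the expected-old and new operands; before this fix only val was merged with the surrounding word, so the word-sized compare could only succeed if the untouched bytes happened to be zero. A sketch of the widening step (bytes < sizeof(full) assumed):

    #include <stdint.h>
    /* merge a sub-word value into its containing word; both 'old' and
     * 'new' must be widened this way for a word-sized cmpxchg to match */
    static uint64_t widen(uint64_t full, uint64_t sub,
                          unsigned offset, unsigned bytes)
    {
            uint64_t mask = ((((uint64_t)1 << (bytes * 8)) - 1)
                             << (offset * 8));
            return (full & ~mask) | ((sub << (offset * 8)) & mask);
    }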
@@ -3111,7 +3115,7 @@
     if ( do_cmpxchg )
     {
         ol1e = l1e_from_intpte(old);
-        if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
+        if ( cmpxchg((intpte_t *)pl1e, old, val) != old )
         {
             unmap_domain_page(pl1e);
             put_page_from_l1e(nl1e, d);
@@ -3299,8 +3303,8 @@
     
     /* Finally, make the p.t. page writable by the guest OS. */
     l1e_add_flags(pte, _PAGE_RW);
-    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(addr)],
-                                 &pte, sizeof(pte))) )
+    if ( unlikely(__put_user(pte.l1,
+                             &linear_pg_table[l1_linear_offset(addr)].l1)) )
     {
         MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                 &linear_pg_table[l1_linear_offset(addr)]);
diff -r 84ee014ebd41 -r 99914b54f7bf xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Aug 17 20:34:38 2005
+++ b/xen/arch/x86/setup.c      Thu Aug 18 18:40:02 2005
@@ -244,6 +244,8 @@
 
 #define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )
 
+static struct e820entry e820_raw[E820MAX];
+
 void __init __start_xen(multiboot_info_t *mbi)
 {
     char *cmdline;
@@ -253,7 +255,6 @@
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
     physaddr_t s, e;
-    struct e820entry e820_raw[E820MAX];
     int i, e820_raw_nr = 0, bytes = 0;
     struct ns16550_defaults ns16550 = {
         .data_bits = 8,
diff -r 84ee014ebd41 -r 99914b54f7bf xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Aug 17 20:34:38 2005
+++ b/xen/arch/x86/traps.c      Thu Aug 18 18:40:02 2005
@@ -159,10 +159,8 @@
         addr = *stack++;
         if ( is_kernel_text(addr) )
         {
-            if ( (i != 0) && ((i % 6) == 0) )
-                printk("\n   ");
             printk("[<%p>]", _p(addr));
-            print_symbol(" %s\n", addr);
+            print_symbol(" %s\n   ", addr);
             i++;
         }
     }
diff -r 84ee014ebd41 -r 99914b54f7bf xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed Aug 17 20:34:38 2005
+++ b/xen/arch/x86/x86_32/traps.c       Thu Aug 18 18:40:02 2005
@@ -66,8 +66,9 @@
 
     printk("CPU:    %d\nEIP:    %04lx:[<%08lx>]",
            smp_processor_id(), (unsigned long)0xffff & regs->cs, eip);
-    print_symbol(" %s\n", eip);
-    printk("EFLAGS: %08lx   CONTEXT: %s\n", eflags, context);
+    if ( !GUEST_MODE(regs) )
+        print_symbol(" %s", eip);
+    printk("\nEFLAGS: %08lx   CONTEXT: %s\n", eflags, context);
     printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
            regs->eax, regs->ebx, regs->ecx, regs->edx);
     printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08lx\n",
diff -r 84ee014ebd41 -r 99914b54f7bf xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Wed Aug 17 20:34:38 2005
+++ b/xen/arch/x86/x86_64/traps.c       Thu Aug 18 18:40:02 2005
@@ -17,8 +17,9 @@
 {
     printk("CPU:    %d\nEIP:    %04x:[<%016lx>]",
            smp_processor_id(), 0xffff & regs->cs, regs->rip);
-    print_symbol(" %s\n", regs->rip);
-    printk("EFLAGS: %016lx\n", regs->eflags);
+    if ( !GUEST_MODE(regs) )
+        print_symbol(" %s", regs->rip);
+    printk("\nEFLAGS: %016lx\n", regs->eflags);
     printk("rax: %016lx   rbx: %016lx   rcx: %016lx   rdx: %016lx\n",
            regs->rax, regs->rbx, regs->rcx, regs->rdx);
     printk("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
diff -r 84ee014ebd41 -r 99914b54f7bf xen/drivers/char/console.c
--- a/xen/drivers/char/console.c        Wed Aug 17 20:34:38 2005
+++ b/xen/drivers/char/console.c        Thu Aug 18 18:40:02 2005
@@ -652,8 +652,9 @@
 void panic(const char *fmt, ...)
 {
     va_list args;
-    char buf[128], cpustr[10];
+    char buf[128];
     unsigned long flags;
+    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
     extern void machine_restart(char *);
     
     debugtrace_dump();
@@ -665,16 +666,13 @@
     debugger_trap_immediate();
 
     /* Spit out multiline message in one go. */
-    spin_lock_irqsave(&console_lock, flags);
-    __putstr("\n****************************************\n");
-    __putstr("Panic on CPU");
-    sprintf(cpustr, "%d", smp_processor_id());
-    __putstr(cpustr);
-    __putstr(":\n");
-    __putstr(buf);
-    __putstr("****************************************\n\n");
-    __putstr("Reboot in five seconds...\n");
-    spin_unlock_irqrestore(&console_lock, flags);
+    spin_lock_irqsave(&lock, flags);
+    printk("\n****************************************\n");
+    printk("Panic on CPU %d:\n", smp_processor_id());
+    printk(buf);
+    printk("****************************************\n\n");
+    printk("Reboot in five seconds...\n");
+    spin_unlock_irqrestore(&lock, flags);
 
     watchdog_disable();
     mdelay(5000);
diff -r 84ee014ebd41 -r 99914b54f7bf xen/include/asm-x86/uaccess.h
--- a/xen/include/asm-x86/uaccess.h     Wed Aug 17 20:34:38 2005
+++ b/xen/include/asm-x86/uaccess.h     Thu Aug 18 18:40:02 2005
@@ -125,22 +125,20 @@
        __pu_err;                                                       \
 })                                                     
 
-#define __get_user_nocheck(x,ptr,size)                         \
-({                                                             \
-       long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
-       __gu_err;                                               \
+#define __get_user_nocheck(x,ptr,size)                          \
+({                                                              \
+       long __gu_err;                                          \
+       __get_user_size((x),(ptr),(size),__gu_err,-EFAULT);     \
+       __gu_err;                                               \
 })
 
-#define __get_user_check(x,ptr,size)                                   \
-({                                                                     \
-       long __gu_err, __gu_val;                                        \
-       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                   \
-       __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);    \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                  \
-       __gu_err;                                                       \
+#define __get_user_check(x,ptr,size)                            \
+({                                                              \
+       long __gu_err;                                          \
+       __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
+       __get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT); \
+       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;          \
+       __gu_err;                                               \
 })                                                     
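Dropping the intermediate long __gu_val lets __get_user_size() store straight into x, so the macros now also work for objects wider than long: on i386 a 64-bit PAE PTE staged through a 32-bit long would lose its top half (the __put_user(pte.l1, ...) change in xen/arch/x86/mm.c above presumably relies on the same property on the put side). A minimal illustration of the truncation being avoided:

    #include <stdint.h>
    #include <stdio.h>
    int main(void)
    {
            uint64_t pte = 0x8000000000000067ULL;  /* NX bit + low flags */
            uint32_t staged = (uint32_t)pte;       /* 32-bit staging slot */
            printf("0x%llx -> 0x%x\n",             /* top half is lost */
                   (unsigned long long)pte, staged);
            return 0;
    }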
 
 struct __large_struct { unsigned long buf[100]; };

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
