WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-changelog] [xen-unstable] merge with xen-unstable.hg (staging)

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] merge with xen-unstable.hg (staging)
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Nov 2007 04:21:20 -0800
Delivery-date: Fri, 09 Nov 2007 05:30:56 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1194472128 25200
# Node ID a1247c2df2b41d7959c1626833c2a149dd705640
# Parent  ef6415fdaf8a026aa6cba380b01152dc96cac449
# Parent  d4c5a1cdcf2e6e9c98c99d218f8b80f8baca10cd
merge with xen-unstable.hg (staging)
---
 tools/examples/block                   |    5 +
 tools/examples/network-bridge          |    4 -
 tools/ioemu/hw/pass-through.c          |   15 ++--
 tools/ioemu/hw/pass-through.h          |    2 
 tools/ioemu/hw/xen_machine_fv.c        |   45 +++++-------
 xen/arch/ia64/xen/mm.c                 |    8 --
 xen/arch/x86/domain.c                  |   23 +++---
 xen/arch/x86/hvm/irq.c                 |   13 +--
 xen/arch/x86/hvm/vmx/intr.c            |   17 +++-
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c |    9 +-
 xen/arch/x86/hvm/vmx/vtd/io.c          |  121 ++++++++++++++++++++++-----------
 xen/arch/x86/hvm/vmx/vtd/utils.c       |   15 ++--
 xen/arch/x86/hvm/vpt.c                 |    2 
 xen/arch/x86/mm.c                      |   71 ++++++++++++++++---
 xen/arch/x86/mm/shadow/multi.c         |   12 +--
 xen/arch/x86/traps.c                   |    3 
 xen/common/grant_table.c               |    6 -
 xen/include/asm-ia64/mm.h              |    3 
 xen/include/asm-x86/domain.h           |    7 +
 xen/include/asm-x86/hvm/irq.h          |   29 +++++--
 xen/include/asm-x86/mm.h               |   28 ++++---
 xen/include/asm-x86/page.h             |   10 ++
 22 files changed, 289 insertions(+), 159 deletions(-)

diff -r ef6415fdaf8a -r a1247c2df2b4 tools/examples/block
--- a/tools/examples/block      Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/examples/block      Wed Nov 07 14:48:48 2007 -0700
@@ -326,7 +326,10 @@ mount it read-write in a guest domain."
           fatal 'Failed to find an unused loop device'
         fi
 
-        do_or_die losetup "$loopdev" "$file"
+        status=$(losetup "$loopdev" "$file" || echo "failed")
+        if [ -n "$status" ]; then
+          do_or_die losetup -r "$loopdev" "$file"
+        fi
         xenstore_write "$XENBUS_PATH/node" "$loopdev"
         write_dev "$loopdev"
         release_lock "block"
diff -r ef6415fdaf8a -r a1247c2df2b4 tools/examples/network-bridge
--- a/tools/examples/network-bridge     Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/examples/network-bridge     Wed Nov 07 14:48:48 2007 -0700
@@ -72,8 +72,8 @@ find_alt_device () {
     echo "$ifs"
 }
 
-netdev=${netdev:-$(ip route list | awk '/^default / { print $NF }' |
-                  sed 's/.* dev //')}
+netdev=${netdev:-$(ip route list 0.0.0.0/0  | \
+                   sed 's/.*dev \([a-z]\+[0-9]\+\).*$/\1/')}
 if is_network_root ; then
     altdevs=$(find_alt_device $netdev)
     for netdev in $altdevs; do break; done
diff -r ef6415fdaf8a -r a1247c2df2b4 tools/ioemu/hw/pass-through.c
--- a/tools/ioemu/hw/pass-through.c     Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/pass-through.c     Wed Nov 07 14:48:48 2007 -0700
@@ -20,8 +20,8 @@
  * Guy Zana <guy@xxxxxxxxxxxx>
  *
  * This file implements direct PCI assignment to a HVM guest
- *
  */
+
 #include "vl.h"
 #include "pass-through.h"
 #include "pci/header.h"
@@ -127,9 +127,10 @@ void pt_iomem_map(PCIDevice *d, int i, u
     if ( !first_map )
     {
         /* Remove old mapping */
-        ret = xc_domain_memory_mapping(xc_handle, domid, old_ebase >> 12,
-                assigned_device->bases[i].access.maddr >> 12,
-                (e_size+0xFFF) >> 12,
+        ret = xc_domain_memory_mapping(xc_handle, domid,
+                old_ebase >> XC_PAGE_SHIFT,
+                assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+                (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
                 DPCI_REMOVE_MAPPING);
         if ( ret != 0 )
         {
@@ -140,9 +141,9 @@ void pt_iomem_map(PCIDevice *d, int i, u
 
     /* Create new mapping */
     ret = xc_domain_memory_mapping(xc_handle, domid,
-            assigned_device->bases[i].e_physbase >> 12,
-            assigned_device->bases[i].access.maddr >> 12,
-            (e_size+0xFFF) >> 12,
+            assigned_device->bases[i].e_physbase >> XC_PAGE_SHIFT,
+            assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+            (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
             DPCI_ADD_MAPPING);
     if ( ret != 0 )
         PT_LOG("Error: create new mapping failed!\n");
diff -r ef6415fdaf8a -r a1247c2df2b4 tools/ioemu/hw/pass-through.h
--- a/tools/ioemu/hw/pass-through.h     Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/pass-through.h     Wed Nov 07 14:48:48 2007 -0700
@@ -40,7 +40,7 @@
 /* Misc PCI constants that should be moved to a separate library :) */
 #define PCI_CONFIG_SIZE         (256)
 #define PCI_EXP_DEVCAP_FLR      (1 << 28)
-#define PCI_EXP_DEVCTL_FLR      (0x1b)
+#define PCI_EXP_DEVCTL_FLR      (1 << 15)
 #define PCI_BAR_ENTRIES         (6)
 
 struct pt_region {
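
For context on the constant corrected above: in the PCI Express Device Control register (offset 8 within the PCIe capability), bit 15 is defined by the spec as Initiate Function Level Reset, so (1 << 15) is the mask a read-modify-write needs; the old 0x1b value was not a usable bit mask. A hedged sketch of triggering an FLR, assuming hypothetical 16-bit config-space accessors (pci_cfg_read16/pci_cfg_write16 are stand-ins, not real libxc calls):

    #include <stdint.h>

    #define PCI_EXP_DEVCTL       8         /* Device Control offset in PCIe cap */
    #define PCI_EXP_DEVCTL_FLR   (1 << 15) /* Initiate Function Level Reset     */

    extern uint16_t pci_cfg_read16(unsigned int pos);             /* assumed */
    extern void     pci_cfg_write16(unsigned int pos, uint16_t v); /* assumed */

    static void initiate_flr(unsigned int pcie_cap_base)
    {
        uint16_t ctl = pci_cfg_read16(pcie_cap_base + PCI_EXP_DEVCTL);
        pci_cfg_write16(pcie_cap_base + PCI_EXP_DEVCTL, ctl | PCI_EXP_DEVCTL_FLR);
        /* Per spec, software should wait (100 ms) before touching the device. */
    }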
diff -r ef6415fdaf8a -r a1247c2df2b4 tools/ioemu/hw/xen_machine_fv.c
--- a/tools/ioemu/hw/xen_machine_fv.c   Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/xen_machine_fv.c   Wed Nov 07 14:48:48 2007 -0700
@@ -27,13 +27,6 @@
 #include <xen/hvm/params.h>
 #include <sys/mman.h>
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE XC_PAGE_SIZE
-#endif
-#ifndef PAGE_SHIFT
-#define PAGE_SHIFT XC_PAGE_SHIFT
-#endif
-
 #if defined(MAPCACHE)
 
 #if defined(__i386__) 
@@ -57,7 +50,7 @@ struct map_cache {
 struct map_cache {
     unsigned long paddr_index;
     uint8_t      *vaddr_base;
-    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
+    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
 };
 
 static struct map_cache *mapcache_entry;
@@ -71,9 +64,9 @@ static int qemu_map_cache_init(void)
 {
     unsigned long size;
 
-    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
-                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
-                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
+    nr_buckets = (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
+                   (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
+                  (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
 
     /*
      * Use mmap() directly: lets us allocate a big hash table with no up-front
@@ -81,7 +74,7 @@ static int qemu_map_cache_init(void)
      * that we actually use. All others will contain all zeroes.
      */
     size = nr_buckets * sizeof(struct map_cache);
-    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
     fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", 
nr_buckets, size);
     mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
                           MAP_SHARED|MAP_ANON, -1, 0);
@@ -97,7 +90,7 @@ static void qemu_remap_bucket(struct map
                               unsigned long address_index)
 {
     uint8_t *vaddr_base;
-    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
+    unsigned long pfns[MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT];
     unsigned int i, j;
 
     if (entry->vaddr_base != NULL) {
@@ -108,11 +101,11 @@ static void qemu_remap_bucket(struct map
         }
     }
 
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
-        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i++)
+        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
 
     vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
-                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
+                                      pfns, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
     if (vaddr_base == NULL) {
         fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
         exit(-1);
@@ -121,10 +114,10 @@ static void qemu_remap_bucket(struct map
     entry->vaddr_base  = vaddr_base;
     entry->paddr_index = address_index;
 
-    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
+    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i += BITS_PER_LONG) {
         unsigned long word = 0;
-        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
-            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
+        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT)) ?
+            (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
         while (j > 0)
             word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
         entry->valid_mapping[i / BITS_PER_LONG] = word;
@@ -143,10 +136,10 @@ uint8_t *qemu_map_cache(target_phys_addr
     entry = &mapcache_entry[address_index % nr_buckets];
 
     if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
-        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+        !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
         qemu_remap_bucket(entry, address_index);
 
-    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+    if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
         return NULL;
 
     last_address_index = address_index;
@@ -213,7 +206,7 @@ static void xen_init_fv(uint64_t ram_siz
 
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
-    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, ioreq_pfn);
     if (shared_page == NULL) {
         fprintf(logfile, "map shared IO page returned error %d\n", errno);
@@ -222,7 +215,7 @@ static void xen_init_fv(uint64_t ram_siz
 
     xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
     fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
-    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
                                             PROT_READ|PROT_WRITE, ioreq_pfn);
     if (buffered_io_page == NULL) {
         fprintf(logfile, "map buffered IO page returned error %d\n", errno);
@@ -272,9 +265,9 @@ static void xen_init_fv(uint64_t ram_siz
     /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
        to make QEMU map continuous virtual memory space */
     if (ram_size > MMIO_START) {       
-        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
-            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
-                (STORE_PAGE_START >> PAGE_SHIFT); 
+        for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
+            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
+                (STORE_XC_PAGE_START >> XC_PAGE_SHIFT); 
     }
 
     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
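
The xen_machine_fv.c changes are a mechanical PAGE_* to XC_PAGE_* rename; the mapcache logic itself is untouched. The bitmap loop retained above records which frames of a bucket actually mapped: by the convention the code tests, xc_map_foreign_batch() marks a frame it failed to map with 0xf in the top nibble of the corresponding pfns[] entry. A simplified, self-contained sketch of that bitmap-word construction:

    #include <stdio.h>

    /* Build one bitmap word: bit k is set iff pfns[base + k] mapped OK.
     * A failed frame carries 0xf in its top nibble, mirroring the test
     * ((pfn >> 28) & 0xf) != 0xf in the hunk above. */
    static unsigned long valid_word(const unsigned long *pfns,
                                    unsigned int base, unsigned int n)
    {
        unsigned long word = 0;
        while (n > 0)
            word = (word << 1) | (((pfns[base + --n] >> 28) & 0xf) != 0xf);
        return word;
    }

    int main(void)
    {
        unsigned long pfns[4] = { 0x1000, 0xf0000000UL, 0x2000, 0x3000 };
        printf("%#lx\n", valid_word(pfns, 0, 4)); /* 0xd: entry 1 failed */
        return 0;
    }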
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/ia64/xen/mm.c    Wed Nov 07 14:48:48 2007 -0700
@@ -2894,11 +2894,9 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
     return 0;
 }
 
-int
-iomem_page_test(unsigned long mfn, struct page_info *page)
-{
-       return unlikely(!mfn_valid(mfn)) ||
-              unlikely(page_get_owner(page) == dom_io);
+int is_iomem_page(unsigned long mfn)
+{
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 /*
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/domain.c     Wed Nov 07 14:48:48 2007 -0700
@@ -415,7 +415,8 @@ int vcpu_initialise(struct vcpu *v)
             v->arch.cr3           = __pa(idle_pg_table);
         }
 
-        v->arch.guest_context.ctrlreg[4] = mmu_cr4_features;
+        v->arch.guest_context.ctrlreg[4] =
+            real_cr4_to_pv_guest_cr4(mmu_cr4_features);
     }
 
     v->arch.perdomain_ptes =
@@ -573,17 +574,18 @@ void arch_domain_destroy(struct domain *
 
 unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
 {
-    unsigned long hv_cr4 = read_cr4(), hv_cr4_mask = ~X86_CR4_TSD;
+    unsigned long hv_cr4_mask, hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());
+
+    hv_cr4_mask = ~X86_CR4_TSD;
     if ( cpu_has_de )
         hv_cr4_mask &= ~X86_CR4_DE;
 
-    if ( (guest_cr4 & hv_cr4_mask) !=
-         (hv_cr4 & hv_cr4_mask & ~(X86_CR4_PGE|X86_CR4_PSE)) )
+    if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
         gdprintk(XENLOG_WARNING,
                  "Attempt to change CR4 flags %08lx -> %08lx\n",
                  hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4);
 
-    return  (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
+    return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
 }
 
 /* This is called by arch_final_setup_guest and do_boot_vcpu */
@@ -684,8 +686,8 @@ int arch_set_info_guest(
     v->arch.guest_context.user_regs.eflags |= EF_IE;
 
     cr4 = v->arch.guest_context.ctrlreg[4];
-    v->arch.guest_context.ctrlreg[4] =
-        (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
+    v->arch.guest_context.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(cr4) :
+        real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
     memset(v->arch.guest_context.debugreg, 0,
            sizeof(v->arch.guest_context.debugreg));
@@ -1223,11 +1225,14 @@ static void paravirt_ctxt_switch_from(st
 
 static void paravirt_ctxt_switch_to(struct vcpu *v)
 {
+    unsigned long cr4;
+
     set_int80_direct_trap(v);
     switch_kernel_stack(v);
 
-    if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
-        write_cr4(v->arch.guest_context.ctrlreg[4]);
+    cr4 = pv_guest_cr4_to_real_cr4(v->arch.guest_context.ctrlreg[4]);
+    if ( unlikely(cr4 != read_cr4()) )
+        write_cr4(cr4);
 
     if ( unlikely(v->arch.guest_context.debugreg[7]) )
     {
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/irq.c    Wed Nov 07 14:48:48 2007 -0700
@@ -192,15 +192,12 @@ void hvm_set_pci_link_route(struct domai
     hvm_irq->pci_link.route[link] = isa_irq;
 
     /* PCI pass-through fixup. */
-    if ( hvm_irq->dpci && hvm_irq->dpci->girq[old_isa_irq].valid )
-    {
-        uint32_t device = hvm_irq->dpci->girq[old_isa_irq].device;
-        uint32_t intx = hvm_irq->dpci->girq[old_isa_irq].intx;
-        if ( link == hvm_pci_intx_link(device, intx) )
-        {
-            hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->girq[old_isa_irq];
+    if ( hvm_irq->dpci && hvm_irq->dpci->link[link].valid )
+    {
+        hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->link[link];
+        if ( hvm_irq->dpci->girq[old_isa_irq].device ==
+             hvm_irq->dpci->link[link].device )
             hvm_irq->dpci->girq[old_isa_irq].valid = 0;
-        }
     }
 
     if ( hvm_irq->pci_link_assert_count[link] == 0 )
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/intr.c       Wed Nov 07 14:48:48 2007 -0700
@@ -113,6 +113,7 @@ static void vmx_dirq_assist(struct vcpu 
     uint32_t device, intx;
     struct domain *d = v->domain;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
         return;
@@ -122,11 +123,17 @@ static void vmx_dirq_assist(struct vcpu 
           irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
     {
         stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
-
-        test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask);
-        device = hvm_irq_dpci->mirq[irq].device;
-        intx = hvm_irq_dpci->mirq[irq].intx;
-        hvm_pci_intx_assert(d, device, intx);
+        clear_bit(irq, &hvm_irq_dpci->dirq_mask);
+
+        list_for_each_entry ( dig, &hvm_irq_dpci->mirq[irq].dig_list, list )
+        {
+            device = dig->device;
+            intx = dig->intx;
+            hvm_pci_intx_assert(d, device, intx);
+            spin_lock(&hvm_irq_dpci->dirq_lock);
+            hvm_irq_dpci->mirq[irq].pending++;
+            spin_unlock(&hvm_irq_dpci->dirq_lock);
+        }
 
         /*
          * Set a timer to see if the guest can finish the interrupt or not. For
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Wed Nov 07 14:48:48 2007 -0700
@@ -688,6 +688,9 @@ static int iommu_enable_translation(stru
             break;
         cpu_relax();
     }
+
+    /* Disable PMRs when VT-d engine takes effect per spec definition */
+    disable_pmr(iommu);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     return 0;
 }
@@ -1767,7 +1770,7 @@ int iommu_setup(void)
     struct hvm_iommu *hd  = domain_hvm_iommu(dom0);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
-    unsigned long i, status;
+    unsigned long i;
 
     if ( !vtd_enabled )
         return 0;
@@ -1796,10 +1799,6 @@ int iommu_setup(void)
     setup_dom0_rmrr();
     if ( enable_vtd_translation() )
         goto error;
-
-    status = dmar_readl(iommu->reg, DMAR_PMEN_REG);
-    if (status & DMA_PMEN_PRS)
-        disable_pmr(iommu);
 
     return 0;
 
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c     Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c     Wed Nov 07 14:48:48 2007 -0700
@@ -47,14 +47,27 @@
 
 static void pt_irq_time_out(void *data)
 {
-    struct hvm_irq_dpci_mapping *irq_map = data;
-    unsigned int guest_gsi, machine_gsi;
-    struct domain *d = irq_map->dom;
-
-    guest_gsi = irq_map->guest_gsi;
-    machine_gsi = d->arch.hvm_domain.irq.dpci->girq[guest_gsi].machine_gsi;
-    clear_bit(machine_gsi, d->arch.hvm_domain.irq.dpci->dirq_mask);
-    hvm_dpci_eoi(irq_map->dom, guest_gsi, NULL);
+    struct hvm_mirq_dpci_mapping *irq_map = data;
+    unsigned int guest_gsi, machine_gsi = 0;
+    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
+    uint32_t device, intx;
+
+    list_for_each_entry ( dig, &irq_map->dig_list, list )
+    {
+        guest_gsi = dig->gsi;
+        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
+        device = dig->device;
+        intx = dig->intx;
+        hvm_pci_intx_deassert(irq_map->dom, device, intx);
+    }
+
+    clear_bit(machine_gsi, dpci->dirq_mask);
+    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+    spin_lock(&dpci->dirq_lock);
+    dpci->mirq[machine_gsi].pending = 0;
+    spin_unlock(&dpci->dirq_lock);
+    pirq_guest_eoi(irq_map->dom, machine_gsi);
 }
 
 int pt_irq_create_bind_vtd(
@@ -62,8 +75,8 @@ int pt_irq_create_bind_vtd(
 {
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t machine_gsi, guest_gsi;
-    uint32_t device, intx;
-    uint32_t link, isa_irq;
+    uint32_t device, intx, link;
+    struct dev_intx_gsi *dig;
 
     if ( hvm_irq_dpci == NULL )
     {
@@ -72,6 +85,9 @@ int pt_irq_create_bind_vtd(
             return -ENOMEM;
 
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+        spin_lock_init(&hvm_irq_dpci->dirq_lock);
+        for ( int i = 0; i < NR_IRQS; i++ )
+            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].dig_list);
 
         if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
                      0, (unsigned long)hvm_irq_dpci) != 0 )
@@ -85,35 +101,42 @@ int pt_irq_create_bind_vtd(
     intx = pt_irq_bind->u.pci.intx;
     guest_gsi = hvm_pci_intx_gsi(device, intx);
     link = hvm_pci_intx_link(device, intx);
-    isa_irq = d->arch.hvm_domain.irq.pci_link.route[link];
-
-    hvm_irq_dpci->mirq[machine_gsi].valid = 1;
-    hvm_irq_dpci->mirq[machine_gsi].device = device;
-    hvm_irq_dpci->mirq[machine_gsi].intx = intx;
-    hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi;
-    hvm_irq_dpci->mirq[machine_gsi].dom = d;
-
+
+    dig = xmalloc(struct dev_intx_gsi);
+    if ( !dig )
+        return -ENOMEM;
+
+    dig->device = device;
+    dig->intx = intx;
+    dig->gsi = guest_gsi;
+    list_add_tail(&dig->list,
+                  &hvm_irq_dpci->mirq[machine_gsi].dig_list);
+ 
     hvm_irq_dpci->girq[guest_gsi].valid = 1;
     hvm_irq_dpci->girq[guest_gsi].device = device;
     hvm_irq_dpci->girq[guest_gsi].intx = intx;
     hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[guest_gsi].dom = d;
-
-    hvm_irq_dpci->girq[isa_irq].valid = 1;
-    hvm_irq_dpci->girq[isa_irq].device = device;
-    hvm_irq_dpci->girq[isa_irq].intx = intx;
-    hvm_irq_dpci->girq[isa_irq].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[isa_irq].dom = d;
-
-    init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
-               pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
-
-    /* Deal with GSI for legacy devices. */
-    pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
-    gdprintk(XENLOG_ERR,
-             "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n",
+
+    hvm_irq_dpci->link[link].valid = 1;
+    hvm_irq_dpci->link[link].device = device;
+    hvm_irq_dpci->link[link].intx = intx;
+    hvm_irq_dpci->link[link].machine_gsi = machine_gsi;
+
+    /* Bind the same mirq once in the same domain */
+    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
+    {
+        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
+        hvm_irq_dpci->mirq[machine_gsi].dom = d;
+
+        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
+                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+        /* Deal with gsi for legacy devices */
+        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+    }
+
+    gdprintk(XENLOG_INFO,
+             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
              machine_gsi, device, intx);
-
     return 0;
 }
 
@@ -150,14 +173,22 @@ void hvm_dpci_eoi(struct domain *d, unsi
         return;
 
     machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
-    stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
     device = hvm_irq_dpci->girq[guest_gsi].device;
     intx = hvm_irq_dpci->girq[guest_gsi].intx;
-    gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
-             device, intx);
     hvm_pci_intx_deassert(d, device, intx);
-    if ( (ent == NULL) || !ent->fields.mask )
-        pirq_guest_eoi(d, machine_gsi);
+ 
+    spin_lock(&hvm_irq_dpci->dirq_lock);
+    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
+    {
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
+
+        gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
+        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+        if ( (ent == NULL) || !ent->fields.mask )
+            pirq_guest_eoi(d, machine_gsi);
+    }
+    else
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
 }
 
 void iommu_domain_destroy(struct domain *d)
@@ -165,8 +196,9 @@ void iommu_domain_destroy(struct domain 
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t i;
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *tmp;
+    struct list_head *ioport_list, *dig_list, *tmp;
     struct g2m_ioport *ioport;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled )
         return;
@@ -178,7 +210,16 @@ void iommu_domain_destroy(struct domain 
             {
                 pirq_guest_unbind(d, i);
                 kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+                list_for_each_safe ( dig_list, tmp,
+                                     &hvm_irq_dpci->mirq[i].dig_list )
+                {
+                    dig = list_entry(dig_list, struct dev_intx_gsi, list);
+                    list_del(&dig->list);
+                    xfree(dig);
+                }
             }
+
         d->arch.hvm_domain.irq.dpci = NULL;
         xfree(hvm_irq_dpci);
     }
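
Taken together, the io.c and intr.c hunks above implement a simple counting protocol for machine IRQs shared by several passed-through devices: vmx_dirq_assist() increments mirq[irq].pending once per asserted guest device/intx, and hvm_dpci_eoi() issues the physical EOI only when the count, decremented under dirq_lock, drains to zero. A condensed sketch of the decrement side (a pthread mutex stands in for the Xen spinlock; types are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct mirq_state {
        pthread_mutex_t lock;   /* stands in for dirq_lock       */
        int pending;            /* asserted-but-not-EOIed guests */
    };

    /* Called once per guest EOI; returns 1 when the caller should
     * EOI the physical interrupt (i.e. this was the last guest). */
    static int guest_eoi(struct mirq_state *m)
    {
        int last;
        pthread_mutex_lock(&m->lock);
        last = (--m->pending == 0);
        pthread_mutex_unlock(&m->lock);
        return last;
    }

    int main(void)
    {
        struct mirq_state m = { PTHREAD_MUTEX_INITIALIZER, 2 };
        printf("%d %d\n", guest_eoi(&m), guest_eoi(&m)); /* prints: 0 1 */
        return 0;
    }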
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/vmx/vtd/utils.c
--- a/xen/arch/x86/hvm/vmx/vtd/utils.c  Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/utils.c  Wed Nov 07 14:48:48 2007 -0700
@@ -67,25 +67,30 @@ int vtd_hw_check(void)
 /* Disable vt-d protected memory registers. */
 void disable_pmr(struct iommu *iommu)
 {
-    unsigned long start_time, status;
+    unsigned long start_time;
     unsigned int val;
 
     val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
+    if ( !(val & DMA_PMEN_PRS) )
+        return;
+
     dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
     start_time = jiffies;
 
     for ( ; ; )
     {
-        status = dmar_readl(iommu->reg, DMAR_PMEN_REG);
-        if ( (status & DMA_PMEN_PRS) == 0 )
+        val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
+        if ( (val & DMA_PMEN_PRS) == 0 )
             break;
+
         if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
-            panic("Cannot set QIE field for queue invalidation\n");
+            panic("Disable PMRs timeout\n");
+
         cpu_relax();
     }
 
     dprintk(XENLOG_INFO VTDPREFIX,
-            "disabled protected memory registers\n");
+            "Disabled protected memory registers\n");
 }
 
 #if defined(__x86_64__)
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vpt.c    Wed Nov 07 14:48:48 2007 -0700
@@ -59,7 +59,7 @@ static void pt_process_missed_ticks(stru
     if ( mode_is(pt->vcpu->domain, no_missed_tick_accounting) )
     {
         pt->pending_intr_nr = 1;
-        pt->scheduled = now + pt->scheduled;
+        pt->scheduled = now + pt->period;
     }
     else
     {
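
The vpt.c one-liner fixes an expression that added a time point to a time point: under no_missed_tick_accounting, the next expiry is meant to be one period after now, with the missed backlog collapsed into a single pending interrupt. A tiny sketch of the corrected rescheduling, with field names mirroring the hunk:

    #include <stdint.h>

    /* Minimal stand-in for the periodic-timer state above. */
    struct pt_sketch {
        uint64_t scheduled;           /* absolute time of next expiry */
        uint64_t period;              /* tick interval                */
        unsigned int pending_intr_nr;
    };

    /* no_missed_tick_accounting policy: collapse the backlog to one
     * pending tick and re-arm exactly one period from 'now'. */
    static void process_missed_ticks(struct pt_sketch *pt, uint64_t now)
    {
        pt->pending_intr_nr = 1;
        pt->scheduled = now + pt->period;   /* the corrected expression */
    }

    int main(void)
    {
        struct pt_sketch pt = { .scheduled = 100, .period = 10 };
        process_missed_ticks(&pt, 137);
        return pt.scheduled == 147 ? 0 : 1;
    }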
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/mm.c Wed Nov 07 14:48:48 2007 -0700
@@ -607,10 +607,9 @@ get_##level##_linear_pagetable(         
 }
 
 
-int iomem_page_test(unsigned long mfn, struct page_info *page)
-{
-    return unlikely(!mfn_valid(mfn)) ||
-        unlikely(page_get_owner(page) == dom_io);
+int is_iomem_page(unsigned long mfn)
+{
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 
@@ -620,19 +619,19 @@ get_page_from_l1e(
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
+    uint32_t l1f = l1e_get_flags(l1e);
     int okay;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+    if ( !(l1f & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1e_get_flags(l1e) & l1_disallow_mask(d)) )
-    {
-        MEM_LOG("Bad L1 flags %x",
-                l1e_get_flags(l1e) & l1_disallow_mask(d));
+    if ( unlikely(l1f & l1_disallow_mask(d)) )
+    {
+        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
         return 0;
     }
 
-    if ( iomem_page_test(mfn, page) )
+    if ( is_iomem_page(mfn) )
     {
         /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
@@ -657,7 +656,7 @@ get_page_from_l1e(
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    okay = (((l1e_get_flags(l1e) & _PAGE_RW) && 
+    okay = (((l1f & _PAGE_RW) && 
              !(unlikely(paging_mode_external(d) && (d != current->domain))))
             ? get_page_and_type(page, d, PGT_writable_page)
             : get_page(page, d));
@@ -667,6 +666,36 @@ get_page_from_l1e(
                 " for dom%d",
                 mfn, get_gpfn_from_mfn(mfn),
                 l1e_get_intpte(l1e), d->domain_id);
+    }
+    else if ( (pte_flags_to_cacheattr(l1f) !=
+               ((page->count_info >> PGC_cacheattr_base) & 7)) &&
+              !is_iomem_page(mfn) )
+    {
+        uint32_t x, nx, y = page->count_info;
+        uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+
+        if ( is_xen_heap_frame(page) )
+        {
+            if ( (l1f & _PAGE_RW) &&
+                 !(unlikely(paging_mode_external(d) &&
+                            (d != current->domain))) )
+                put_page_type(page);
+            put_page(page);
+            MEM_LOG("Attempt to change cache attributes of Xen heap page");
+            return 0;
+        }
+
+        while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
+        {
+            x  = y;
+            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
+            y  = cmpxchg(&page->count_info, x, nx);
+        }
+
+#ifdef __x86_64__
+        map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                         PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+#endif
     }
 
     return okay;
@@ -1825,6 +1854,24 @@ int get_page_type(struct page_info *page
     }
 
     return 1;
+}
+
+
+void cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr = (page->count_info >> PGC_cacheattr_base) & 7;
+
+    if ( likely(cacheattr == 0) )
+        return;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_frame(page));
+
+#ifdef __x86_64__
+    map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
+                     1, PAGE_HYPERVISOR);
+#endif
 }
 
 
@@ -3803,7 +3850,7 @@ static void __memguard_change_range(void
 {
     unsigned long _p = (unsigned long)p;
     unsigned long _l = (unsigned long)l;
-    unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+    unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
 
     /* Ensure we are dealing with a page-aligned whole number of pages. */
     ASSERT((_p&~PAGE_MASK) == 0);
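
The new cache-attribute tracking in get_page_from_l1e() above updates a 3-bit field packed into page->count_info with the usual lock-free read/modify/cmpxchg retry loop. The same pattern in isolation, with C11 atomics standing in for Xen's cmpxchg():

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define PGC_cacheattr_base  26
    #define PGC_cacheattr_mask  (7u << PGC_cacheattr_base)

    /* Set the 3-bit cache-attribute field without disturbing concurrent
     * updates to the other count_info bits (refcount, flags). */
    static void set_cacheattr(_Atomic uint32_t *count_info, uint32_t cacheattr)
    {
        uint32_t x = atomic_load(count_info), nx;
        do {
            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
        } while (!atomic_compare_exchange_weak(count_info, &x, nx));
    }

    int main(void)
    {
        _Atomic uint32_t ci = 5;   /* refcount bits only */
        set_cacheattr(&ci, 3);
        assert((atomic_load(&ci) & PGC_cacheattr_mask) >> PGC_cacheattr_base == 3);
        assert((atomic_load(&ci) & ~PGC_cacheattr_mask) == 5);
        return 0;
    }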
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Nov 07 14:48:48 2007 -0700
@@ -716,12 +716,14 @@ _sh_propagate(struct vcpu *v,
         goto done;
     }
 
-    // Must have a valid target_mfn unless this is a prefetch.  In the
-    // case of a prefetch, an invalid mfn means that we can not usefully
-    // shadow anything, and so we return early.
+    // Must have a valid target_mfn unless this is a prefetch or an l1
+    // pointing at MMIO space.  In the case of a prefetch, an invalid
+    // mfn means that we can not usefully shadow anything, and so we
+    // return early.
     //
-    if ( shadow_mode_refcounts(d) && 
-         !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
+    if ( !mfn_valid(target_mfn)
+         && !(level == 1 && (!shadow_mode_refcounts(d) 
+                             || p2mt == p2m_mmio_direct)) )
     {
         ASSERT((ft == ft_prefetch));
         *sp = shadow_l1e_empty();
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/traps.c      Wed Nov 07 14:48:48 2007 -0700
@@ -1797,7 +1797,8 @@ static int emulate_privileged_op(struct 
 
         case 4: /* Write CR4 */
             v->arch.guest_context.ctrlreg[4] = pv_guest_cr4_fixup(*reg);
-            write_cr4(v->arch.guest_context.ctrlreg[4]);
+            write_cr4(pv_guest_cr4_to_real_cr4(
+                v->arch.guest_context.ctrlreg[4]));
             break;
 
         default:
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/common/grant_table.c  Wed Nov 07 14:48:48 2007 -0700
@@ -332,7 +332,7 @@ __gnttab_map_grant_ref(
     if ( op->flags & GNTMAP_host_map ) 
     {
         /* Could be an iomem page for setting up permission */
-        if ( iomem_page_test(frame, mfn_to_page(frame)) )
+        if ( is_iomem_page(frame) )
         {
             is_iomem = 1;
             if ( iomem_permit_access(ld, frame, frame) )
@@ -527,7 +527,7 @@ __gnttab_unmap_common(
                                                   op->flags)) < 0 )
                 goto unmap_out;
         }
-        else if ( iomem_page_test(op->frame, mfn_to_page(op->frame)) &&
+        else if ( is_iomem_page(op->frame) &&
                   iomem_access_permitted(ld, op->frame, op->frame) )
         {
             if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
@@ -1651,7 +1651,7 @@ gnttab_release_mappings(
                 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                 act->pin -= GNTPIN_hstw_inc;
 
-                if ( iomem_page_test(act->frame, mfn_to_page(act->frame)) &&
+                if ( is_iomem_page(act->frame) &&
                      iomem_access_permitted(rd, act->frame, act->frame) )
                     rc = iomem_deny_access(rd, act->frame, act->frame);
                 else 
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-ia64/mm.h Wed Nov 07 14:48:48 2007 -0700
@@ -185,8 +185,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 extern void put_page_type(struct page_info *page);
 extern int get_page_type(struct page_info *page, u32 type);
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/domain.h      Wed Nov 07 14:48:48 2007 -0700
@@ -350,7 +350,14 @@ struct arch_vcpu
 /* Continue the current hypercall via func(data) on specified cpu. */
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
 
+/* Clean up CR4 bits that are not under guest control. */
 unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);
+
+/* Convert between guest-visible and real CR4 values. */
+#define pv_guest_cr4_to_real_cr4(c) \
+    ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)))
+#define real_cr4_to_pv_guest_cr4(c) \
+    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))
 
 #endif /* __ASM_DOMAIN_H__ */
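
The two macros added to domain.h encode the guest/host CR4 split used throughout this changeset: PGE and PSE are owned by Xen, so they are forced on in the real register and hidden from the guest-visible value. A standalone round-trip check (mmu_cr4_features is given an illustrative value here; the CR4 bit values are the architectural ones):

    #include <assert.h>

    #define X86_CR4_PSE  0x0010
    #define X86_CR4_PGE  0x0080

    /* Illustrative host feature set: PSE+PGE plus, say, OSFXSR (0x0200). */
    static const unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | 0x0200;

    #define pv_guest_cr4_to_real_cr4(c) \
        ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)))
    #define real_cr4_to_pv_guest_cr4(c) \
        ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))

    int main(void)
    {
        unsigned long guest = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
        assert(guest == 0x0200);                       /* PGE/PSE hidden */
        assert(pv_guest_cr4_to_real_cr4(guest) == mmu_cr4_features);
        return 0;
    }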
 
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h     Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/hvm/irq.h     Wed Nov 07 14:48:48 2007 -0700
@@ -30,22 +30,35 @@
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>
 
-struct hvm_irq_dpci_mapping {
+struct dev_intx_gsi {
+    struct list_head list;
+    uint8_t device;
+    uint8_t intx;
+    uint8_t gsi;
+};
+
+struct hvm_mirq_dpci_mapping {
+    uint8_t valid;
+    int pending;
+    struct list_head dig_list;
+    struct domain *dom;
+};
+
+struct hvm_girq_dpci_mapping {
     uint8_t valid;
     uint8_t device;
     uint8_t intx;
-    struct domain *dom;
-    union {
-        uint8_t guest_gsi;
-        uint8_t machine_gsi;
-    };
+    uint8_t machine_gsi;
 };
 
 struct hvm_irq_dpci {
+    spinlock_t dirq_lock;
     /* Machine IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping mirq[NR_IRQS];
+    struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping girq[NR_IRQS];
+    struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    /* Link to guest device/intx mapping. */
+    struct hvm_girq_dpci_mapping link[4];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     struct timer hvm_timer[NR_IRQS];
 };
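
The structural change above is what allows several passed-through devices to share one machine IRQ: the per-mirq mapping now carries a list head rather than a single device/intx pair, with one dev_intx_gsi node per bound device, and dispatch walks the list asserting each guest intx. Roughly the following shape (a plain singly linked list stands in for Xen's list_head):

    #include <stdint.h>
    #include <stdio.h>

    /* One guest device/intx/gsi bound to a shared machine IRQ. */
    struct dev_intx_gsi {
        struct dev_intx_gsi *next;
        uint8_t device, intx, gsi;
    };

    /* Assert every guest intx bound to this machine IRQ, as
     * vmx_dirq_assist() does over mirq[irq].dig_list. */
    static void assert_all(const struct dev_intx_gsi *head)
    {
        for (const struct dev_intx_gsi *d = head; d != NULL; d = d->next)
            printf("assert dev %u intx %u (gsi %u)\n",
                   d->device, d->intx, d->gsi);
    }

    int main(void)
    {
        struct dev_intx_gsi b = { NULL, 4, 1, 21 }, a = { &b, 3, 0, 20 };
        assert_all(&a);
        return 0;
    }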
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/mm.h  Wed Nov 07 14:48:48 2007 -0700
@@ -84,25 +84,23 @@ struct page_info
 #define _PGT_pae_xen_l2     26
 #define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)
 
- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1U<<16)-1)
+ /* 26-bit count of uses of this frame as its current type. */
+#define PGT_count_mask      ((1U<<26)-1)
 
  /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated      31
 #define PGC_allocated       (1U<<_PGC_allocated)
  /* Set on a *guest* page to mark it out-of-sync with its shadow */
-#define _PGC_out_of_sync     30
+#define _PGC_out_of_sync    30
 #define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
  /* Set when is using a page as a page table */
-#define _PGC_page_table      29
+#define _PGC_page_table     29
 #define PGC_page_table      (1U<<_PGC_page_table)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<29)-1)
-
-/* We trust the slab allocator in slab.c, and our use of it. */
-#define PageSlab(page)     (1)
-#define PageSetSlab(page)   ((void)0)
-#define PageClearSlab(page) ((void)0)
+ /* 3-bit PAT/PCD/PWT cache-attribute hint. */
+#define PGC_cacheattr_base  26
+#define PGC_cacheattr_mask  (7U<<PGC_cacheattr_base)
+ /* 26-bit count of references to this frame. */
+#define PGC_count_mask      ((1U<<26)-1)
 
 #define is_xen_heap_frame(pfn) ({                                       \
     paddr_t maddr = page_to_maddr(pfn);                                 \
@@ -147,6 +145,8 @@ void free_page_type(struct page_info *pa
 void free_page_type(struct page_info *page, unsigned long type);
 int _shadow_mode_refcounts(struct domain *d);
 
+void cleanup_page_cacheattr(struct page_info *page);
+
 static inline void put_page(struct page_info *page)
 {
     u32 nx, x, y = page->count_info;
@@ -158,7 +158,10 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
+    {
+        cleanup_page_cacheattr(page);
         free_domheap_page(page);
+    }
 }
 
 
@@ -196,8 +199,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
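
After this mm.h hunk, the 32-bit count_info word is laid out as: bit 31 allocated, bit 30 out-of-sync, bit 29 page-table, bits 28-26 the new cache-attribute field, and bits 25-0 the reference count (with PGT_count_mask grown to 26 bits in step). A compile-time check that these fields tile the word exactly (static_assert is C11; Xen itself would use BUILD_BUG_ON):

    #include <assert.h>

    #define PGC_allocated       (1u << 31)
    #define PGC_out_of_sync     (1u << 30)
    #define PGC_page_table      (1u << 29)
    #define PGC_cacheattr_base  26
    #define PGC_cacheattr_mask  (7u << PGC_cacheattr_base)
    #define PGC_count_mask      ((1u << 26) - 1)

    static_assert((PGC_allocated | PGC_out_of_sync | PGC_page_table |
                   PGC_cacheattr_mask | PGC_count_mask) == 0xffffffffu,
                  "count_info fields must tile the 32-bit word");
    static_assert((PGC_cacheattr_mask & PGC_count_mask) == 0,
                  "cacheattr and refcount must not overlap");

    int main(void) { return 0; }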
diff -r ef6415fdaf8a -r a1247c2df2b4 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/page.h        Wed Nov 07 14:48:48 2007 -0700
@@ -360,6 +360,16 @@ int map_pages_to_xen(
     unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
 
+/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
+static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
+{
+    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
+}
+static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
+{
+    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
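
The two inline helpers added to page.h pack PAT (PTE bit 7) into cacheattr bit 2 and PCD/PWT (PTE bits 4 and 3) into cacheattr bits 1 and 0, which is where the shift-by-5 and shift-by-3 come from. A quick round-trip check over all eight attribute values:

    #include <assert.h>
    #include <stdint.h>

    /* PAT lives in PTE bit 7, PCD in bit 4, PWT in bit 3. */
    static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
    {
        return ((flags >> 5) & 4) | ((flags >> 3) & 3);  /* PAT | PCD,PWT */
    }
    static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
    {
        return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
    }

    int main(void)
    {
        for (uint32_t ca = 0; ca < 8; ca++)
            assert(pte_flags_to_cacheattr(cacheattr_to_pte_flags(ca)) == ca);
        /* e.g. cacheattr 5 (PAT|PWT) <-> PTE flags 0x88 (bit 7 | bit 3) */
        assert(cacheattr_to_pte_flags(5) == 0x88);
        return 0;
    }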

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog