diff -r db943e8d1051 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/arch/x86/mm/p2m.c	Wed Apr 02 15:16:19 2008 +0200
@@ -264,10 +264,13 @@ set_p2m_entry(struct domain *d, unsigned
     }
     else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
     {
-        if ( p2mt == p2m_ram_rw )
-            iommu_map_page(d, gfn, mfn_x(mfn));
-        else
-            iommu_unmap_page(d, gfn);
+        if ( !list_empty(&domain_hvm_iommu(d)->pdev_list) )
+        {
+            if ( p2mt == p2m_ram_rw )
+                iommu_map_page(d, gfn, mfn_x(mfn));
+            else
+                iommu_unmap_page(d, gfn);
+        }
     }
 }
 
diff -r db943e8d1051 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Wed Apr 02 15:16:19 2008 +0200
@@ -23,6 +23,8 @@
 #include
 #include
 #include
+#include
+#include
 #include "../pci-direct.h"
 #include "../pci_regs.h"
 
@@ -550,11 +552,33 @@ static int reassign_device( struct domai
     return 0;
 }
 
+static int construct_p2m_mapping(struct domain *d)
+{
+    p2m_type_t type;
+    mfn_t mfn;
+    unsigned long gfn;
+
+    p2m_lock(d);
+
+    for ( gfn = 0; gfn < d->tot_pages; gfn++ )
+    {
+        mfn = gfn_to_mfn(d, gfn, &type);
+        if ( type == p2m_ram_rw )
+            amd_iommu_map_page(d, gfn, mfn_x(mfn));
+    }
+
+    p2m_unlock(d);
+    return 0;
+}
+
 int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     int bdf = (bus << 8) | devfn;
-    int req_id;
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    int req_id = ivrs_mappings[bdf].dte_requestor_id;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( is_hvm_domain(d) && list_empty(&hd->pdev_list) )
+        construct_p2m_mapping(d);
 
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
diff -r db943e8d1051 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Tue Apr 01 10:09:33 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Wed Apr 02 15:16:19 2008 +0200
@@ -109,4 +109,28 @@ static inline unsigned long region_to_pa
     return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
+/* copy p2m_lock definition from p2m.c */
+
+#define p2m_lock(_d) \
+    do { \
+        if ( unlikely((_d)->arch.p2m.locker == current->processor) )\
+        { \
+            printk("Error: p2m lock held by %s\n", \
+                   (_d)->arch.p2m.locker_function); \
+            BUG(); \
+        } \
+        spin_lock(&(_d)->arch.p2m.lock); \
+        ASSERT((_d)->arch.p2m.locker == -1); \
+        (_d)->arch.p2m.locker = current->processor; \
+        (_d)->arch.p2m.locker_function = __func__; \
+    } while (0)
+
+#define p2m_unlock(_d) \
+    do { \
+        ASSERT((_d)->arch.p2m.locker == current->processor); \
+        (_d)->arch.p2m.locker = -1; \
+        (_d)->arch.p2m.locker_function = "nobody"; \
+        spin_unlock(&(_d)->arch.p2m.lock); \
+    } while (0)
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */