diff -r da35b52c4fd7 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Apr 03 11:37:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 04 14:20:41 2008 +0200
@@ -388,17 +388,17 @@ int amd_iommu_map_page(struct domain *d,
     unsigned long flags;
     u64 maddr;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    int iw, ir;
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( is_hvm_domain(d) && (!hd->p2m_synchronized) )
+        goto out;
+
     maddr = (u64)mfn << PAGE_SHIFT;
-
-    iw = IOMMU_IO_WRITE_ENABLED;
-    ir = IOMMU_IO_READ_ENABLED;
-
-    spin_lock_irqsave(&hd->mapping_lock, flags);
-
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
     {
@@ -409,7 +409,7 @@ int amd_iommu_map_page(struct domain *d,
     }
 
     set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
-
+out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
@@ -425,10 +425,16 @@ int amd_iommu_unmap_page(struct domain *
 
     BUG_ON( !hd->root_table );
 
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( is_hvm_domain(d) && (!hd->p2m_synchronized) )
+    {
+        spin_unlock_irqrestore(&hd->mapping_lock, flags);
+        return 0;
+    }
+
     requestor_id = hd->domain_id;
     io_addr = (u64)gfn << PAGE_SHIFT;
-
-    spin_lock_irqsave(&hd->mapping_lock, flags);
 
     pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
     if ( pte == NULL )
@@ -486,3 +492,47 @@ int amd_iommu_reserve_domain_unity_map(
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
+
+int amd_iommu_sync_p2m(struct domain *d)
+{
+    unsigned long mfn, gfn, flags;
+    void *pte;
+    u64 maddr;
+    struct list_head *entry;
+    struct page_info *page;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    int iw = IOMMU_IO_WRITE_ENABLED;
+    int ir = IOMMU_IO_READ_ENABLED;
+
+    spin_lock_irqsave(&hd->mapping_lock, flags);
+
+    if ( hd->p2m_synchronized )
+        goto out;
+
+    for ( entry = d->page_list.next; entry != &d->page_list;
+          entry = entry->next )
+    {
+        page = list_entry(entry, struct page_info, list);
+        mfn = page_to_mfn(page);
+        gfn = get_gpfn_from_mfn(mfn);
+
+        if ( gfn == INVALID_M2P_ENTRY )
+            continue;
+
+        maddr = (u64)mfn << PAGE_SHIFT;
+        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
+        if ( pte == NULL )
+        {
+            dprintk(XENLOG_ERR,
+                    "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
+            spin_unlock_irqrestore(&hd->mapping_lock, flags);
+            return -EFAULT;
+        }
+        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+    }
+    hd->p2m_synchronized = 1;
+
+out:
+    spin_unlock_irqrestore(&hd->mapping_lock, flags);
+    return 0;
+}
diff -r da35b52c4fd7 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Apr 03 11:37:23 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 04 14:20:41 2008 +0200
@@ -553,8 +553,11 @@ int amd_iommu_assign_device(struct domai
 int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
 {
     int bdf = (bus << 8) | devfn;
-    int req_id;
-    req_id = ivrs_mappings[bdf].dte_requestor_id;
+    int req_id = ivrs_mappings[bdf].dte_requestor_id;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( is_hvm_domain(d) && list_empty(&hd->pdev_list) )
+        amd_iommu_sync_p2m(d);
 
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
diff -r da35b52c4fd7 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Apr 03 11:37:23 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 04 14:20:41 2008 +0200
@@ -57,6 +57,7 @@ void *amd_iommu_get_vptr_from_page_table
 void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
+int amd_iommu_sync_p2m(struct domain *d);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
diff -r da35b52c4fd7 xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h	Thu Apr 03 11:37:23 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h	Fri Apr 04 14:20:41 2008 +0200
@@ -48,6 +48,7 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     void *root_table;
+    bool_t p2m_synchronized;
 
     /* iommu_ops */
     struct iommu_ops *platform_ops;
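
For illustration, here is a minimal standalone sketch of the lazy-sync pattern this patch introduces: map requests are dropped while p2m_synchronized is clear, and the first device assignment populates everything once under the same lock that guards the flag. Everything here is a simplified stand-in for the Xen internals above (struct io_domain, the sketch_* functions, and a pthread mutex in place of spin_lock_irqsave are hypothetical names for this sketch only), not the actual hypervisor code:

/* build: cc -pthread sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

/* Stand-in for struct hvm_iommu: only the pieces the pattern needs. */
struct io_domain {
    pthread_mutex_t mapping_lock;
    bool p2m_synchronized;
};

/* Stand-in for amd_iommu_map_page(): until the first sync has run,
 * individual map requests are skipped, because the deferred full walk
 * will create those entries anyway. */
static int sketch_map_page(struct io_domain *d, unsigned long gfn,
                           unsigned long mfn)
{
    pthread_mutex_lock(&d->mapping_lock);
    if ( !d->p2m_synchronized )
    {
        pthread_mutex_unlock(&d->mapping_lock);
        return 0;   /* dropped: IO page tables not populated yet */
    }
    printf("map gfn %lx -> mfn %lx\n", gfn, mfn);  /* the real PTE write */
    pthread_mutex_unlock(&d->mapping_lock);
    return 0;
}

/* Stand-in for amd_iommu_sync_p2m(): populate the tables once, then
 * raise the flag so later map/unmap calls take effect immediately. */
static int sketch_sync_p2m(struct io_domain *d)
{
    pthread_mutex_lock(&d->mapping_lock);
    if ( !d->p2m_synchronized )
    {
        printf("walking page list, mapping all valid gfns...\n");
        d->p2m_synchronized = true;
    }
    pthread_mutex_unlock(&d->mapping_lock);
    return 0;
}

int main(void)
{
    struct io_domain d = { PTHREAD_MUTEX_INITIALIZER, false };

    sketch_map_page(&d, 0x100, 0x200);  /* skipped: not yet synchronized */
    sketch_sync_p2m(&d);                /* first device assignment */
    sketch_map_page(&d, 0x100, 0x200);  /* now takes effect */
    return 0;
}

Checking the flag and writing the tables under one lock is what makes the scheme safe: a map or unmap that races with the sync either sees the flag clear and bails out (its entry is covered by the walk) or sees it set after the walk has completed, so no update can be lost in between.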