On Monday 04 April 2011 13:05:43 Tim Deegan wrote:
> At 10:32 +0000 on 25 Mar (1301049120), Wei Wang wrote:
> > diff -r 6827fd35cf58 -r 48052c25abfd xen/arch/x86/mm/p2m.c
> > --- a/xen/arch/x86/mm/p2m.c Thu Mar 24 16:18:28 2011 +0100
> > +++ b/xen/arch/x86/mm/p2m.c Thu Mar 24 16:34:29 2011 +0100
> > @@ -34,6 +34,7 @@
> > #include <public/mem_event.h>
> > #include <asm/mem_sharing.h>
> > #include <xen/event.h>
> > +#include <asm/hvm/svm/amd-iommu-proto.h>
> >
> > /* Debugging and auditing of the P2M code? */
> > #define P2M_AUDIT 0
> > @@ -1454,15 +1455,23 @@
> > && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
> > p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
> >
> > + if ( (p2mt == p2m_ram_rw) && iommu_hap_pt_share )
> > + amd_iommu_set_p2m_next_level(p2m, gfn, mfn_x(mfn), page_order);
> > +
>
> It looks like this function maps the p2m table and walks it to fix up
> bits 9-11 of the entries, but you're calling it from a function that has
> just done the same map-and-walk. Couldn't you fold the
> next-level-setting code into p2m_set_entry?
Yeah, that is also possible. Actually, I did it that way in my testing code, so it should be no problem to bring it back. I will send a new patch this week.
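
Roughly like this (untested sketch; p2m_add_iommu_flags() is a hypothetical
name, and next_level_to_flags() from this patch would have to move into a
header so that p2m.c can use it):

    /* Hypothetical helper for p2m.c, reusing next_level_to_flags()
     * from this patch: set the IOMMU next-level and r/w bits on an
     * intermediate entry that the caller already has mapped, so no
     * second map-and-walk of the table is needed. */
    static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
                                    unsigned int next_level)
    {
        if ( iommu_hap_pt_share )
            l1e_add_flags(*p2m_entry, next_level_to_flags(next_level));
    }

p2m_set_entry() / p2m_next_level() could call that on each intermediate
entry while the table is still mapped, so amd_iommu_set_p2m_next_level()
and its second map_domain_page()/unmap_domain_page() walk would go away.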
Thanks,
Wei
> Cheers,
>
> Tim.
>
> > if ( iommu_enabled && need_iommu(p2m->domain) )
> > {
> > - if ( p2mt == p2m_ram_rw )
> > - for ( i = 0; i < (1UL << page_order); i++ )
> > - iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
> > - IOMMUF_readable|IOMMUF_writable);
> > + if ( iommu_hap_pt_share )
> > + amd_iommu_invalidate_pages(p2m->domain, gfn, page_order);
> > else
> > - for ( int i = 0; i < (1UL << page_order); i++ )
> > - iommu_unmap_page(p2m->domain, gfn+i);
> > + {
> > + if ( p2mt == p2m_ram_rw )
> > + for ( i = 0; i < (1UL << page_order); i++ )
> > + iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
> > + IOMMUF_readable|IOMMUF_writable);
> > + else
> > + for ( int i = 0; i < (1UL << page_order); i++ )
> > + iommu_unmap_page(p2m->domain, gfn+i);
> > + }
> > }
> >
> > /* Success */
> > diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/iommu_map.c
> > --- a/xen/drivers/passthrough/amd/iommu_map.c Thu Mar 24 16:18:28 2011 +0100
> > +++ b/xen/drivers/passthrough/amd/iommu_map.c Thu Mar 24 16:34:29 2011 +0100
> > @@ -19,6 +19,7 @@
> > */
> >
> > #include <xen/sched.h>
> > +#include <asm/p2m.h>
> > #include <xen/hvm/iommu.h>
> > #include <asm/amd-iommu.h>
> > #include <asm/hvm/svm/amd-iommu-proto.h>
> > @@ -563,6 +564,9 @@
> >
> > BUG_ON( !hd->root_table );
> >
> > + if ( iommu_hap_pt_share && is_hvm_domain(d) )
> > + return 0;
> > +
> > spin_lock(&hd->mapping_lock);
> >
> > /* Since HVM domain is initialized with 2 level IO page table,
> > @@ -603,6 +607,9 @@
> >
> > BUG_ON( !hd->root_table );
> >
> > + if ( iommu_hap_pt_share && is_hvm_domain(d) )
> > + return 0;
> > +
> > spin_lock(&hd->mapping_lock);
> >
> > /* Since HVM domain is initialized with 2 level IO page table,
> > @@ -680,3 +687,87 @@
> > spin_unlock_irqrestore(&iommu->lock, flags);
> > }
> > }
> > +
> > +/* Convert next level and r/w bits into 24-bit p2m flags */
> > +#define next_level_to_flags(nl) (((nl) << 9 ) | (3ULL << 21))
> > +
> > +/* Flush IOMMU TLB after p2m changes */
> > +int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn,
> > + unsigned int order)
> > +{
> > + unsigned long flags;
> > + struct amd_iommu *iommu;
> > + struct hvm_iommu *hd = domain_hvm_iommu(d);
> > +
> > + BUG_ON( !hd->root_table );
> > +
> > + if ( !iommu_hap_pt_share && is_hvm_domain(d) )
> > + return 0;
> > +
> > + /* send INVALIDATE_IOMMU_PAGES command */
> > + for_each_amd_iommu ( iommu )
> > + {
> > + spin_lock_irqsave(&iommu->lock, flags);
> > + invalidate_iommu_pages(iommu, gfn, hd->domain_id, order);
> > + flush_command_buffer(iommu);
> > + spin_unlock_irqrestore(&iommu->lock, flags);
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +/* Share p2m table with iommu */
> > +void amd_iommu_set_p2m_table(struct domain *d)
> > +{
> > + struct hvm_iommu *hd = domain_hvm_iommu(d);
> > + mfn_t pgd_mfn;
> > +
> > + ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
> > +
> > + if ( hd->root_table != NULL )
> > + free_amd_iommu_pgtable(hd->root_table);
> > +
> > + pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
> > + hd->root_table = mfn_to_page(mfn_x(pgd_mfn));
> > +
> > + iommu_hap_pt_share = 1;
> > + AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = 0x%lx\n",
> > + mfn_x(pgd_mfn));
> > +}
> > +
> > +/* Fix next level fields in p2m entries for iommu use */
> > +void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn,
> > +                                  unsigned long mfn, unsigned int page_order)
> > +{
> > +#if CONFIG_PAGING_LEVELS >= 4
> > +
> > + mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
> > + void *table = NULL;
> > + unsigned int level = CONFIG_PAGING_LEVELS;
> > + unsigned int lowest = page_order / PTE_PER_TABLE_SHIFT + 1;
> > + l1_pgentry_t *p2m_entry;
> > +
> > + if ( !mfn_valid(mfn) )
> > + return;
> > +
> > + while ( level >= lowest )
> > + {
> > + u32 next_level;
> > + u32 offset = gfn >> ((PTE_PER_TABLE_SHIFT *
> > + (level - IOMMU_PAGING_MODE_LEVEL_1)));
> > + offset &= ~PTE_PER_TABLE_MASK;
> > +
> > + table = map_domain_page(mfn_x(table_mfn));
> > +
> > + p2m_entry = (l1_pgentry_t *)table + offset;
> > + next_level = (level == lowest) ? 0 : level - 1;
> > + l1e_add_flags(*p2m_entry, next_level_to_flags(next_level));
> > + table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
> > +
> > + unmap_domain_page(table);
> > +
> > + level--;
> > + }
> > +
> > +#endif
> > +}
> > diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/pci_amd_iommu.c
> > --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Mar 24 16:18:28 2011 +0100
> > +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Mar 24 16:34:29 2011 +0100
> > @@ -362,6 +362,9 @@
> > {
> > struct hvm_iommu *hd = domain_hvm_iommu(d);
> >
> > + if ( iommu_hap_pt_share )
> > + return;
> > +
> > spin_lock(&hd->mapping_lock);
> > if ( hd->root_table )
> > {
> > diff -r 6827fd35cf58 -r 48052c25abfd xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> > --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Mar 24 16:18:28 2011 +0100
> > +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Mar 24 16:34:29 2011 +0100
> > @@ -24,6 +24,7 @@
> > #include <xen/sched.h>
> > #include <asm/amd-iommu.h>
> > #include <xen/domain_page.h>
> > +#include <asm/p2m.h>
> >
> > #define for_each_amd_iommu(amd_iommu) \
> > list_for_each_entry(amd_iommu, \
> > @@ -92,6 +93,13 @@
> > void amd_iommu_resume(void);
> > void amd_iommu_suspend(void);
> >
> > +/* Share p2m table with iommu */
> > +void amd_iommu_set_p2m_table(struct domain *d);
> > +void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn,
> > +                                  unsigned long mfn, unsigned int page_order);
> > +int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn,
> > +                               unsigned int order);
> > +
> > static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32
> > shift) {
> > u32 field;
> >
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel