[Xen-changelog] [xen-unstable] vtd: adding support for multiple queued invalidation pages

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] vtd: adding support for multiple queued invalidation pages
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 10 Feb 2009 01:35:40 -0800
Delivery-date: Tue, 10 Feb 2009 01:37:04 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1234189431 0
# Node ID 1eb6afcad849550242a6cb874dd20a2515dc6a9a
# Parent  1d4ce9e31fa00cda2ee31ec5af9d5d0fc6880b11
vtd: adding support for multiple queued invalidation pages

Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/ia64/vtd.c |    7 ++++---
 xen/drivers/passthrough/vtd/intremap.c |    2 +-
 xen/drivers/passthrough/vtd/iommu.c    |   14 +++++++-------
 xen/drivers/passthrough/vtd/iommu.h    |    4 +++-
 xen/drivers/passthrough/vtd/qinval.c   |    4 +++-
 xen/drivers/passthrough/vtd/vtd.h      |    4 ++--
 xen/drivers/passthrough/vtd/x86/vtd.c  |    9 +++++----
 7 files changed, 25 insertions(+), 19 deletions(-)
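
At the interface level, the patch threads an explicit page count through the two VT-d page-table helpers, so callers can request a contiguous multi-page allocation instead of exactly one page. The before/after prototypes below are taken from the vtd.h hunk further down and are shown here only as a summary:

/* Before this patch: one 4K page, zeroed and cache-flushed. */
u64  alloc_pgtable_maddr(struct domain *d);
void iommu_flush_cache_page(void *addr);

/* After this patch: npages contiguous 4K pages. */
u64  alloc_pgtable_maddr(struct domain *d, unsigned long npages);
void iommu_flush_cache_page(void *addr, unsigned long npages);

Existing callers pass 1; the queued-invalidation setup passes NUM_QINVAL_PAGES (see the iommu.h and qinval.c hunks below).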

diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c    Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c    Mon Feb 09 14:23:51 2009 +0000
@@ -45,16 +45,17 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     vaddr = map_domain_page(page_to_mfn(pg));
     if ( !vaddr )
         return 0;
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
     iommu_flush_cache_page(vaddr);
     unmap_domain_page(vaddr);
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c    Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/intremap.c    Mon Feb 09 14:23:51 2009 +0000
@@ -502,7 +502,7 @@ int intremap_setup(struct iommu *iommu)
     ir_ctrl = iommu_ir_ctrl(iommu);
     if ( ir_ctrl->iremap_maddr == 0 )
     {
-        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL);
+        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
         if ( ir_ctrl->iremap_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Mon Feb 09 14:23:51 2009 +0000
@@ -129,9 +129,9 @@ void iommu_flush_cache_entry(void *addr)
     __iommu_flush_cache(addr, 8);
 }
 
-void iommu_flush_cache_page(void *addr)
-{
-    __iommu_flush_cache(addr, PAGE_SIZE_4K);
+void iommu_flush_cache_page(void *addr, unsigned long npages)
+{
+    __iommu_flush_cache(addr, PAGE_SIZE_4K * npages);
 }
 
 int nr_iommus;
@@ -146,7 +146,7 @@ static u64 bus_to_context_maddr(struct i
     root = &root_entries[bus];
     if ( !root_present(*root) )
     {
-        maddr = alloc_pgtable_maddr(NULL);
+        maddr = alloc_pgtable_maddr(NULL, 1);
         if ( maddr == 0 )
         {
             unmap_vtd_domain_page(root_entries);
@@ -174,7 +174,7 @@ static u64 addr_to_dma_page_maddr(struct
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->mapping_lock));
     if ( hd->pgd_maddr == 0 )
-        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain)) == 0) )
+        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain, 1)) == 0) )
             goto out;
 
     parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
@@ -187,7 +187,7 @@ static u64 addr_to_dma_page_maddr(struct
         {
             if ( !alloc )
                 break;
-            maddr = alloc_pgtable_maddr(domain);
+            maddr = alloc_pgtable_maddr(domain, 1);
             if ( !maddr )
                 break;
             dma_set_pte_addr(*pte, maddr);
@@ -577,7 +577,7 @@ static int iommu_set_root_entry(struct i
     spin_lock(&iommu->lock);
 
     if ( iommu->root_maddr == 0 )
-        iommu->root_maddr = alloc_pgtable_maddr(NULL);
+        iommu->root_maddr = alloc_pgtable_maddr(NULL, 1);
     if ( iommu->root_maddr == 0 )
     {
         spin_unlock(&iommu->lock);
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h       Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.h       Mon Feb 09 14:23:51 2009 +0000
@@ -397,7 +397,9 @@ struct poll_info {
     u32 udata;
 };
 
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
+#define MAX_QINVAL_PAGES 8
+#define NUM_QINVAL_PAGES 1
+#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
 #define qinval_present(v) ((v).lo & 1)
 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
 
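As a worked example of the new QINVAL_ENTRY_NR value: VT-d invalidation descriptors are 128 bits wide, so assuming sizeof(struct qinval_entry) == 16, one page gives 4096 / 16 = 256 queue entries, and the count scales linearly with NUM_QINVAL_PAGES up to the 8-page maximum. A minimal standalone sketch of that arithmetic (the 16-byte entry size is an assumption here; the real macro uses sizeof):

#include <stdio.h>

#define PAGE_SIZE_4K       4096UL
#define MAX_QINVAL_PAGES   8
#define NUM_QINVAL_PAGES   1
/* Assumed descriptor size; the header uses sizeof(struct qinval_entry). */
#define QINVAL_ENTRY_SIZE  16UL
#define QINVAL_ENTRY_NR    (PAGE_SIZE_4K * NUM_QINVAL_PAGES / QINVAL_ENTRY_SIZE)

int main(void)
{
    /* Prints 256 with NUM_QINVAL_PAGES == 1; 8 pages would give 2048. */
    printf("%lu entries in %d page(s)\n",
           (unsigned long)QINVAL_ENTRY_NR, NUM_QINVAL_PAGES);
    return 0;
}
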
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/qinval.c
--- a/xen/drivers/passthrough/vtd/qinval.c      Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/qinval.c      Mon Feb 09 14:23:51 2009 +0000
@@ -427,7 +427,7 @@ int qinval_setup(struct iommu *iommu)
 
     if ( qi_ctrl->qinval_maddr == 0 )
     {
-        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL);
+        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
         if ( qi_ctrl->qinval_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
@@ -445,6 +445,8 @@ int qinval_setup(struct iommu *iommu)
      * registers are automatically reset to 0 with write
      * to IQA register.
      */
+    if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
+        qi_ctrl->qinval_maddr |= NUM_QINVAL_PAGES - 1;
     dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
 
     /* enable queued invalidation hardware */
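
The two added lines above fold the queue size into the value written to DMAR_IQA_REG: alloc_pgtable_maddr() returns a 4K-aligned machine address, so its low bits are zero and can carry the size field that the VT-d specification keeps in the low bits of the IQA register (this reading of the register layout is an assumption here, not something stated in the patch). A small standalone sketch of that packing:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT_4K     12
#define MAX_QINVAL_PAGES  8
#define NUM_QINVAL_PAGES  1

/* Mirrors the IQA packing in qinval_setup() above: a 4K-aligned queue
 * base ORed with (NUM_QINVAL_PAGES - 1) in its otherwise-zero low bits. */
static uint64_t pack_iqa(uint64_t qinval_maddr)
{
    /* Assumption: the allocator returns a 4K-aligned machine address. */
    assert((qinval_maddr & ((1ULL << PAGE_SHIFT_4K) - 1)) == 0);

    if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
        qinval_maddr |= NUM_QINVAL_PAGES - 1;
    return qinval_maddr;
}

int main(void)
{
    /* With one page the size bits stay zero: 0x12345000 -> 0x12345000. */
    return pack_iqa(0x12345000ULL) == 0x12345000ULL ? 0 : 1;
}
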
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/vtd.h
--- a/xen/drivers/passthrough/vtd/vtd.h Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/vtd.h Mon Feb 09 14:23:51 2009 +0000
@@ -101,12 +101,12 @@ void cacheline_flush(char *);
 void cacheline_flush(char *);
 void flush_all_cache(void);
 void *map_to_nocache_virt(int nr_iommus, u64 maddr);
-u64 alloc_pgtable_maddr(struct domain *d);
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
 void free_pgtable_maddr(u64 maddr);
 void *map_vtd_domain_page(u64 maddr);
 void unmap_vtd_domain_page(void *va);
 
 void iommu_flush_cache_entry(void *addr);
-void iommu_flush_cache_page(void *addr);
+void iommu_flush_cache_page(void *addr, unsigned long npages);
 
 #endif // _VTD_H_
diff -r 1d4ce9e31fa0 -r 1eb6afcad849 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Mon Feb 09 14:22:07 2009 +0000
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Mon Feb 09 14:23:51 2009 +0000
@@ -38,20 +38,21 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
     unsigned long mfn;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     if ( !pg )
         return 0;
     mfn = page_to_mfn(pg);
     vaddr = map_domain_page(mfn);
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
-    iommu_flush_cache_page(vaddr);
+    iommu_flush_cache_page(vaddr, npages);
     unmap_domain_page(vaddr);
 
     return (u64)mfn << PAGE_SHIFT_4K;

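One practical consequence of switching from alloc_domheap_page() to alloc_domheap_pages() in both alloc_pgtable_maddr() implementations is that the request is rounded up to a power-of-two order of pages. A standalone sketch of that rounding, using a local helper written to the assumed semantics of Xen's get_order_from_pages() (the smallest order whose page count covers the request):

#include <stdio.h>

/* Local stand-in for get_order_from_pages(): the smallest order such
 * that (1UL << order) >= nr_pages.  Its semantics are assumed here. */
static unsigned int order_from_pages(unsigned long nr_pages)
{
    unsigned int order = 0;

    while ( (1UL << order) < nr_pages )
        order++;
    return order;
}

int main(void)
{
    unsigned long npages;

    /* 1 page -> order 0; 3 pages -> order 2, i.e. 4 pages allocated. */
    for ( npages = 1; npages <= 8; npages++ )
        printf("npages=%lu -> order=%u\n", npages, order_from_pages(npages));
    return 0;
}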
