xen-devel

[Xen-devel] [PATCH][VTD] adding support for multiple queued invalidation pages

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH][VTD] adding support for multiple queued invalidation pages
From: "Kay, Allen M" <allen.m.kay@xxxxxxxxx>
Date: Fri, 6 Feb 2009 12:54:39 -0800
Accept-language: en-US
Cc: "Espinoza Zapiain, Oliver" <oliver.espinoza.zapiain@xxxxxxxxx>, "Han, Weidong" <weidong.han@xxxxxxxxx>, "Cui, Dexuan" <dexuan.cui@xxxxxxxxx>
This patch adds logic to support more than one queued invalidation page.
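
For reference, the shape of the interface change (a minimal sketch; the signatures and callers below are taken directly from the hunks that follow):

    /* Old: always allocates and flushes a single 4K page. */
    u64 alloc_pgtable_maddr(struct domain *d);
    void iommu_flush_cache_page(void *addr);

    /* New: the caller states how many contiguous 4K pages it needs. */
    u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
    void iommu_flush_cache_page(void *addr, unsigned long npages);

    /* Existing single-page users simply pass 1 ... */
    maddr = alloc_pgtable_maddr(NULL, 1);

    /* ... while queued invalidation setup asks for the whole queue. */
    qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);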

Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>


=====================

diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri Feb 06 12:19:04 2009 -0800
@@ -45,16 +45,17 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     vaddr = map_domain_page(page_to_mfn(pg));
     if ( !vaddr )
         return 0;
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
-    iommu_flush_cache_page(vaddr);
+    iommu_flush_cache_page(vaddr, npages);
     unmap_domain_page(vaddr);
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c    Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/intremap.c    Fri Feb 06 12:30:47 2009 -0800
@@ -502,7 +502,7 @@ int intremap_setup(struct iommu *iommu)
     ir_ctrl = iommu_ir_ctrl(iommu);
     if ( ir_ctrl->iremap_maddr == 0 )
     {
-        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL);
+        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
         if ( ir_ctrl->iremap_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Feb 06 12:23:02 2009 -0800
@@ -129,9 +129,9 @@ void iommu_flush_cache_entry(void *addr)
     __iommu_flush_cache(addr, 8);
 }
 
-void iommu_flush_cache_page(void *addr)
-{
-    __iommu_flush_cache(addr, PAGE_SIZE_4K);
+void iommu_flush_cache_page(void *addr, unsigned long npages)
+{
+    __iommu_flush_cache(addr, PAGE_SIZE_4K * npages);
 }
 
 int nr_iommus;
@@ -146,7 +146,7 @@ static u64 bus_to_context_maddr(struct i
     root = &root_entries[bus];
     if ( !root_present(*root) )
     {
-        maddr = alloc_pgtable_maddr(NULL);
+        maddr = alloc_pgtable_maddr(NULL, 1);
         if ( maddr == 0 )
         {
             unmap_vtd_domain_page(root_entries);
@@ -174,7 +174,7 @@ static u64 addr_to_dma_page_maddr(struct
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->mapping_lock));
     if ( hd->pgd_maddr == 0 )
-        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain)) == 0) )
+        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain, 1)) == 0) )
             goto out;
 
     parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
@@ -187,7 +187,7 @@ static u64 addr_to_dma_page_maddr(struct
         {
             if ( !alloc )
                 break;
-            maddr = alloc_pgtable_maddr(domain);
+            maddr = alloc_pgtable_maddr(domain, 1);
             if ( !maddr )
                 break;
             dma_set_pte_addr(*pte, maddr);
@@ -577,7 +577,7 @@ static int iommu_set_root_entry(struct i
     spin_lock(&iommu->lock);
 
     if ( iommu->root_maddr == 0 )
-        iommu->root_maddr = alloc_pgtable_maddr(NULL);
+        iommu->root_maddr = alloc_pgtable_maddr(NULL, 1);
     if ( iommu->root_maddr == 0 )
     {
         spin_unlock(&iommu->lock);
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h       Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.h       Fri Feb 06 12:30:52 2009 -0800
@@ -397,7 +397,9 @@ struct poll_info {
     u32 udata;
 };
 
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
+#define MAX_QINVAL_PAGES 8
+#define NUM_QINVAL_PAGES 1
+#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
 #define qinval_present(v) ((v).lo & 1)
 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
 
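For scale: per the VT-d spec each qinval_entry is a 128-bit invalidation descriptor, i.e. 16 bytes, so the arithmetic above works out to

    QINVAL_ENTRY_NR = 4096 * NUM_QINVAL_PAGES / 16 = 256 * NUM_QINVAL_PAGES

that is, 256 entries per queue page, up to 2048 entries at the MAX_QINVAL_PAGES limit of 8.
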
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/qinval.c
--- a/xen/drivers/passthrough/vtd/qinval.c      Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/qinval.c      Fri Feb 06 12:13:31 2009 -0800
@@ -427,7 +427,7 @@ int qinval_setup(struct iommu *iommu)
 
     if ( qi_ctrl->qinval_maddr == 0 )
     {
-        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL);
+        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
         if ( qi_ctrl->qinval_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
@@ -445,6 +445,8 @@ int qinval_setup(struct iommu *iommu)
      * registers are automatically reset to 0 with write
      * to IQA register.
      */
+    if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
+        qi_ctrl->qinval_maddr |= NUM_QINVAL_PAGES - 1;
     dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
 
     /* enable queued invalidation hardware */
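
A note on the IQA write above: per the VT-d spec, the low bits of DMAR_IQA_REG form the Queue Size (QS) field, which is an exponent (hardware sizes the queue as 2^QS 4K pages), not a raw page count. ORing in NUM_QINVAL_PAGES - 1 therefore encodes correctly for the 1- and 2-page cases; for larger power-of-two queues the value to OR in would be log2 of the page count, e.g., as a sketch reusing the same helper the allocators use:

    qi_ctrl->qinval_maddr |= get_order_from_pages(NUM_QINVAL_PAGES);
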
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/vtd.h
--- a/xen/drivers/passthrough/vtd/vtd.h Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/vtd.h Fri Feb 06 12:22:51 2009 -0800
@@ -101,12 +101,12 @@ void cacheline_flush(char *);
 void cacheline_flush(char *);
 void flush_all_cache(void);
 void *map_to_nocache_virt(int nr_iommus, u64 maddr);
-u64 alloc_pgtable_maddr(struct domain *d);
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
 void free_pgtable_maddr(u64 maddr);
 void *map_vtd_domain_page(u64 maddr);
 void unmap_vtd_domain_page(void *va);
 
 void iommu_flush_cache_entry(void *addr);
-void iommu_flush_cache_page(void *addr);
+void iommu_flush_cache_page(void *addr, unsigned long npages);
 
 #endif // _VTD_H_
diff -r 57e98fc0d956 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Fri Feb 06 11:18:45 2009 +0000
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Fri Feb 06 12:21:58 2009 -0800
@@ -38,20 +38,21 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
     unsigned long mfn;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     if ( !pg )
         return 0;
     mfn = page_to_mfn(pg);
     vaddr = map_domain_page(mfn);
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
-    iommu_flush_cache_page(vaddr);
+    iommu_flush_cache_page(vaddr, npages);
     unmap_domain_page(vaddr);
 
     return (u64)mfn << PAGE_SHIFT_4K;
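
One subtlety shared by both allocators: alloc_domheap_pages() takes an order, and get_order_from_pages() rounds a non-power-of-two npages up to the next power of two, so in that case more pages are allocated than are zeroed and flushed. Power-of-two queue sizes (the default NUM_QINVAL_PAGES of 1, or the MAX_QINVAL_PAGES limit of 8) keep the two quantities equal.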

Attachment: mpqinval_0206.patch
Description: mpqinval_0206.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel