# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1243585170 -3600
# Node ID f3bed18decfcc86f075e436a369a3556f4bef970
# Parent fe84a14aacd1bee4050b56cdff98321711cd38e1
[VTD] laying the groundwork for ATS
These changes lay the groundwork for ATS enabling in Xen. They will be
followed by a patch that enables PCI MMCFG, which is needed for the
actual enabling of ATS functionality.
Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>
---
xen/drivers/passthrough/vtd/dmar.c | 36 +---
xen/drivers/passthrough/vtd/extern.h | 5
xen/drivers/passthrough/vtd/ia64/Makefile | 1
xen/drivers/passthrough/vtd/ia64/ats.c | 66 +++++++
xen/drivers/passthrough/vtd/iommu.c | 82 ++++++---
xen/drivers/passthrough/vtd/iommu.h | 2
xen/drivers/passthrough/vtd/qinval.c | 15 +
xen/drivers/passthrough/vtd/x86/Makefile | 1
xen/drivers/passthrough/vtd/x86/ats.c | 254 ++++++++++++++++++++++++++++++
xen/include/xen/pci_regs.h | 3
10 files changed, 409 insertions(+), 56 deletions(-)
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/dmar.c
--- a/xen/drivers/passthrough/vtd/dmar.c Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/dmar.c Fri May 29 09:19:30 2009 +0100
@@ -156,7 +156,7 @@ struct acpi_drhd_unit * acpi_find_matche
{
u8 bus, devfn;
struct acpi_drhd_unit *drhd;
- struct acpi_drhd_unit *found = NULL, *include_all = NULL;
+ struct acpi_drhd_unit *include_all = NULL;
int i;
if (pdev->info.is_extfn) {
@@ -177,35 +177,28 @@ struct acpi_drhd_unit * acpi_find_matche
return drhd;
if ( test_bit(bus, drhd->scope.buses) )
- found = drhd;
+ return drhd;
if ( drhd->include_all )
include_all = drhd;
}
-
- return found ? found : include_all;
+ return include_all;
}
struct acpi_atsr_unit * acpi_find_matched_atsr_unit(u8 bus, u8 devfn)
{
struct acpi_atsr_unit *atsr;
- struct acpi_atsr_unit *found = NULL, *include_all = NULL;
- int i;
+ struct acpi_atsr_unit *all_ports = NULL;
list_for_each_entry ( atsr, &acpi_atsr_units, list )
{
- for (i = 0; i < atsr->scope.devices_cnt; i++)
- if ( atsr->scope.devices[i] == PCI_BDF2(bus, devfn) )
- return atsr;
-
if ( test_bit(bus, atsr->scope.buses) )
- found = atsr;
+ return atsr;
if ( atsr->all_ports )
- include_all = atsr;
- }
-
- return found ? found : include_all;
+ all_ports = atsr;
+ }
+ return all_ports;
}
/*
@@ -227,7 +220,8 @@ static int scope_device_count(void *star
return -EINVAL;
}
- if ( scope->dev_type == ACPI_DEV_ENDPOINT ||
+ if ( scope->dev_type == ACPI_DEV_P2PBRIDGE ||
+ scope->dev_type == ACPI_DEV_ENDPOINT ||
scope->dev_type == ACPI_DEV_IOAPIC ||
scope->dev_type == ACPI_DEV_MSI_HPET )
count++;
@@ -286,19 +280,18 @@ static int __init acpi_parse_dev_scope(v
"found bridge: bdf = %x:%x.%x sec = %x sub = %x\n",
bus, path->dev, path->fn, sec_bus, sub_bus);
+ dmar_scope_add_buses(scope, acpi_scope->start_bus,
acpi_scope->start_bus);
dmar_scope_add_buses(scope, sec_bus, sub_bus);
break;
case ACPI_DEV_MSI_HPET:
dprintk(XENLOG_INFO VTDPREFIX, "found MSI HPET: bdf = %x:%x.%x\n",
bus, path->dev, path->fn);
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
break;
case ACPI_DEV_ENDPOINT:
dprintk(XENLOG_INFO VTDPREFIX, "found endpoint: bdf = %x:%x.%x\n",
bus, path->dev, path->fn);
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
break;
case ACPI_DEV_IOAPIC:
@@ -318,10 +311,9 @@ static int __init acpi_parse_dev_scope(v
list_add(&acpi_ioapic_unit->list, &drhd->ioapic_list);
}
- scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
- break;
- }
-
+ break;
+ }
+ scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
start += acpi_scope->length;
}
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/extern.h
--- a/xen/drivers/passthrough/vtd/extern.h Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/extern.h Fri May 29 09:19:30 2009 +0100
@@ -23,6 +23,8 @@
#include "dmar.h"
+extern int qinval_enabled;
+extern int ats_enabled;
extern struct qi_ctrl *qi_ctrl;
extern struct ir_ctrl *ir_ctrl;
@@ -46,5 +48,8 @@ struct iommu * ioapic_to_iommu(unsigned
struct iommu * ioapic_to_iommu(unsigned int apic_id);
struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
void clear_fault_bits(struct iommu *iommu);
+int qinval_device_iotlb(struct iommu *iommu,
+ u32 max_invs_pend, u16 sid, u16 size, u64 addr);
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu);
#endif // _VTD_EXTERN_H_
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/ia64/Makefile
--- a/xen/drivers/passthrough/vtd/ia64/Makefile Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/ia64/Makefile Fri May 29 09:19:30 2009 +0100
@@ -1,1 +1,2 @@ obj-y += vtd.o
obj-y += vtd.o
+obj-y += ats.o
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/ia64/ats.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/vtd/ia64/ats.c Fri May 29 09:19:30 2009 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Allen Kay <allen.m.kay@xxxxxxxxx>
+ */
+
+#include <xen/sched.h>
+#include <xen/iommu.h>
+#include <xen/time.h>
+#include <xen/pci.h>
+#include <xen/pci_regs.h>
+#include <asm/msi.h>
+#include "../iommu.h"
+#include "../dmar.h"
+#include "../vtd.h"
+#include "../extern.h"
+
+int ats_enabled = 0;
+
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
+{
+ return NULL;
+}
+
+/*
+ * BUGBUG: return 0 until pcimmcfg is checked in.
+ */
+int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
+{
+ return 0;
+}
+
+int ats_device(int seg, int bus, int devfn)
+{
+ return 0;
+}
+
+int enable_ats_device(int seg, int bus, int devfn)
+{
+ return 0;
+}
+
+static int device_in_domain(struct iommu *iommu,
+ struct pci_ats_dev *pdev, u16 did)
+{
+ return 0;
+}
+
+int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
+{
+ return 0;
+}
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c Fri May 29 09:19:30 2009 +0100
@@ -258,7 +258,7 @@ static int flush_context_reg(
static int flush_context_reg(
void *_iommu,
u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry)
{
struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
@@ -271,7 +271,7 @@ static int flush_context_reg(
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
@@ -318,35 +318,35 @@ static int flush_context_reg(
}
static int inline iommu_flush_context_global(
- struct iommu *iommu, int non_present_entry_flush)
+ struct iommu *iommu, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
static int inline iommu_flush_context_domain(
- struct iommu *iommu, u16 did, int non_present_entry_flush)
+ struct iommu *iommu, u16 did, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
static int inline iommu_flush_context_device(
struct iommu *iommu, u16 did, u16 source_id,
- u8 function_mask, int non_present_entry_flush)
+ u8 function_mask, int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, did, source_id, function_mask,
DMA_CCMD_DEVICE_INVL,
- non_present_entry_flush);
+ flush_non_present_entry);
}
/* return value determine if we need a write buffer flush */
static int flush_iotlb_reg(void *_iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+ u64 addr, unsigned int size_order, u64 type,
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
@@ -360,7 +360,7 @@ static int flush_iotlb_reg(void *_iommu,
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
@@ -421,19 +421,19 @@ static int flush_iotlb_reg(void *_iommu,
}
static int inline iommu_flush_iotlb_global(struct iommu *iommu,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- non_present_entry_flush);
+ flush_non_present_entry, flush_dev_iotlb);
}
static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
+ flush_non_present_entry, flush_dev_iotlb);
}
static int inline get_alignment(u64 base, unsigned int size)
@@ -452,8 +452,8 @@ static int inline get_alignment(u64 base
}
static int inline iommu_flush_iotlb_psi(
- struct iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+ struct iommu *iommu, u16 did, u64 addr, unsigned int pages,
+ int flush_non_present_entry, int flush_dev_iotlb)
{
unsigned int align;
struct iommu_flush *flush = iommu_get_flush(iommu);
@@ -463,8 +463,7 @@ static int inline iommu_flush_iotlb_psi(
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
flush_dev_iotlb);
/*
* PSI requires page size is 2 ^ x, and the base address is naturally
@@ -473,27 +472,28 @@ static int inline iommu_flush_iotlb_psi(
align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
/* Fallback to domain selective flush if size is too big */
if ( align > cap_max_amask_val(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
flush_dev_iotlb);
addr >>= PAGE_SHIFT_4K + align;
addr <<= PAGE_SHIFT_4K + align;
- return flush->iotlb(iommu, did, addr, align,
- DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+ return flush->iotlb(iommu, did, addr, align, DMA_TLB_PSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
}
void iommu_flush_all(void)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
+ int flush_dev_iotlb;
flush_all_cache();
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
iommu_flush_context_global(iommu, 0);
- iommu_flush_iotlb_global(iommu, 0);
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
}
}
@@ -505,6 +505,7 @@ static void dma_pte_clear_one(struct dom
struct iommu *iommu;
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
+ int flush_dev_iotlb;
spin_lock(&hd->mapping_lock);
/* get last level pte */
@@ -534,9 +535,12 @@ static void dma_pte_clear_one(struct dom
{
iommu = drhd->iommu;
if ( test_bit(iommu->index, &hd->iommu_bitmap) )
+ {
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
- addr, 1, 0))
+ addr, 1, 0, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
+ }
}
unmap_vtd_domain_page(page);
@@ -926,6 +930,10 @@ static int iommu_alloc(struct acpi_drhd_
iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "drhd->address = %"PRIx64"\n", drhd->address);
+ gdprintk(XENLOG_INFO VTDPREFIX, "iommu->reg = %p\n", iommu->reg);
+
/* Calculate number of pagetable levels: between 2 and 4. */
sagaw = cap_sagaw(iommu->cap);
for ( agaw = level_to_agaw(4); agaw >= 0; agaw-- )
@@ -1079,7 +1087,11 @@ static int domain_context_mapping_one(
}
context_set_address_root(*context, pgd_maddr);
- context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
+ if ( ats_enabled && ecap_dev_iotlb(iommu->ecap) )
+ context_set_translation_type(*context, CONTEXT_TT_DEV_IOTLB);
+ else
+ context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
+
spin_unlock(&hd->mapping_lock);
}
@@ -1099,7 +1111,10 @@ static int domain_context_mapping_one(
DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, 0, 1);
+ {
+ int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+ }
set_bit(iommu->index, &hd->iommu_bitmap);
@@ -1322,7 +1337,10 @@ static int domain_context_unmap_one(
DMA_CCMD_MASK_NOBIT, 0) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+ {
+ int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0,
flush_dev_iotlb);
+ }
spin_unlock(&iommu->lock);
unmap_vtd_domain_page(context_entries);
@@ -1463,6 +1481,7 @@ int intel_iommu_map_page(
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
int pte_present;
+ int flush_dev_iotlb;
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
@@ -1504,9 +1523,10 @@ int intel_iommu_map_page(
if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
continue;
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
(paddr_t)gfn << PAGE_SHIFT_4K, 1,
- !pte_present) )
+ !pte_present, flush_dev_iotlb) )
iommu_flush_write_buffer(iommu);
}
@@ -1643,6 +1663,10 @@ static void setup_dom0_devices(struct do
pdev->domain = d;
list_add(&pdev->domain_list, &d->arch.pdev_list);
domain_context_mapping(d, pdev->bus, pdev->devfn);
+#if defined(NOT_YET)
+ if ( ats_device(0, pdev->bus, pdev->devfn) )
+ enable_ats_device(0, pdev->bus, pdev->devfn);
+#endif
}
}
}
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.h Fri May 29 09:19:30 2009 +0100
@@ -455,7 +455,7 @@ struct iommu_flush {
int (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
- u64 type, int non_present_entry_flush);
+ u64 type, int flush_non_present_entry, int flush_dev_iotlb);
};
struct intel_iommu {
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/qinval.c
--- a/xen/drivers/passthrough/vtd/qinval.c Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/qinval.c Fri May 29 09:19:30 2009 +0100
@@ -29,6 +29,8 @@
#include "vtd.h"
#include "extern.h"
+int qinval_enabled;
+
static void print_qi_regs(struct iommu *iommu)
{
u64 val;
@@ -343,7 +345,7 @@ int iommu_flush_iec_index(struct iommu *
static int flush_context_qi(
void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry)
{
int ret = 0;
struct iommu *iommu = (struct iommu *)_iommu;
@@ -355,7 +357,7 @@ static int flush_context_qi(
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
@@ -375,7 +377,7 @@ static int flush_iotlb_qi(
static int flush_iotlb_qi(
void *_iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+ int flush_non_present_entry, int flush_dev_iotlb)
{
u8 dr = 0, dw = 0;
int ret = 0;
@@ -388,7 +390,7 @@ static int flush_iotlb_qi(
* entry, we flush entries of domain 0 (the domain id is used to cache
* any non-present entries)
*/
- if ( non_present_entry_flush )
+ if ( flush_non_present_entry )
{
if ( !cap_caching_mode(iommu->cap) )
return 1;
@@ -407,6 +409,10 @@ static int flush_iotlb_qi(
ret = queue_invalidate_iotlb(iommu,
(type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
dw, did, (u8)size_order, 0, addr);
+#if defined(NOT_YET)
+ if ( flush_dev_iotlb )
+ ret |= dev_invalidate_iotlb(iommu, did, addr, size_order, type);
+#endif
ret |= invalidate_sync(iommu);
}
return ret;
@@ -462,6 +468,7 @@ int enable_qinval(struct iommu *iommu)
cpu_relax();
}
+ qinval_enabled = 1;
return 0;
}
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/x86/Makefile
--- a/xen/drivers/passthrough/vtd/x86/Makefile Thu May 28 11:07:19 2009 +0100
+++ b/xen/drivers/passthrough/vtd/x86/Makefile Fri May 29 09:19:30 2009 +0100
@@ -1,1 +1,2 @@ obj-y += vtd.o
obj-y += vtd.o
+obj-y += ats.o
diff -r fe84a14aacd1 -r f3bed18decfc xen/drivers/passthrough/vtd/x86/ats.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/vtd/x86/ats.c Fri May 29 09:19:30 2009 +0100
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Allen Kay <allen.m.kay@xxxxxxxxx>
+ */
+
+#include <xen/sched.h>
+#include <xen/iommu.h>
+#include <xen/time.h>
+#include <xen/pci.h>
+#include <xen/pci_regs.h>
+#include <asm/msi.h>
+#include "../iommu.h"
+#include "../dmar.h"
+#include "../vtd.h"
+#include "../extern.h"
+
+LIST_HEAD(ats_dev_drhd_units);
+
+#define ATS_REG_CAP 4
+#define ATS_REG_CTL 6
+#define ATS_QUEUE_DEPTH_MASK 0xF
+#define ATS_ENABLE (1<<15)
+
+struct pci_ats_dev {
+ struct list_head list;
+ u8 bus;
+ u8 devfn;
+ u16 ats_queue_depth; /* ATS device invalidation queue depth */
+ spinlock_t lock;
+};
+static LIST_HEAD(ats_devices);
+
+static void parse_ats_param(char *s);
+custom_param("ats", parse_ats_param);
+
+int ats_enabled = 1;
+
+static void parse_ats_param(char *s)
+{
+ char *ss;
+
+ do {
+ ss = strchr(s, ',');
+ if ( ss )
+ *ss = '\0';
+
+ if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
+ !strcmp(s, "0") || !strcmp(s, "disable") )
+ ats_enabled = 0;
+
+ if ( !strcmp(s, "on") || !strcmp(s, "yes") || !strcmp(s, "true") ||
+ !strcmp(s, "1") || !strcmp(s, "enable") )
+ ats_enabled = 1;
+
+ s = ss + 1;
+ } while ( ss );
+}
+
+struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
+{
+ struct acpi_drhd_unit *drhd;
+ list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
+ {
+ if ( drhd->iommu == iommu )
+ return drhd;
+ }
+ return NULL;
+}
+
+/*
+ * BUGBUG: return 0 until pcimmcfg is checked in.
+ */
+int pci_find_ext_capability(int seg, int bus, int devfn, int cap)
+{
+ return 0;
+}
+
+int ats_device(int seg, int bus, int devfn)
+{
+ struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
+ struct pci_dev *pdev;
+ int pos = 0;
+
+ if ( !ats_enabled )
+ return 0;
+
+ if ( !qinval_enabled )
+ return 0;
+
+ pdev = pci_get_pdev(bus, devfn);
+ drhd = acpi_find_matched_drhd_unit(pdev);
+ if ( !ecap_dev_iotlb(drhd->iommu->ecap) )
+ return 0;
+
+ if ( !acpi_find_matched_atsr_unit(bus, devfn) )
+ return 0;
+
+ ats_drhd = find_ats_dev_drhd(drhd->iommu);
+ pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
+
+ if ( pos && (ats_drhd == NULL) )
+ {
+ new_drhd = xmalloc(struct acpi_drhd_unit);
+ memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
+ list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
+ }
+ return pos;
+}
+
+int enable_ats_device(int seg, int bus, int devfn)
+{
+ struct pci_ats_dev *pdev;
+ u32 value;
+ u16 queue_depth;
+ int pos;
+
+ pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
+
+ if ( !pos )
+ {
+ dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x:%x\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ return 0;
+ }
+ else
+ dprintk(XENLOG_ERR VTDPREFIX, "ats capability found %x:%x:%x\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ /* BUGBUG: add back seg when multi-seg platform support is enabled */
+ value = pci_conf_read16(bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), pos + ATS_REG_CAP);
+ queue_depth = value & ATS_QUEUE_DEPTH_MASK;
+
+ /* BUGBUG: add back seg when multi-seg platform support is enabled */
+ value = pci_conf_read16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos +
ATS_REG_CTL);
+ value |= ATS_ENABLE;
+
+ /* BUGBUG: add back seg when multi-seg platform support is enabled */
+ pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + ATS_REG_CTL,
value);
+
+ if ( acpi_find_matched_atsr_unit(bus, devfn) )
+ {
+ pdev = xmalloc(struct pci_ats_dev);
+ pdev->bus = bus;
+ pdev->devfn = devfn;
+ pdev->ats_queue_depth = queue_depth;
+ list_add(&(pdev->list), &ats_devices);
+ }
+ return pos;
+}
+
+static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16
did)
+{
+ struct root_entry *root_entry = NULL;
+ struct context_entry *ctxt_entry = NULL;
+ int tt, found = 0;
+
+ root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
+ if ( !root_entry || !root_present(root_entry[pdev->bus]) )
+ goto out;
+
+ ctxt_entry = (struct context_entry *)
+ map_vtd_domain_page(root_entry[pdev->bus].val);
+
+ if ( ctxt_entry == NULL )
+ goto out;
+
+ if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
+ goto out;
+
+ tt = context_translation_type(ctxt_entry[pdev->devfn]);
+ if ( tt != CONTEXT_TT_DEV_IOTLB )
+ goto out;
+
+ found = 1;
+out:
+ if ( root_entry )
+ unmap_vtd_domain_page(root_entry);
+
+ if ( ctxt_entry )
+ unmap_vtd_domain_page(ctxt_entry);
+
+ if ( found )
+ return 1;
+
+ return 0;
+}
+
+int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
+{
+ struct pci_ats_dev *pdev;
+ int sbit, ret = 0;
+ u16 sid;
+
+ if ( !ecap_dev_iotlb(iommu->ecap) )
+ return ret;
+
+ list_for_each_entry( pdev, &ats_devices, list )
+ {
+ sid = (pdev->bus << 8) | pdev->devfn;
+
+ switch ( type ) {
+ case DMA_TLB_DSI_FLUSH:
+ if ( !device_in_domain(iommu, pdev, did) )
+ break;
+ /* fall through if DSI condition met */
+ case DMA_TLB_GLOBAL_FLUSH:
+ /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
+ sbit = 1;
+ addr = (~0 << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
+ ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
+ break;
+ case DMA_TLB_PSI_FLUSH:
+ if ( !device_in_domain(iommu, pdev, did) )
+ break;
+
+ addr &= ~0 << (PAGE_SHIFT + size_order);
+
+ /* if size <= 4K, set sbit = 0, else set sbit = 1 */
+ sbit = size_order ? 1 : 0;
+
+ /* clear lower bits */
+ addr &= (~0 << (PAGE_SHIFT + size_order));
+
+ /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
+ if ( sbit )
+ addr &= (~0 & ~(1 << (PAGE_SHIFT + size_order)));
+
+ ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
+ break;
+ default:
+ dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
+ break;
+ }
+ }
+ return ret;
+}
diff -r fe84a14aacd1 -r f3bed18decfc xen/include/xen/pci_regs.h
--- a/xen/include/xen/pci_regs.h Thu May 28 11:07:19 2009 +0100
+++ b/xen/include/xen/pci_regs.h Fri May 29 09:19:30 2009 +0100
@@ -419,6 +419,9 @@
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
+#define PCI_EXT_CAP_ID_ARI 0xE
+#define PCI_EXT_CAP_ID_ATS 0xF
+#define PCI_EXT_CAP_ID_IOV 0x10
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|