# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1321610860 -3600
# Node ID 6ef12cc262fb3ac11f9ea58707afb4f7778017ac
# Parent 135678ae1bbc02171294c21e2a5469d898d2a353
amd iommu: Factor out iommu command handling functions
and move them into a new file, iommu_cmd.c.

The command buffer queueing/flushing helpers and the low level
invalidation commands move out of iommu_map.c and iommu_intr.c; call
sites switch to the new amd_iommu_flush_device() and
amd_iommu_flush_intremap() wrappers instead of issuing invalidate
commands and flushing the command buffer by hand.
Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
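
The new amd_iommu_flush_*() wrappers pair an invalidation with a command
buffer flush, so callers only need to take the per-IOMMU lock. A minimal
call site (the pattern used at the sites updated below) looks like:

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_flush_device(iommu, req_id);  /* invalidate DTE, then flush */
    spin_unlock_irqrestore(&iommu->lock, flags);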
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/Makefile
--- a/xen/drivers/passthrough/amd/Makefile Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/Makefile Fri Nov 18 11:07:40 2011 +0100
@@ -4,3 +4,4 @@ obj-y += iommu_map.o
obj-y += pci_amd_iommu.o
obj-bin-y += iommu_acpi.init.o
obj-y += iommu_intr.o
+obj-y += iommu_cmd.o
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/iommu_cmd.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c Fri Nov 18 11:07:40 2011 +0100
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2011 Advanced Micro Devices, Inc.
+ * Author: Leo Duran <leo.duran@xxxxxxx>
+ * Author: Wei Wang <wei.wang2@xxxxxxx> - adapted to xen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/sched.h>
+#include <xen/hvm/iommu.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+#include "../ats.h"
+
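+/* Copy one command (four 32-bit words) into the ring buffer; fail if full. */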
+static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
+{
+ u32 tail, head, *cmd_buffer;
+ int i;
+
+ tail = iommu->cmd_buffer_tail;
+ if ( ++tail == iommu->cmd_buffer.entries )
+ tail = 0;
+
+ head = get_field_from_reg_u32(readl(iommu->mmio_base +
+ IOMMU_CMD_BUFFER_HEAD_OFFSET),
+ IOMMU_CMD_BUFFER_HEAD_MASK,
+ IOMMU_CMD_BUFFER_HEAD_SHIFT);
+ if ( head != tail )
+ {
+ cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
+ (iommu->cmd_buffer_tail *
+ IOMMU_CMD_BUFFER_ENTRY_SIZE));
+
+ for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
+ cmd_buffer[i] = cmd[i];
+
+ iommu->cmd_buffer_tail = tail;
+ return 1;
+ }
+
+ return 0;
+}
+
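+/* Make queued commands visible to the hardware by updating the tail pointer. */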
+static void commit_iommu_command_buffer(struct amd_iommu *iommu)
+{
+ u32 tail;
+
+ set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
+ IOMMU_CMD_BUFFER_TAIL_MASK,
+ IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
+ writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
+}
+
+int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
+{
+ if ( queue_iommu_command(iommu, cmd) )
+ {
+ commit_iommu_command_buffer(iommu);
+ return 1;
+ }
+
+ return 0;
+}
+
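+/*
+ * Drain the command buffer: issue an empty COMPLETION_WAIT command and
+ * poll the ComWaitInt status bit until the IOMMU has processed it.
+ */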
+static void flush_command_buffer(struct amd_iommu *iommu)
+{
+ u32 cmd[4], status;
+ int loop_count, comp_wait;
+
+ /* clear 'ComWaitInt' in status register (WIC) */
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+ IOMMU_STATUS_COMP_WAIT_INT_MASK,
+ IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
+ writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+
+ /* send an empty COMPLETION_WAIT command to flush command buffer */
+ cmd[3] = cmd[2] = 0;
+ set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
+ IOMMU_CMD_OPCODE_MASK,
+ IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
+ set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+ IOMMU_COMP_WAIT_I_FLAG_MASK,
+ IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
+ send_iommu_command(iommu, cmd);
+
+ /* Make loop_count long enough for polling completion wait bit */
+ loop_count = 1000;
+ do {
+ status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+ comp_wait = get_field_from_reg_u32(status,
+ IOMMU_STATUS_COMP_WAIT_INT_MASK,
+ IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
+ --loop_count;
+ } while ( !comp_wait && loop_count );
+
+ if ( comp_wait )
+ {
+ /* clear 'ComWaitInt' in status register (WIC) */
+ status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
+ writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+ return;
+ }
+ AMD_IOMMU_DEBUG("Warning: ComWaitInt bit did not assert!\n");
+}
+
+/* Build low level iommu command messages */
+static void invalidate_iommu_pages(struct amd_iommu *iommu,
+ u64 io_addr, u16 domain_id, u16 order)
+{
+ u64 addr_lo, addr_hi;
+ u32 cmd[4], entry;
+ int sflag = 0, pde = 0;
+
+ ASSERT ( order == 0 || order == 9 || order == 18 );
+
+ /* All pages associated with the domainID are invalidated */
+ if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) )
+ {
+ sflag = 1;
+ pde = 1;
+ }
+
+ /* If sflag == 1, the size of the invalidate command is determined
+ by the first zero bit in the address starting from Address[12] */
+ if ( order )
+ {
+ u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
+ io_addr &= ~mask;
+ io_addr |= mask - 1;
+ }
+
+ addr_lo = io_addr & DMA_32BIT_MASK;
+ addr_hi = io_addr >> 32;
+
+ set_field_in_reg_u32(domain_id, 0,
+ IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
+ IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
+ set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
+ IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+ &entry);
+ cmd[1] = entry;
+
+ set_field_in_reg_u32(sflag, 0,
+ IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
+ IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
+ set_field_in_reg_u32(pde, entry,
+ IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
+ IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
+ set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
+ IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
+ IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
+ cmd[2] = entry;
+
+ set_field_in_reg_u32((u32)addr_hi, 0,
+ IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
+ IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
+ cmd[3] = entry;
+
+ cmd[0] = 0;
+ send_iommu_command(iommu, cmd);
+}
+
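+/* Build an INVALIDATE_IOTLB_PAGES command for a remote (ATS) device IOTLB. */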
+static void invalidate_iotlb_pages(struct amd_iommu *iommu,
+ u16 maxpend, u32 pasid, u16 queueid,
+ u64 io_addr, u16 dev_id, u16 order)
+{
+ u64 addr_lo, addr_hi;
+ u32 cmd[4], entry;
+ int sflag = 0;
+
+ ASSERT ( order == 0 || order == 9 || order == 18 );
+
+ if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) )
+ sflag = 1;
+
+ /* If sflag == 1, the size of the invalidate command is determined
+ by the first zero bit in the address starting from Address[12] */
+ if ( order )
+ {
+ u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
+ io_addr &= ~mask;
+ io_addr |= mask - 1;
+ }
+
+ addr_lo = io_addr & DMA_32BIT_MASK;
+ addr_hi = io_addr >> 32;
+
+ set_field_in_reg_u32(dev_id, 0,
+ IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK,
+ IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry);
+
+ set_field_in_reg_u32(maxpend, entry,
+ IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK,
+ IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry);
+
+ set_field_in_reg_u32(pasid & 0xff, entry,
+ IOMMU_INV_IOTLB_PAGES_PASID1_MASK,
+ IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry);
+ cmd[0] = entry;
+
+ set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0,
+ IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+ &entry);
+
+ set_field_in_reg_u32(pasid >> 8, entry,
+ IOMMU_INV_IOTLB_PAGES_PASID2_MASK,
+ IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT,
+ &entry);
+
+ set_field_in_reg_u32(queueid, entry,
+ IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK,
+ IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT,
+ &entry);
+ cmd[1] = entry;
+
+    set_field_in_reg_u32(sflag, 0,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT, &entry);
+
+ set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
+ IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK,
+ IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry);
+ cmd[2] = entry;
+
+ set_field_in_reg_u32((u32)addr_hi, 0,
+ IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK,
+ IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry);
+ cmd[3] = entry;
+
+ send_iommu_command(iommu, cmd);
+}
+
+static void invalidate_dev_table_entry(struct amd_iommu *iommu,
+ u16 device_id)
+{
+ u32 cmd[4], entry;
+
+ cmd[3] = cmd[2] = 0;
+ set_field_in_reg_u32(device_id, 0,
+ IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
+ IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
+ cmd[0] = entry;
+
+ set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
+ IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+ &entry);
+ cmd[1] = entry;
+
+ send_iommu_command(iommu, cmd);
+}
+
+static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
+{
+ u32 cmd[4], entry;
+
+ cmd[3] = cmd[2] = 0;
+ set_field_in_reg_u32(device_id, 0,
+ IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
+ IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
+ cmd[0] = entry;
+ set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
+ IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+ &entry);
+ cmd[1] = entry;
+ send_iommu_command(iommu, cmd);
+}
+
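+/* Flush the IOTLB of a single ATS-enabled device for the given range. */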
+void amd_iommu_flush_iotlb(struct pci_dev *pdev,
+ uint64_t gaddr, unsigned int order)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ unsigned int bdf, req_id, queueid, maxpend;
+ struct pci_ats_dev *ats_pdev;
+
+ if ( !ats_enabled )
+ return;
+
+ ats_pdev = get_ats_device(pdev->seg, pdev->bus, pdev->devfn);
+ if ( ats_pdev == NULL )
+ return;
+
+ if ( !pci_ats_enabled(ats_pdev->seg, ats_pdev->bus, ats_pdev->devfn) )
+ return;
+
+ bdf = PCI_BDF2(ats_pdev->bus, ats_pdev->devfn);
+ iommu = find_iommu_for_device(ats_pdev->seg, bdf);
+
+ if ( !iommu )
+ {
+ AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n",
+ __func__, ats_pdev->seg, ats_pdev->bus,
+ PCI_SLOT(ats_pdev->devfn), PCI_FUNC(ats_pdev->devfn));
+ return;
+ }
+
+ if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
+ return;
+
+ req_id = get_dma_requestor_id(iommu->seg, bdf);
+ queueid = req_id;
+ maxpend = (ats_pdev->ats_queue_depth + 32) & 0xff;
+
+ /* send INVALIDATE_IOTLB_PAGES command */
+ spin_lock_irqsave(&iommu->lock, flags);
+ invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
+ flush_command_buffer(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
+ unsigned int order)
+{
+ struct pci_dev *pdev;
+
+ if ( !ats_enabled )
+ return;
+
+ for_each_pdev( d, pdev )
+ amd_iommu_flush_iotlb(pdev, gaddr, order);
+}
+
+/* Flush iommu cache after p2m changes. */
+static void _amd_iommu_flush_pages(struct domain *d,
+ uint64_t gaddr, unsigned int order)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ unsigned int dom_id = hd->domain_id;
+
+ /* send INVALIDATE_IOMMU_PAGES command */
+ for_each_amd_iommu ( iommu )
+ {
+ spin_lock_irqsave(&iommu->lock, flags);
+ invalidate_iommu_pages(iommu, gaddr, dom_id, order);
+ flush_command_buffer(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+ if ( ats_enabled )
+ amd_iommu_flush_all_iotlbs(d, gaddr, order);
+}
+
+void amd_iommu_flush_all_pages(struct domain *d)
+{
+ _amd_iommu_flush_pages(d, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+}
+
+void amd_iommu_flush_pages(struct domain *d,
+ unsigned long gfn, unsigned int order)
+{
+ _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
+}
+
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
+{
+ ASSERT( spin_is_locked(&iommu->lock) );
+
+ invalidate_dev_table_entry(iommu, bdf);
+ flush_command_buffer(iommu);
+}
+
+void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
+{
+ ASSERT( spin_is_locked(&iommu->lock) );
+
+ invalidate_interrupt_table(iommu, bdf);
+ flush_command_buffer(iommu);
+}
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c Fri Nov 18 11:07:40 2011 +0100
@@ -933,9 +933,8 @@ static int _invalidate_all_devices(
if ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_dev_table_entry(iommu, req_id);
- invalidate_interrupt_table(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_device(iommu, req_id);
+ amd_iommu_flush_intremap(iommu, req_id);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_intr.c Fri Nov 18 11:07:40 2011 +0100
@@ -96,22 +96,6 @@ static void update_intremap_entry(u32* e
INT_REMAP_ENTRY_VECTOR_SHIFT, entry);
}
-void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
-{
- u32 cmd[4], entry;
-
- cmd[3] = cmd[2] = 0;
- set_field_in_reg_u32(device_id, 0,
- IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
- IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
- cmd[0] = entry;
- set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
- IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
- &entry);
- cmd[1] = entry;
- send_iommu_command(iommu, cmd);
-}
-
static void update_intremap_entry_from_ioapic(
int bdf,
struct amd_iommu *iommu,
@@ -144,8 +128,7 @@ static void update_intremap_entry_from_i
if ( iommu->enabled )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_interrupt_table(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_intremap(iommu, req_id);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
@@ -202,8 +185,7 @@ int __init amd_iommu_setup_ioapic_remapp
if ( iommu->enabled )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_interrupt_table(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_intremap(iommu, req_id);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
@@ -347,10 +329,9 @@ done:
if ( iommu->enabled )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_interrupt_table(iommu, req_id);
+ amd_iommu_flush_intremap(iommu, req_id);
if ( alias_id != req_id )
- invalidate_interrupt_table(iommu, alias_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_intremap(iommu, alias_id);
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c Fri Nov 18 11:07:40 2011 +0100
@@ -27,220 +27,6 @@
#include "../ats.h"
#include <xen/pci.h>
-static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
-{
- u32 tail, head, *cmd_buffer;
- int i;
-
- tail = iommu->cmd_buffer_tail;
- if ( ++tail == iommu->cmd_buffer.entries )
- tail = 0;
- head = get_field_from_reg_u32(
- readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET),
- IOMMU_CMD_BUFFER_HEAD_MASK,
- IOMMU_CMD_BUFFER_HEAD_SHIFT);
- if ( head != tail )
- {
- cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
- (iommu->cmd_buffer_tail *
- IOMMU_CMD_BUFFER_ENTRY_SIZE));
- for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
- cmd_buffer[i] = cmd[i];
-
- iommu->cmd_buffer_tail = tail;
- return 1;
- }
-
- return 0;
-}
-
-static void commit_iommu_command_buffer(struct amd_iommu *iommu)
-{
- u32 tail;
-
- set_field_in_reg_u32(iommu->cmd_buffer_tail, 0,
- IOMMU_CMD_BUFFER_TAIL_MASK,
- IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail);
- writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
-}
-
-int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
-{
- if ( queue_iommu_command(iommu, cmd) )
- {
- commit_iommu_command_buffer(iommu);
- return 1;
- }
-
- return 0;
-}
-
-static void invalidate_iommu_pages(struct amd_iommu *iommu,
- u64 io_addr, u16 domain_id, u16 order)
-{
- u64 addr_lo, addr_hi;
- u32 cmd[4], entry;
- int sflag = 0, pde = 0;
-
- ASSERT ( order == 0 || order == 9 || order == 18 );
-
- /* All pages associated with the domainID are invalidated */
- if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) )
- {
- sflag = 1;
- pde = 1;
- }
-
- /* If sflag == 1, the size of the invalidate command is determined
- by the first zero bit in the address starting from Address[12] */
- if ( order )
- {
- u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
- io_addr &= ~mask;
- io_addr |= mask - 1;
- }
-
- addr_lo = io_addr & DMA_32BIT_MASK;
- addr_hi = io_addr >> 32;
-
- set_field_in_reg_u32(domain_id, 0,
- IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
- IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
- set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
- IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
- &entry);
- cmd[1] = entry;
-
- set_field_in_reg_u32(sflag, 0,
- IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
- IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
- set_field_in_reg_u32(pde, entry,
- IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
- IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
- set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
- IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
- IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
- cmd[2] = entry;
-
- set_field_in_reg_u32((u32)addr_hi, 0,
- IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
- IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
- cmd[3] = entry;
-
- cmd[0] = 0;
- send_iommu_command(iommu, cmd);
-}
-
-static void invalidate_iotlb_pages(struct amd_iommu *iommu,
- u16 maxpend, u32 pasid, u16 queueid,
- u64 io_addr, u16 dev_id, u16 order)
-{
- u64 addr_lo, addr_hi;
- u32 cmd[4], entry;
- int sflag = 0;
-
- ASSERT ( order == 0 || order == 9 || order == 18 );
-
- if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) )
- sflag = 1;
-
- /* If sflag == 1, the size of the invalidate command is determined
- by the first zero bit in the address starting from Address[12] */
- if ( order )
- {
- u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
- io_addr &= ~mask;
- io_addr |= mask - 1;
- }
-
- addr_lo = io_addr & DMA_32BIT_MASK;
- addr_hi = io_addr >> 32;
-
- set_field_in_reg_u32(dev_id, 0,
- IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK,
- IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry);
-
- set_field_in_reg_u32(maxpend, entry,
- IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK,
- IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry);
-
- set_field_in_reg_u32(pasid & 0xff, entry,
- IOMMU_INV_IOTLB_PAGES_PASID1_MASK,
- IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry);
- cmd[0] = entry;
-
- set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0,
- IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
- &entry);
-
- set_field_in_reg_u32(pasid >> 8, entry,
- IOMMU_INV_IOTLB_PAGES_PASID2_MASK,
- IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT,
- &entry);
-
- set_field_in_reg_u32(queueid, entry,
- IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK,
- IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT,
- &entry);
- cmd[1] = entry;
-
- set_field_in_reg_u32(sflag, 0,
- IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK,
- IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK, &entry);
-
- set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
- IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK,
- IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry);
- cmd[2] = entry;
-
- set_field_in_reg_u32((u32)addr_hi, 0,
- IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK,
- IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry);
- cmd[3] = entry;
-
- send_iommu_command(iommu, cmd);
-}
-void flush_command_buffer(struct amd_iommu *iommu)
-{
- u32 cmd[4], status;
- int loop_count, comp_wait;
-
- /* clear 'ComWaitInt' in status register (WIC) */
- set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
- IOMMU_STATUS_COMP_WAIT_INT_MASK,
- IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
- writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
-
- /* send an empty COMPLETION_WAIT command to flush command buffer */
- cmd[3] = cmd[2] = 0;
- set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
- IOMMU_CMD_OPCODE_MASK,
- IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
- set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
- IOMMU_COMP_WAIT_I_FLAG_MASK,
- IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
- send_iommu_command(iommu, cmd);
-
- /* Make loop_count long enough for polling completion wait bit */
- loop_count = 1000;
- do {
- status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
- comp_wait = get_field_from_reg_u32(status,
- IOMMU_STATUS_COMP_WAIT_INT_MASK,
- IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
- --loop_count;
- } while ( !comp_wait && loop_count );
-
- if ( comp_wait )
- {
- /* clear 'ComWaitInt' in status register (WIC) */
- status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
- writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
- return;
- }
- AMD_IOMMU_DEBUG("Warning: ComWaitInt bit did not assert!\n");
-}
-
/* Given pfn and page table level, return pde index */
static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
{
@@ -480,25 +266,6 @@ static int amd_iommu_is_pte_present(u32
IOMMU_PDE_PRESENT_SHIFT);
}
-void invalidate_dev_table_entry(struct amd_iommu *iommu,
- u16 device_id)
-{
- u32 cmd[4], entry;
-
- cmd[3] = cmd[2] = 0;
- set_field_in_reg_u32(device_id, 0,
- IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK,
- IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry);
- cmd[0] = entry;
-
- set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0,
- IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
- &entry);
- cmd[1] = entry;
-
- send_iommu_command(iommu, cmd);
-}
-
/* For each pde, We use ignored bits (bit 1 - bit 8 and bit 63)
* to save pde count, pde count = 511 is a candidate of page coalescing.
*/
@@ -809,8 +576,7 @@ static int update_paging_mode(struct dom
hd->domain_id,
hd->paging_mode, 1);
- invalidate_dev_table_entry(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_device(iommu, req_id);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -967,94 +733,6 @@ int amd_iommu_reserve_domain_unity_map(s
return 0;
}
-void amd_iommu_flush_iotlb(struct pci_dev *pdev,
- uint64_t gaddr, unsigned int order)
-{
- unsigned long flags;
- struct amd_iommu *iommu;
- unsigned int bdf, req_id, queueid, maxpend;
- struct pci_ats_dev *ats_pdev;
-
- if ( !ats_enabled )
- return;
-
- ats_pdev = get_ats_device(pdev->seg, pdev->bus, pdev->devfn);
- if ( ats_pdev == NULL )
- return;
-
- if ( !pci_ats_enabled(ats_pdev->seg, ats_pdev->bus, ats_pdev->devfn) )
- return;
-
- bdf = PCI_BDF2(ats_pdev->bus, ats_pdev->devfn);
- iommu = find_iommu_for_device(ats_pdev->seg, bdf);
-
- if ( !iommu )
- {
- AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n",
- __func__, ats_pdev->seg, ats_pdev->bus,
- PCI_SLOT(ats_pdev->devfn), PCI_FUNC(ats_pdev->devfn));
- return;
- }
-
- if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
- return;
-
- req_id = get_dma_requestor_id(iommu->seg, bdf);
- queueid = req_id;
- maxpend = (ats_pdev->ats_queue_depth + 32) & 0xff;
-
- /* send INVALIDATE_IOTLB_PAGES command */
- spin_lock_irqsave(&iommu->lock, flags);
- invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
- flush_command_buffer(iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr,
- unsigned int order)
-{
- struct pci_dev *pdev;
-
- if ( !ats_enabled )
- return;
-
- for_each_pdev( d, pdev )
- amd_iommu_flush_iotlb(pdev, gaddr, order);
-}
-
-/* Flush iommu cache after p2m changes. */
-static void _amd_iommu_flush_pages(struct domain *d,
- uint64_t gaddr, unsigned int order)
-{
- unsigned long flags;
- struct amd_iommu *iommu;
- struct hvm_iommu *hd = domain_hvm_iommu(d);
- unsigned int dom_id = hd->domain_id;
-
- /* send INVALIDATE_IOMMU_PAGES command */
- for_each_amd_iommu ( iommu )
- {
- spin_lock_irqsave(&iommu->lock, flags);
- invalidate_iommu_pages(iommu, gaddr, dom_id, order);
- flush_command_buffer(iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
-
- if ( ats_enabled )
- amd_iommu_flush_all_iotlbs(d, gaddr, order);
-}
-
-void amd_iommu_flush_all_pages(struct domain *d)
-{
- _amd_iommu_flush_pages(d, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
-void amd_iommu_flush_pages(struct domain *d,
- unsigned long gfn, unsigned int order)
-{
- _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order);
-}
-
/* Share p2m table with iommu. */
void amd_iommu_share_p2m(struct domain *d)
{
diff -r 135678ae1bbc -r 6ef12cc262fb xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Fri Nov 18 11:07:40 2011 +0100
@@ -118,8 +118,7 @@ static void amd_iommu_setup_domain_devic
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
iommu_dte_set_iotlb((u32 *)dte, dte_i);
- invalidate_dev_table_entry(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_device(iommu, req_id);
AMD_IOMMU_DEBUG("Setup I/O page table: device id = 0x%04x, "
"root table = 0x%"PRIx64", "
@@ -310,8 +309,8 @@ void amd_iommu_disable_domain_device(str
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
iommu_dte_set_iotlb((u32 *)dte, 0);
- invalidate_dev_table_entry(iommu, req_id);
- flush_command_buffer(iommu);
+ amd_iommu_flush_device(iommu, req_id);
+
AMD_IOMMU_DEBUG("Disable: device id = 0x%04x, "
"domain = %d, paging mode = %d\n",
req_id, domain_hvm_iommu(domain)->domain_id,
diff -r 135678ae1bbc -r 6ef12cc262fb xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Nov 17 15:52:03 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Fri Nov 18 11:07:40 2011 +0100
@@ -53,12 +53,6 @@ int amd_iommu_update_ivrs_mapping_acpi(v
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
unsigned int flags);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
-void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
- unsigned int order);
-void amd_iommu_flush_all_pages(struct domain *d);
-void amd_iommu_flush_iotlb(struct pci_dev *pdev, uint64_t gaddr,
- unsigned int order);
-
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
u64 phys_addr, unsigned long size,
@@ -75,11 +69,15 @@ void amd_iommu_set_root_page_table(
u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
void iommu_dte_set_iotlb(u32 *dte, u8 i);
void iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings *ivrs_dev);
-void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
/* send cmd to iommu */
-int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
-void flush_command_buffer(struct amd_iommu *iommu);
+void amd_iommu_flush_all_pages(struct domain *d);
+void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
+ unsigned int order);
+void amd_iommu_flush_iotlb(struct pci_dev *pdev, uint64_t gaddr,
+ unsigned int order);
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
+void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
/* find iommu for bdf */
struct amd_iommu *find_iommu_for_device(int seg, int bdf);
@@ -88,7 +86,6 @@ struct amd_iommu *find_iommu_for_device(
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(void);
int amd_iommu_free_intremap_table(u16 seg, struct ivrs_mappings *);
-void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
void amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value);
void amd_iommu_msi_msg_update_ire(