# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1292320377 0
# Node ID fd4cbfbbd83e6091a343844eae1da1468f54b72b
# Parent ab785e37499c8cdadd1fd5e4ab1bfbbacebf358b
x86/iommu: don't map RAM holes above 4G
Matching the comment in iommu_set_dom0_mapping(), map only actual RAM
from the address range starting at 4G. It's not clear though whether
that comment is actually correct (which is why I'm sending this as
RFC), but it is certain that on systems with a sparse physical memory
map we are currently wasting a potentially significant amount of memory
on IOMMU page tables that will never be used.

The main question is what happens for MMIO ranges living above 4G. Of
course, the same issue already exists for any such ranges sitting
beyond the end of RAM.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
xen/drivers/passthrough/amd/pci_amd_iommu.c | 12 ++++++++++--
xen/drivers/passthrough/vtd/x86/vtd.c | 21 +++++++++++++--------
2 files changed, 23 insertions(+), 10 deletions(-)
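
To illustrate where the saving comes from, here is a small standalone toy
model (not Xen's actual pdx implementation; the hole layout, sizes and helper
names are invented for the example) of why iterating over the compressed
page-index ("pdx") space and translating each index back to a PFN never
visits a RAM hole above 4G, whereas a plain 0..max_page loop walks straight
through it:

#include <stdio.h>

#define PAGE_SHIFT      12
#define HOLE_START_PFN  (0x100000000ULL >> PAGE_SHIFT)   /* hole begins at 4G */
#define HOLE_END_PFN    (0x600000000ULL >> PAGE_SHIFT)   /* RAM resumes at 24G (example) */
#define MAX_PFN         (0x640000000ULL >> PAGE_SHIFT)   /* top of RAM at 25G (example) */

/* Toy compression: PFNs above the hole are shifted down by the hole size. */
static unsigned long long toy_pdx_to_pfn(unsigned long long pdx)
{
    return pdx < HOLE_START_PFN ? pdx
                                : pdx + (HOLE_END_PFN - HOLE_START_PFN);
}

int main(void)
{
    unsigned long long max_pdx = MAX_PFN - (HOLE_END_PFN - HOLE_START_PFN);

    /* Old scheme: one mapping per PFN up to max_page, RAM hole included. */
    printf("pfn-based loop iterations: %llu\n", MAX_PFN);

    /* New scheme: one mapping per pdx; the hole is never visited. */
    printf("pdx-based loop iterations: %llu\n", max_pdx);

    printf("pdx %#llx maps back to pfn %#llx (first page after the hole)\n",
           HOLE_START_PFN, toy_pdx_to_pfn(HOLE_START_PFN));
    return 0;
}

Only indexes that correspond to real frames are walked, so no IOMMU
page-table pages are set up for the hole itself.
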
diff -r ab785e37499c -r fd4cbfbbd83e xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Dec 09 14:50:56 2010 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Tue Dec 14 09:52:57 2010 +0000
@@ -239,8 +239,16 @@ static void __init amd_iommu_dom0_init(s
     if ( !iommu_passthrough && !need_iommu(d) )
     {
         /* Set up 1:1 page table for dom0 */
-        for ( i = 0; i < max_page; i++ )
-            amd_iommu_map_page(d, i, i, IOMMUF_readable|IOMMUF_writable);
+        for ( i = 0; i < max_pdx; i++ )
+        {
+            unsigned long pfn = pdx_to_pfn(i);
+
+            /*
+             * XXX Should we really map all non-RAM (above 4G)? Minimally
+             * a pfn_valid() check would seem desirable here.
+             */
+            amd_iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
+        }
     }
 
     amd_iommu_setup_dom0_devices(d);
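
The XXX comment in the hunk above asks whether non-RAM frames above 4G should
be mapped at all. Purely as a sketch of what the minimal check suggested there
might look like (this is not part of the patch; the stub predicates merely
stand in for Xen's mfn_valid()/page_is_ram_type(), mirroring the VT-d
condition further down):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for Xen's mfn_valid(): pretend frames below 8G exist. */
static bool stub_mfn_valid(unsigned long pfn)
{
    return pfn < (0x200000000ULL >> PAGE_SHIFT);
}

/* Stand-in for page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL): RAM ends at 6G. */
static bool stub_is_conventional_ram(unsigned long pfn)
{
    return pfn < (0x180000000ULL >> PAGE_SHIFT);
}

static void map_identity(unsigned long pfn)
{
    printf("would map pfn %#lx 1:1\n", pfn);
}

/* What the dom0 loop body might do with the check the XXX comment suggests. */
static void dom0_map_pfn(unsigned long pfn)
{
    /* Above 4G, only map frames that exist and are conventional RAM. */
    if ( pfn > (0xffffffffULL >> PAGE_SHIFT) &&
         (!stub_mfn_valid(pfn) || !stub_is_conventional_ram(pfn)) )
        return;

    map_identity(pfn);
}

int main(void)
{
    dom0_map_pfn(0x12345);                       /* below 4G: mapped */
    dom0_map_pfn(0x140000000ULL >> PAGE_SHIFT);  /* 5G, conventional RAM: mapped */
    dom0_map_pfn(0x1c0000000ULL >> PAGE_SHIFT);  /* 7G, beyond RAM: skipped */
    return 0;
}
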
diff -r ab785e37499c -r fd4cbfbbd83e xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c Thu Dec 09 14:50:56 2010 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c Tue Dec 14 09:52:57 2010 +0000
@@ -129,14 +129,14 @@ void hvm_dpci_isairq_eoi(struct domain *
 void __init iommu_set_dom0_mapping(struct domain *d)
 {
-    u64 i, j, tmp, max_pfn;
+    unsigned long i, j, tmp, top;
     extern int xen_in_range(unsigned long mfn);
 
     BUG_ON(d->domain_id != 0);
 
-    max_pfn = max_t(u64, max_page, 0x100000000ull >> PAGE_SHIFT);
+    top = max(max_pdx, pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);
 
-    for ( i = 0; i < max_pfn; i++ )
+    for ( i = 0; i < top; i++ )
     {
         /*
          * Set up 1:1 mapping for dom0. Default to use only conventional RAM
@@ -144,18 +144,23 @@ void __init iommu_set_dom0_mapping(struc
          * inclusive mapping maps in everything below 4GB except unusable
          * ranges.
          */
-        if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
-             (!iommu_inclusive_mapping ||
-              page_is_ram_type(i, RAM_TYPE_UNUSABLE)) )
+        unsigned long pfn = pdx_to_pfn(i);
+
+        if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
+             (!mfn_valid(pfn) ||
+              !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
+             iommu_inclusive_mapping ?
+             page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
+             !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
             continue;
 
         /* Exclude Xen bits */
-        if ( xen_in_range(i) )
+        if ( xen_in_range(pfn) )
             continue;
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (i*tmp+j), (i*tmp+j),
+            iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
                            IOMMUF_readable|IOMMUF_writable);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
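
The replacement condition in the hunk above packs three cases into one nested
conditional expression. Spelled out as plain C for readability only (the Xen
predicates are reduced to booleans here; this is not code from the patch), it
reads:

#include <stdbool.h>

#define PAGE_SHIFT 12

/* Decide whether a PFN is left unmapped for dom0 (the "continue" cases). */
bool skip_pfn(unsigned long pfn, bool mfn_is_valid,
              bool is_conventional_ram, bool is_unusable_ram,
              bool inclusive_mapping)
{
    if ( pfn > (0xffffffffUL >> PAGE_SHIFT) )
        /* Above 4G: map only frames that exist and are conventional RAM. */
        return !mfn_is_valid || !is_conventional_ram;

    if ( inclusive_mapping )
        /* Below 4G, inclusive mode: map everything except unusable ranges. */
        return is_unusable_ram;

    /* Below 4G, default mode: map conventional RAM only. */
    return !is_conventional_ram;
}

In other words, above 4G only existing conventional RAM is mapped, while
below 4G the pre-existing behaviour is retained: everything except unusable
ranges when iommu_inclusive_mapping is set, and conventional RAM only
otherwise.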