From: David Vrabel <david.vrabel@xxxxxxxxxx>
In xen_memory_setup() all reserved regions and gaps are set to an
identity (1-1) p2m mapping. If an available page has a PFN within one
of these 1-1 mappings it will become inaccessible (as its MFN is lost)
so release them before setting up the mapping.
This can make an additional 256 MiB or more of RAM available
(depending on the size of the reserved regions in the memory map) if
the initial pages overlap with reserved regions.
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
arch/x86/xen/setup.c | 100 ++++++++++++++++---------------------------------
1 files changed, 33 insertions(+), 67 deletions(-)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 6433371..986661b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -126,72 +126,44 @@ static unsigned long __init xen_release_chunk(phys_addr_t
start_addr,
return len;
}
-static unsigned long __init xen_return_unused_memory(
- unsigned long max_pfn, const struct e820entry *map, int nr_map)
+static unsigned long __init xen_set_identity_and_release(
+ const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = ISA_END_ADDRESS;
+ phys_addr_t avail_end = PFN_PHYS(nr_pages);
+ phys_addr_t last_end = 0;
unsigned long released = 0;
- int i;
-
- /* Free any unused memory above the low 1Mbyte. */
- for (i = 0; i < nr_map && last_end < max_addr; i++) {
- phys_addr_t end = map[i].addr;
- end = min(max_addr, end);
-
- if (last_end < end)
- released += xen_release_chunk(last_end, end);
- last_end = max(last_end, map[i].addr + map[i].size);
- }
-
- if (last_end < max_addr)
- released += xen_release_chunk(last_end, max_addr);
-
- printk(KERN_INFO "released %lu pages of unused memory\n", released);
- return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
- ssize_t map_size)
-{
- phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
- phys_addr_t start_pci = last;
- const struct e820entry *entry;
unsigned long identity = 0;
+ const struct e820entry *entry;
int i;
+ /*
+ * For each memory region consider whether to release and map
+ * the region and the preceding gap (if any). If the region
+ * is RAM, only the gap is released and mapped.
+ */
for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t start = entry->addr;
- phys_addr_t end = start + entry->size;
+ phys_addr_t begin = last_end;
+ phys_addr_t end = entry->addr + entry->size;
- if (start < last)
- start = last;
+ last_end = end;
- if (end <= start)
- continue;
-
- /* Skip over the 1MB region. */
- if (last > end)
- continue;
+ if (entry->type == E820_RAM || entry->type == E820_UNUSABLE)
+ end = entry->addr;
- if ((entry->type == E820_RAM) || (entry->type ==
E820_UNUSABLE)) {
- if (start > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci),
PFN_DOWN(start));
+ if (begin < end) {
+ if (begin < avail_end)
+ released += xen_release_chunk(
+ begin, min(end, avail_end));
- /* Without saving 'last' we would gooble RAM too
- * at the end of the loop. */
- last = end;
- start_pci = end;
- continue;
+ identity += set_phys_range_identity(
+ PFN_UP(begin), PFN_DOWN(end));
}
- start_pci = min(start, start_pci);
- last = end;
}
- if (last > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(last));
- return identity;
+
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+ return released;
}
static unsigned long __init xen_get_max_pages(void)
@@ -219,7 +191,6 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
- unsigned long identity_pages = 0;
int i;
int op;
@@ -252,8 +223,13 @@ char * __init xen_memory_setup(void)
if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn;
- xen_released_pages = xen_return_unused_memory(max_pfn, map,
- memmap.nr_entries);
+ /*
+ * Set P2M for all non-RAM pages and E820 gaps to be identity
+ * type PFNs. Any RAM pages that would be made inaccessible by
+ * this are first released.
+ */
+ xen_released_pages = xen_set_identity_and_release(
+ map, memmap.nr_entries, max_pfn);
extra_pages += xen_released_pages;
/*
@@ -303,10 +279,6 @@ char * __init xen_memory_setup(void)
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
- *
- * In Dom0, the host E820 information can leave gaps in the
- * ISA range, which would cause us to release those pages. To
- * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -323,12 +295,6 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- /*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs.
- */
- identity_pages = xen_set_identity(e820.map, e820.nr_map);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|