This patch allocates the guest's memory according to the given NUMA setup.
The appropriate host node for each guest node is determined by the
guest_to_host mapping (which is simply round-robin until the code for
automatic allocation is in place).
Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
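
For reviewers, here is a minimal sketch of the round-robin guest-to-host
mapping mentioned above, assuming a numainfo layout with the fields the
diff below relies on (num_nodes, node_mem, guest_to_host_node); the struct
definition, MAX_NODES and the helper name are illustrative, not part of
this patch:

#include <stdint.h>

#define MAX_NODES 64    /* illustrative bound, not from this patch */

/* Field names follow the uses in the diff below; the layout is assumed. */
struct numa_info {
    int num_nodes;                      /* number of guest NUMA nodes */
    uint64_t node_mem[MAX_NODES];       /* per-node memory, in KB */
    int guest_to_host_node[MAX_NODES];  /* guest node -> host node */
};

/* Round-robin placeholder mapping, until automatic allocation exists. */
static void set_default_node_mapping(struct numa_info *ni, int nr_host_nodes)
{
    int i;

    for ( i = 0; i < ni->num_nodes; i++ )
        ni->guest_to_host_node[i] = i % nr_host_nodes;
}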
Regards,
Andre.
commit 1c5557b78834fac72ae2455295f31c017208156f
Author: Andre Przywara <andre.przywara@xxxxxxx>
Date: Mon Feb 1 12:23:19 2010 +0100
allocate guest memory according to specified NUMA setup
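
A note on the unit conversion used below: node_mem[] holds per-node sizes
in KB, and one page is 1 << (PAGE_SHIFT - 10) KB, so shifting right by
(PAGE_SHIFT - 10) turns KB into a page count. A worked example, assuming
4 KB pages (PAGE_SHIFT == 12):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* assuming 4 KB pages */

int main(void)
{
    uint64_t node_mem_kb = 512 * 1024;  /* 512 MB for one guest node */

    /* 524288 KB >> 2 == 131072 pages, i.e. 512 MB of 4 KB pages. */
    uint64_t pages = node_mem_kb >> (PAGE_SHIFT - 10);

    assert(pages == 131072);
    return 0;
}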
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
index 02103b1..3eb06c8 100644
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -121,6 +121,8 @@ static int setup_guest(int xc_handle,
     struct elf_binary elf;
     uint64_t v_start, v_end;
     int rc;
+    int hostnode, node;
+    uint64_t next_node;
     xen_capabilities_info_t caps;
     int pod_mode = 0;
 
@@ -174,8 +176,16 @@ static int setup_guest(int xc_handle,
      * We allocate pages in batches of no more than 8MB to ensure that
      * we can be preempted and hence dom0 remains responsive.
      */
+    hostnode = -1;
+    node = 0;
+    next_node = nr_pages;
+    if ( numainfo != NULL && numainfo->num_nodes > 1 )
+    {
+        next_node = numainfo->node_mem[node] >> (PAGE_SHIFT - 10);
+        hostnode = numainfo->guest_to_host_node[node];
+    }
     rc = xc_domain_memory_populate_physmap(
-        xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]);
+        xc_handle, dom, 0xa0, 0, XENMEMF_node(hostnode), &page_array[0x00]);
     cur_pages = 0xc0;
     while ( (rc == 0) && (nr_pages > cur_pages) )
     {
@@ -184,6 +194,13 @@ static int setup_guest(int xc_handle,
         if ( count > 2048 )
             count = 2048;
 
+        if ( cur_pages >= next_node )
+        {
+            node++;
+            next_node += numainfo->node_mem[node] >> (PAGE_SHIFT - 10);
+            hostnode = numainfo->guest_to_host_node[node];
+        }
+
         /* Clip partial superpage extents to superpage boundaries. */
         if ( ((cur_pages & (SUPERPAGE_NR_PFNS-1)) != 0) &&
              (count > (-cur_pages & (SUPERPAGE_NR_PFNS-1))) )
@@ -200,11 +217,12 @@ static int setup_guest(int xc_handle,
             struct xen_memory_reservation sp_req = {
                 .nr_extents   = count >> SUPERPAGE_PFN_SHIFT,
                 .extent_order = SUPERPAGE_PFN_SHIFT,
-                .domid        = dom
+                .domid        = dom,
+                .mem_flags    = XENMEMF_node(hostnode)
             };
 
             if ( pod_mode )
-                sp_req.mem_flags = XENMEMF_populate_on_demand;
+                sp_req.mem_flags |= XENMEMF_populate_on_demand;
 
             set_xen_guest_handle(sp_req.extent_start, sp_extents);
             for ( i = 0; i < sp_req.nr_extents; i++ )
@@ -227,7 +245,8 @@ static int setup_guest(int xc_handle,
         if ( count != 0 )
         {
             rc = xc_domain_memory_populate_physmap(
-                xc_handle, dom, count, 0, 0, &page_array[cur_pages]);
+                xc_handle, dom, count, 0, XENMEMF_node(hostnode),
+                &page_array[cur_pages]);
             cur_pages += count;
             if ( pod_mode )
                 pod_pages -= count;
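
Two details worth calling out for review: XENMEMF_node(hostnode) is passed
unconditionally even though hostnode stays -1 when no NUMA setup is given,
and the node-advance block dereferences numainfo without a NULL check.
Both are intentional; a condensed sketch of the reasoning (the macro is
quoted from xen/include/public/memory.h as I read it in this tree):

/* XENMEMF_node() encodes node+1 in bits 8-15, so hostnode == -1
 * yields 0, which the hypervisor treats as "no node preference". */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)

/* numainfo is never dereferenced when NULL: next_node starts out as
 * nr_pages, and the loop only runs while cur_pages < nr_pages, so
 * "cur_pages >= next_node" cannot become true in that case. */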