[Xen-devel] [PATCH v6 02/23] xen: move NUMA_NO_NODE to public memory.h as XEN_NUMA_NO_NODE
Update all users of NUMA_NO_NODE in Xen code to use the new macro.
No functional change introduced.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <JBeulich@xxxxxxxx>
---
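Note (not part of the change itself): moving the sentinel into the public header lets consumers outside the hypervisor compare against the same value Xen uses, e.g. when inspecting a vnode-to-pnode map like the one dump_numa prints below. The value 0xFF is the largest nodeid_t (u8) value, and the BUILD_BUG_ON in srat.c keeps MAX_NUMNODES below it. A minimal, purely illustrative sketch of such a consumer follows; the helper, its arguments and the mirrored #define are hypothetical, and a real build would take XEN_NUMA_NO_NODE from the installed public header instead of repeating it:

    #include <stdio.h>

    /* Mirrors the definition this patch adds to xen/include/public/memory.h. */
    #define XEN_NUMA_NO_NODE 0xFF

    /* Hypothetical toolstack-side report of a vnode -> pnode mapping. */
    static void report_vnode_placement(const unsigned int *vnode_to_pnode,
                                       unsigned int nr_vnodes)
    {
        unsigned int i;

        for ( i = 0; i < nr_vnodes; i++ )
        {
            if ( vnode_to_pnode[i] == XEN_NUMA_NO_NODE )
                printf("vnode %u: no physical node assigned\n", i);
            else
                printf("vnode %u: pnode %u\n", i, vnode_to_pnode[i]);
        }
    }

    int main(void)
    {
        unsigned int map[] = { 0, 1, XEN_NUMA_NO_NODE };

        report_vnode_placement(map, sizeof(map) / sizeof(map[0]));
        return 0;
    }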
xen/arch/x86/hpet.c | 2 +-
xen/arch/x86/irq.c | 4 ++--
xen/arch/x86/numa.c | 14 +++++++-------
xen/arch/x86/physdev.c | 2 +-
xen/arch/x86/setup.c | 2 +-
xen/arch/x86/smpboot.c | 2 +-
xen/arch/x86/srat.c | 28 ++++++++++++++--------------
xen/arch/x86/x86_64/mm.c | 2 +-
xen/common/page_alloc.c | 4 ++--
xen/drivers/passthrough/amd/iommu_init.c | 2 +-
xen/drivers/passthrough/vtd/iommu.c | 8 ++++----
xen/include/public/memory.h | 2 ++
xen/include/xen/numa.h | 5 ++---
13 files changed, 39 insertions(+), 38 deletions(-)
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 8f36f6f..3b6d12f 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -375,7 +375,7 @@ static int __init hpet_assign_irq(struct hpet_event_channel *ch)
{
int irq;
- if ( (irq = create_irq(NUMA_NO_NODE)) < 0 )
+ if ( (irq = create_irq(XEN_NUMA_NO_NODE)) < 0 )
return irq;
ch->msi.irq = irq;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 786d1fc..deb67d7 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -173,7 +173,7 @@ int create_irq(nodeid_t node)
{
cpumask_t *mask = NULL;
- if ( node != NUMA_NO_NODE )
+ if ( node != XEN_NUMA_NO_NODE )
{
mask = &node_to_cpumask(node);
if (cpumask_empty(mask))
@@ -2000,7 +2000,7 @@ int map_domain_pirq(
spin_unlock_irqrestore(&desc->lock, flags);
info = NULL;
- irq = create_irq(NUMA_NO_NODE);
+ irq = create_irq(XEN_NUMA_NO_NODE);
ret = irq >= 0 ? prepare_domain_irq_pirq(d, irq, pirq + nr, &info)
: irq;
if ( ret )
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 132d694..6e1a0b8 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -37,13 +37,13 @@ unsigned long memnodemapsize;
u8 *memnodemap;
nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
- [0 ... NR_CPUS-1] = NUMA_NO_NODE
+ [0 ... NR_CPUS-1] = XEN_NUMA_NO_NODE
};
/*
* Keep BIOS's CPU2node information, should not be used for memory allocaion
*/
nodeid_t apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
- [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+ [0 ... MAX_LOCAL_APIC-1] = XEN_NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
@@ -71,7 +71,7 @@ static int __init populate_memnodemap(const struct node *nodes,
unsigned long spdx, epdx;
int i, res = -1;
- memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
+ memset(memnodemap, XEN_NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
for ( i = 0; i < numnodes; i++ )
{
spdx = paddr_to_pdx(nodes[i].start);
@@ -81,7 +81,7 @@ static int __init populate_memnodemap(const struct node *nodes,
if ( (epdx >> shift) >= memnodemapsize )
return 0;
do {
- if ( memnodemap[spdx >> shift] != NUMA_NO_NODE )
+ if ( memnodemap[spdx >> shift] != XEN_NUMA_NO_NODE )
return -1;
if ( !nodeids )
@@ -199,7 +199,7 @@ void __init numa_init_array(void)
rr = first_node(node_online_map);
for ( i = 0; i < nr_cpu_ids; i++ )
{
- if ( cpu_to_node[i] != NUMA_NO_NODE )
+ if ( cpu_to_node[i] != XEN_NUMA_NO_NODE )
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
@@ -350,7 +350,7 @@ void __init init_cpu_to_node(void)
if ( apicid == BAD_APICID )
continue;
node = apicid_to_node[apicid];
- if ( node == NUMA_NO_NODE || !node_online(node) )
+ if ( node == XEN_NUMA_NO_NODE || !node_online(node) )
node = 0;
numa_set_node(i, node);
}
@@ -433,7 +433,7 @@ static void dump_numa(unsigned char key)
err = snprintf(keyhandler_scratch, 12, "%3u",
vnuma->vnode_to_pnode[i]);
- if ( err < 0 || vnuma->vnode_to_pnode[i] == NUMA_NO_NODE )
+ if ( err < 0 || vnuma->vnode_to_pnode[i] == XEN_NUMA_NO_NODE )
strlcpy(keyhandler_scratch, "???", sizeof(keyhandler_scratch));
printk(" %3u: pnode %s,", i, keyhandler_scratch);
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 1be1d50..a3a9564 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -146,7 +146,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
irq = *index;
if ( irq == -1 )
case MAP_PIRQ_TYPE_MULTI_MSI:
- irq = create_irq(NUMA_NO_NODE);
+ irq = create_irq(XEN_NUMA_NO_NODE);
if ( irq < nr_irqs_gsi || irq >= nr_irqs )
{
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 7593533..c4138ec 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -192,7 +192,7 @@ void __devinit srat_detect_node(int cpu)
u32 apicid = x86_cpu_to_apicid[cpu];
node = apicid_to_node[apicid];
- if ( node == NUMA_NO_NODE )
+ if ( node == XEN_NUMA_NO_NODE )
node = 0;
node_set_online(node);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 314e253..168dd6e 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -879,7 +879,7 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
{
nodeid_t node = setup_node(pxm);
- if ( node == NUMA_NO_NODE )
+ if ( node == XEN_NUMA_NO_NODE )
{
dprintk(XENLOG_WARNING,
"Setup node failed for pxm %x\n", pxm);
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index dfabba3..ed91b99 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -33,7 +33,7 @@ struct pxm2node {
nodeid_t node;
};
static struct pxm2node __read_mostly pxm2node[MAX_NUMNODES] =
- { [0 ... MAX_NUMNODES - 1] = {.node = NUMA_NO_NODE} };
+ { [0 ... MAX_NUMNODES - 1] = {.node = XEN_NUMA_NO_NODE} };
static unsigned node_to_pxm(nodeid_t n);
@@ -44,7 +44,7 @@ static nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
static inline bool_t node_found(unsigned idx, unsigned pxm)
{
return ((pxm2node[idx].pxm == pxm) &&
- (pxm2node[idx].node != NUMA_NO_NODE));
+ (pxm2node[idx].node != XEN_NUMA_NO_NODE));
}
nodeid_t pxm_to_node(unsigned pxm)
@@ -58,7 +58,7 @@ nodeid_t pxm_to_node(unsigned pxm)
if (node_found(i, pxm))
return pxm2node[i].node;
- return NUMA_NO_NODE;
+ return XEN_NUMA_NO_NODE;
}
__devinit nodeid_t setup_node(unsigned pxm)
@@ -67,21 +67,21 @@ __devinit nodeid_t setup_node(unsigned pxm)
unsigned idx;
static bool_t warned;
- BUILD_BUG_ON(MAX_NUMNODES >= NUMA_NO_NODE);
+ BUILD_BUG_ON(MAX_NUMNODES >= XEN_NUMA_NO_NODE);
if (pxm < ARRAY_SIZE(pxm2node)) {
if (node_found(pxm, pxm))
return pxm2node[pxm].node;
/* Try to maintain indexing of pxm2node by pxm */
- if (pxm2node[pxm].node == NUMA_NO_NODE) {
+ if (pxm2node[pxm].node == XEN_NUMA_NO_NODE) {
idx = pxm;
goto finish;
}
}
for (idx = 0; idx < ARRAY_SIZE(pxm2node); idx++)
- if (pxm2node[idx].node == NUMA_NO_NODE)
+ if (pxm2node[idx].node == XEN_NUMA_NO_NODE)
goto finish;
if (!warned) {
@@ -89,7 +89,7 @@ __devinit nodeid_t setup_node(unsigned pxm)
warned = 1;
}
- return NUMA_NO_NODE;
+ return XEN_NUMA_NO_NODE;
finish:
node = first_unset_node(nodes_found);
@@ -152,9 +152,9 @@ static __init void bad_srat(void)
printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1;
for (i = 0; i < MAX_LOCAL_APIC; i++)
- apicid_to_node[i] = NUMA_NO_NODE;
+ apicid_to_node[i] = XEN_NUMA_NO_NODE;
for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
- pxm2node[i].node = NUMA_NO_NODE;
+ pxm2node[i].node = XEN_NUMA_NO_NODE;
mem_hotplug = 0;
}
@@ -218,7 +218,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
return;
pxm = pa->proximity_domain;
node = setup_node(pxm);
- if (node == NUMA_NO_NODE) {
+ if (node == XEN_NUMA_NO_NODE) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
return;
@@ -253,7 +253,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pxm |= pa->proximity_domain_hi[2] << 24;
}
node = setup_node(pxm);
- if (node == NUMA_NO_NODE) {
+ if (node == XEN_NUMA_NO_NODE) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
return;
@@ -298,7 +298,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (srat_rev < 2)
pxm &= 0xff;
node = setup_node(pxm);
- if (node == NUMA_NO_NODE) {
+ if (node == XEN_NUMA_NO_NODE) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
return;
@@ -475,10 +475,10 @@ int __init acpi_scan_nodes(u64 start, u64 end)
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
for (i = 0; i < nr_cpu_ids; i++) {
- if (cpu_to_node[i] == NUMA_NO_NODE)
+ if (cpu_to_node[i] == XEN_NUMA_NO_NODE)
continue;
if (!node_isset(cpu_to_node[i], processor_nodes_parsed))
- numa_set_node(i, NUMA_NO_NODE);
+ numa_set_node(i, XEN_NUMA_NO_NODE);
}
numa_init_array();
return 0;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 6875c92..c6b7f7c 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1354,7 +1354,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( !mem_hotadd_check(spfn, epfn) )
return -EINVAL;
- if ( (node = setup_node(pxm)) == NUMA_NO_NODE )
+ if ( (node = setup_node(pxm)) == XEN_NUMA_NO_NODE )
return -EINVAL;
if ( !valid_numa_range(spfn << PAGE_SHIFT, epfn << PAGE_SHIFT, node) )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index d96d25b..f62ee74 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -591,7 +591,7 @@ static struct page_info *alloc_heap_pages(
/* Make sure there are enough bits in memflags for nodeID. */
BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
- if ( node == NUMA_NO_NODE )
+ if ( node == XEN_NUMA_NO_NODE )
{
memflags &= ~MEMF_exact_node;
if ( d != NULL )
@@ -1291,7 +1291,7 @@ static void __init smp_scrub_heap_pages(void *data)
else
{
node = cpu_to_node(cpu);
- if ( node == NUMA_NO_NODE )
+ if ( node == XEN_NUMA_NO_NODE )
return;
r = &region[node];
}
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 56bda00..13fe7ae 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -782,7 +782,7 @@ static bool_t __init set_iommu_interrupt_handler(struct amd_iommu *iommu)
unsigned long flags;
u16 control;
- irq = create_irq(NUMA_NO_NODE);
+ irq = create_irq(XEN_NUMA_NO_NODE);
if ( irq <= 0 )
{
dprintk(XENLOG_ERR, "IOMMU: no irqs\n");
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1063677..d03b2a8 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -190,7 +190,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
struct acpi_rhsa_unit *rhsa;
struct page_info *pg, *cur_pg;
u64 *vaddr;
- nodeid_t node = NUMA_NO_NODE;
+ nodeid_t node = XEN_NUMA_NO_NODE;
unsigned int i;
rhsa = drhd_to_rhsa(drhd);
@@ -198,7 +198,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
node = pxm_to_node(rhsa->proximity_domain);
pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
- (node == NUMA_NO_NODE) ? 0 : MEMF_node(node));
+ (node == XEN_NUMA_NO_NODE) ? 0 : MEMF_node(node));
if ( !pg )
return 0;
@@ -1064,7 +1064,7 @@ static int __init iommu_set_interrupt(struct acpi_drhd_unit *drhd)
struct irq_desc *desc;
irq = create_irq(rhsa ? pxm_to_node(rhsa->proximity_domain)
- : NUMA_NO_NODE);
+ : XEN_NUMA_NO_NODE);
if ( irq <= 0 )
{
dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no irq available!\n");
@@ -1959,7 +1959,7 @@ static void adjust_irq_affinity(struct acpi_drhd_unit *drhd)
{
const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain)
- : NUMA_NO_NODE;
+ : XEN_NUMA_NO_NODE;
const cpumask_t *cpumask = &cpu_online_map;
if ( node < MAX_NUMNODES && node_online(node) &&
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 595f953..0d8c85f 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -29,6 +29,8 @@
#include "xen.h"
+#define XEN_NUMA_NO_NODE 0xFF
+
/*
* Increase or decrease the specified domain's memory reservation. Returns the
* number of extents successfully allocated or freed.
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index ac4b391..60daaa5 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -1,20 +1,19 @@
#ifndef _XEN_NUMA_H
#define _XEN_NUMA_H
+#include <public/memory.h>
#include <asm/numa.h>
#ifndef NODES_SHIFT
#define NODES_SHIFT 0
#endif
-#define NUMA_NO_NODE 0xFF
-
#define MAX_NUMNODES (1 << NODES_SHIFT)
#define vcpu_to_node(v) (cpu_to_node((v)->processor))
#define domain_to_node(d) \
(((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
- ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
+ ? vcpu_to_node((d)->vcpu[0]) : XEN_NUMA_NO_NODE)
#endif /* _XEN_NUMA_H */
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel