Make various data items const or __read_mostly where possible/reasonable.
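
For reference, a minimal sketch (not part of this patch) of what the
annotation does; the exact macro lives in the tree's asm/cache.h and the
section name below is an assumption:

    /* Keep rarely-written objects together in one section so they do not
     * share cache lines with frequently-written data. */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    /* Hypothetical usage, mirroring the pattern applied throughout: */
    static int __read_mostly example_boot_option = 1;

Objects additionally made const end up in .rodata and can never be written
after the image is mapped.
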
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- 2009-10-27.orig/xen/arch/ia64/linux-xen/acpi.c 2009-05-27 13:54:05.000000000 +0200
+++ 2009-10-27/xen/arch/ia64/linux-xen/acpi.c 2009-10-27 12:07:32.000000000 +0100
@@ -66,9 +66,9 @@ extern u8 numa_slit[MAX_NUMNODES * MAX_N
#define PREFIX "ACPI: "
-void (*pm_idle) (void);
+void (*pm_idle) (void) __read_mostly;
EXPORT_SYMBOL(pm_idle);
-void (*pm_power_off) (void);
+void (*pm_power_off) (void) __read_mostly;
EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
--- 2009-10-27.orig/xen/arch/ia64/linux-xen/numa.c 2006-10-30 12:07:18.000000000 +0100
+++ 2009-10-27/xen/arch/ia64/linux-xen/numa.c 2009-10-27 12:07:32.000000000 +0100
@@ -32,7 +32,7 @@
#endif
#ifdef XEN
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
#endif
u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
--- 2009-10-27.orig/xen/arch/x86/domain.c 2009-10-22 08:18:54.000000000 +0200
+++ 2009-10-27/xen/arch/x86/domain.c 2009-10-27 12:07:32.000000000 +0100
@@ -61,8 +61,8 @@ DEFINE_PER_CPU(unsigned long, cr4);
static void default_idle(void);
static void default_dead_idle(void);
-void (*pm_idle) (void) = default_idle;
-void (*dead_idle) (void) = default_dead_idle;
+void (*pm_idle) (void) __read_mostly = default_idle;
+void (*dead_idle) (void) __read_mostly = default_dead_idle;
static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);
--- 2009-10-27.orig/xen/arch/x86/genapic/bigsmp.c 2007-08-07 15:00:27.000000000 +0200
+++ 2009-10-27/xen/arch/x86/genapic/bigsmp.c 2009-10-27 12:07:32.000000000 +0100
@@ -48,7 +48,7 @@ static __init int probe_bigsmp(void)
return dmi_bigsmp;
}
-struct genapic apic_bigsmp = {
+const struct genapic apic_bigsmp = {
APIC_INIT("bigsmp", probe_bigsmp),
GENAPIC_PHYS
};
--- 2009-10-27.orig/xen/arch/x86/genapic/default.c 2006-04-05 09:50:16.000000000 +0200
+++ 2009-10-27/xen/arch/x86/genapic/default.c 2009-10-27 12:07:32.000000000 +0100
@@ -20,7 +20,7 @@ static __init int probe_default(void)
return 1;
}
-struct genapic apic_default = {
+const struct genapic apic_default = {
APIC_INIT("default", probe_default),
GENAPIC_FLAT
};
--- 2009-10-27.orig/xen/arch/x86/genapic/probe.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/genapic/probe.c 2009-10-27 12:09:01.000000000 +0100
@@ -9,19 +9,20 @@
#include <xen/kernel.h>
#include <xen/ctype.h>
#include <xen/init.h>
+#include <asm/cache.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/mach-generic/mach_apic.h>
#include <asm/setup.h>
-extern struct genapic apic_summit;
-extern struct genapic apic_bigsmp;
-extern struct genapic apic_default;
+extern const struct genapic apic_summit;
+extern const struct genapic apic_bigsmp;
+extern const struct genapic apic_default;
-struct genapic *genapic;
+const struct genapic *__read_mostly genapic;
-struct genapic *apic_probe[] __initdata = {
+const struct genapic *apic_probe[] __initdata = {
&apic_summit,
&apic_bigsmp,
&apic_default, /* must be last */
--- 2009-10-27.orig/xen/arch/x86/genapic/summit.c 2006-04-05 09:50:16.000000000 +0200
+++ 2009-10-27/xen/arch/x86/genapic/summit.c 2009-10-27 12:07:32.000000000 +0100
@@ -20,7 +20,7 @@ static __init int probe_summit(void)
return 0;
}
-struct genapic apic_summit = {
+const struct genapic apic_summit = {
APIC_INIT("summit", probe_summit),
GENAPIC_PHYS
};
--- 2009-10-27.orig/xen/arch/x86/genapic/x2apic.c 2009-10-01 10:53:02.000000000 +0200
+++ 2009-10-27/xen/arch/x86/genapic/x2apic.c 2009-10-27 12:07:32.000000000 +0100
@@ -23,10 +23,10 @@
#include <xen/smp.h>
#include <asm/mach-default/mach_mpparse.h>
-static int x2apic = 1;
+static int __initdata x2apic = 1;
boolean_param("x2apic", x2apic);
-static int x2apic_phys = 0; /* By default we use logical cluster mode. */
+static int __initdata x2apic_phys; /* By default we use logical cluster mode. */
boolean_param("x2apic_phys", x2apic_phys);
static int __init probe_x2apic_phys(void)
@@ -41,12 +41,12 @@ static int __init probe_x2apic_cluster(v
iommu_supports_eim();
}
-struct genapic apic_x2apic_phys= {
+const struct genapic apic_x2apic_phys = {
APIC_INIT("x2apic_phys", probe_x2apic_phys),
GENAPIC_X2APIC_PHYS
};
-struct genapic apic_x2apic_cluster= {
+const struct genapic apic_x2apic_cluster = {
APIC_INIT("x2apic_cluster", probe_x2apic_cluster),
GENAPIC_X2APIC_CLUSTER
};
--- 2009-10-27.orig/xen/arch/x86/hvm/emulate.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/emulate.c 2009-10-27 12:07:32.000000000 +0100
@@ -929,7 +929,7 @@ static int hvmemul_invlpg(
return rc;
}
-static struct x86_emulate_ops hvm_emulate_ops = {
+static const struct x86_emulate_ops hvm_emulate_ops = {
.read = hvmemul_read,
.insn_fetch = hvmemul_insn_fetch,
.write = hvmemul_write,
--- 2009-10-27.orig/xen/arch/x86/hvm/hpet.c 2009-07-10 08:51:30.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/hpet.c 2009-10-27 12:07:32.000000000 +0100
@@ -485,7 +485,7 @@ static int hpet_range(struct vcpu *v, un
(addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)));
}
-struct hvm_mmio_handler hpet_mmio_handler = {
+const struct hvm_mmio_handler hpet_mmio_handler = {
.check_handler = hpet_range,
.read_handler = hpet_read,
.write_handler = hpet_write
--- 2009-10-27.orig/xen/arch/x86/hvm/intercept.c 2009-04-09 14:05:35.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/intercept.c 2009-10-27 12:07:32.000000000 +0100
@@ -32,14 +32,15 @@
#include <xen/event.h>
#include <xen/iommu.h>
-extern struct hvm_mmio_handler hpet_mmio_handler;
-extern struct hvm_mmio_handler vlapic_mmio_handler;
-extern struct hvm_mmio_handler vioapic_mmio_handler;
-extern struct hvm_mmio_handler msixtbl_mmio_handler;
+extern const struct hvm_mmio_handler hpet_mmio_handler;
+extern const struct hvm_mmio_handler vlapic_mmio_handler;
+extern const struct hvm_mmio_handler vioapic_mmio_handler;
+extern const struct hvm_mmio_handler msixtbl_mmio_handler;
#define HVM_MMIO_HANDLER_NR 4
-static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
+static const struct hvm_mmio_handler *const
+hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
&hpet_mmio_handler,
&vlapic_mmio_handler,
--- 2009-10-27.orig/xen/arch/x86/hvm/quirks.c 2009-06-29 11:58:15.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/quirks.c 2009-10-27 12:07:32.000000000 +0100
@@ -23,7 +23,7 @@
#include <xen/bitmap.h>
#include <asm/hvm/support.h>
-int hvm_port80_allowed = -1;
+int __read_mostly hvm_port80_allowed = -1;
boolean_param("hvm_port80", hvm_port80_allowed);
static int __init dmi_hvm_deny_port80(/*const*/ struct dmi_system_id *id)
--- 2009-10-27.orig/xen/arch/x86/hvm/svm/svm.c 2009-10-26 14:42:47.000000000 +0100
+++ 2009-10-27/xen/arch/x86/hvm/svm/svm.c 2009-10-27 12:07:32.000000000 +0100
@@ -1233,7 +1233,7 @@ static void svm_invlpg_intercept(unsigne
svm_asid_g_invlpg(curr, vaddr);
}
-static struct hvm_function_table svm_function_table = {
+static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
.cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
--- 2009-10-27.orig/xen/arch/x86/hvm/vioapic.c 2009-09-21 08:39:42.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/vioapic.c 2009-10-27 12:07:32.000000000 +0100
@@ -246,7 +246,7 @@ static int vioapic_range(struct vcpu *v,
(addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
}
-struct hvm_mmio_handler vioapic_mmio_handler = {
+const struct hvm_mmio_handler vioapic_mmio_handler = {
.check_handler = vioapic_range,
.read_handler = vioapic_read,
.write_handler = vioapic_write
--- 2009-10-27.orig/xen/arch/x86/hvm/vlapic.c 2009-10-01 10:53:02.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/vlapic.c 2009-10-27 12:07:32.000000000 +0100
@@ -713,7 +713,7 @@ static int vlapic_range(struct vcpu *v,
return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}
-struct hvm_mmio_handler vlapic_mmio_handler = {
+const struct hvm_mmio_handler vlapic_mmio_handler = {
.check_handler = vlapic_range,
.read_handler = vlapic_read,
.write_handler = vlapic_write
--- 2009-10-27.orig/xen/arch/x86/hvm/vmsi.c 2009-10-22 08:18:54.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/vmsi.c 2009-10-27 12:07:32.000000000 +0100
@@ -290,7 +290,7 @@ static int msixtbl_range(struct vcpu *v,
return !!virt;
}
-struct hvm_mmio_handler msixtbl_mmio_handler = {
+const struct hvm_mmio_handler msixtbl_mmio_handler = {
.check_handler = msixtbl_range,
.read_handler = msixtbl_read,
.write_handler = msixtbl_write
--- 2009-10-27.orig/xen/arch/x86/hvm/vmx/vmcs.c 2009-10-01 10:53:02.000000000 +0200
+++ 2009-10-27/xen/arch/x86/hvm/vmx/vmcs.c 2009-10-27 12:07:32.000000000 +0100
@@ -38,10 +38,10 @@
#include <asm/shadow.h>
#include <asm/tboot.h>
-static int opt_vpid_enabled = 1;
+static int __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
-static int opt_unrestricted_guest_enabled = 1;
+static int __read_mostly opt_unrestricted_guest_enabled = 1;
boolean_param("unrestricted_guest", opt_unrestricted_guest_enabled);
/*
@@ -53,9 +53,9 @@ boolean_param("unrestricted_guest", opt_
* Time is measured based on a counter that runs at the same rate as the TSC,
* refer SDM volume 3b section 21.6.13 & 22.1.3.
*/
-static unsigned int ple_gap = 41;
+static unsigned int __read_mostly ple_gap = 41;
integer_param("ple_gap", ple_gap);
-static unsigned int ple_window = 4096;
+static unsigned int __read_mostly ple_window = 4096;
integer_param("ple_window", ple_window);
/* Dynamic (run-time adjusted) execution control flags. */
--- 2009-10-27.orig/xen/arch/x86/hvm/vmx/vmx.c 2009-10-26 14:42:47.000000000 +0100
+++ 2009-10-27/xen/arch/x86/hvm/vmx/vmx.c 2009-10-27 12:07:32.000000000 +0100
@@ -1389,7 +1389,7 @@ static void vmx_set_info_guest(struct vc
vmx_vmcs_exit(v);
}
-static struct hvm_function_table vmx_function_table = {
+static struct hvm_function_table __read_mostly vmx_function_table = {
.name = "VMX",
.domain_initialise = vmx_domain_initialise,
.domain_destroy = vmx_domain_destroy,
--- 2009-10-27.orig/xen/arch/x86/microcode_amd.c 2009-07-13 13:05:40.000000000 +0200
+++ 2009-10-27/xen/arch/x86/microcode_amd.c 2009-10-27 12:07:32.000000000 +0100
@@ -323,7 +323,7 @@ static int microcode_resume_match(int cp
return 0;
}
-static struct microcode_ops microcode_amd_ops = {
+static const struct microcode_ops microcode_amd_ops = {
.microcode_resume_match = microcode_resume_match,
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
--- 2009-10-27.orig/xen/arch/x86/microcode_intel.c 2009-07-13 13:05:40.000000000 +0200
+++ 2009-10-27/xen/arch/x86/microcode_intel.c 2009-10-27 12:07:32.000000000 +0100
@@ -368,7 +368,7 @@ static int microcode_resume_match(int cp
(uci->cpu_sig.rev > nsig->rev));
}
-static struct microcode_ops microcode_intel_ops = {
+static const struct microcode_ops microcode_intel_ops = {
.microcode_resume_match = microcode_resume_match,
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
--- 2009-10-27.orig/xen/arch/x86/mm.c 2009-10-26 14:42:47.000000000 +0100
+++ 2009-10-27/xen/arch/x86/mm.c 2009-10-27 12:07:32.000000000 +0100
@@ -4418,7 +4418,7 @@ static int ptwr_emulated_cmpxchg(
container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
}
-static struct x86_emulate_ops ptwr_emulate_ops = {
+static const struct x86_emulate_ops ptwr_emulate_ops = {
.read = ptwr_emulated_read,
.insn_fetch = ptwr_emulated_read,
.write = ptwr_emulated_write,
--- 2009-10-27.orig/xen/arch/x86/mm/hap/hap.c 2009-10-01 10:53:02.000000000 +0200
+++ 2009-10-27/xen/arch/x86/mm/hap/hap.c 2009-10-27 12:07:32.000000000 +0100
@@ -674,6 +674,11 @@ int hap_domctl(struct domain *d, xen_dom
}
}
+static const struct paging_mode hap_paging_real_mode;
+static const struct paging_mode hap_paging_protected_mode;
+static const struct paging_mode hap_paging_pae_mode;
+static const struct paging_mode hap_paging_long_mode;
+
void hap_vcpu_init(struct vcpu *v)
{
v->arch.paging.mode = &hap_paging_real_mode;
@@ -812,7 +817,7 @@ static unsigned long hap_gva_to_gfn_real
}
/* Entry points into this mode of the hap code. */
-struct paging_mode hap_paging_real_mode = {
+static const struct paging_mode hap_paging_real_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_real_mode,
@@ -822,7 +827,7 @@ struct paging_mode hap_paging_real_mode
.guest_levels = 1
};
-struct paging_mode hap_paging_protected_mode = {
+static const struct paging_mode hap_paging_protected_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_2level,
@@ -832,7 +837,7 @@ struct paging_mode hap_paging_protected_
.guest_levels = 2
};
-struct paging_mode hap_paging_pae_mode = {
+static const struct paging_mode hap_paging_pae_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_3level,
@@ -842,7 +847,7 @@ struct paging_mode hap_paging_pae_mode =
.guest_levels = 3
};
-struct paging_mode hap_paging_long_mode = {
+static const struct paging_mode hap_paging_long_mode = {
.page_fault = hap_page_fault,
.invlpg = hap_invlpg,
.gva_to_gfn = hap_gva_to_gfn_4level,
--- 2009-10-27.orig/xen/arch/x86/mm/shadow/common.c 2009-10-26 14:42:47.000000000 +0100
+++ 2009-10-27/xen/arch/x86/mm/shadow/common.c 2009-10-27 12:07:32.000000000 +0100
@@ -290,7 +290,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
return X86EMUL_UNHANDLEABLE;
}
-static struct x86_emulate_ops hvm_shadow_emulator_ops = {
+static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
.read = hvm_emulate_read,
.insn_fetch = hvm_emulate_insn_fetch,
.write = hvm_emulate_write,
@@ -367,14 +367,14 @@ pv_emulate_cmpxchg(enum x86_segment seg,
return X86EMUL_UNHANDLEABLE;
}
-static struct x86_emulate_ops pv_shadow_emulator_ops = {
+static const struct x86_emulate_ops pv_shadow_emulator_ops = {
.read = pv_emulate_read,
.insn_fetch = pv_emulate_read,
.write = pv_emulate_write,
.cmpxchg = pv_emulate_cmpxchg,
};
-struct x86_emulate_ops *shadow_init_emulation(
+const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
{
struct segment_register *creg, *sreg;
@@ -2154,7 +2154,7 @@ typedef int (*hash_callback_t)(struct vc
static void hash_foreach(struct vcpu *v,
unsigned int callback_mask,
- hash_callback_t callbacks[],
+ const hash_callback_t callbacks[],
mfn_t callback_mfn)
/* Walk the hash table looking at the types of the entries and
* calling the appropriate callback function for each entry.
@@ -2287,7 +2287,7 @@ int sh_remove_write_access(struct vcpu *
unsigned long fault_addr)
{
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
@@ -2549,7 +2549,7 @@ int sh_remove_all_mappings(struct vcpu *
int expected_count, do_locking;
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */
@@ -2703,7 +2703,7 @@ void sh_remove_shadows(struct vcpu *v, m
/* Dispatch table for getting per-type functions: each level must
* be called with the function to remove a lower-level shadow. */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
NULL, /* l1_32 */
NULL, /* fl1_32 */
@@ -2852,7 +2852,7 @@ sh_remove_all_shadows_and_parents(struct
static void sh_update_paging_modes(struct vcpu *v)
{
struct domain *d = v->domain;
- struct paging_mode *old_mode = v->arch.paging.mode;
+ const struct paging_mode *old_mode = v->arch.paging.mode;
ASSERT(shadow_locked_by_me(d));
@@ -3855,7 +3855,7 @@ int shadow_domctl(struct domain *d,
void shadow_audit_tables(struct vcpu *v)
{
/* Dispatch table for getting per-type functions */
- static hash_callback_t callbacks[SH_type_unused] = {
+ static const hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32 */
--- 2009-10-27.orig/xen/arch/x86/mm/shadow/multi.c 2009-10-22 08:18:54.000000000 +0200
+++ 2009-10-27/xen/arch/x86/mm/shadow/multi.c 2009-10-27 12:07:32.000000000 +0100
@@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v,
shadow_l1e_t sl1e, *ptr_sl1e;
paddr_t gpa;
struct sh_emulate_ctxt emul_ctxt;
- struct x86_emulate_ops *emul_ops;
+ const struct x86_emulate_ops *emul_ops;
int r;
fetch_type_t ft = 0;
p2m_type_t p2mt;
@@ -5188,7 +5188,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
/**************************************************************************/
/* Entry points into this mode of the shadow code.
* This will all be mangled by the preprocessor to uniquify everything. */
-struct paging_mode sh_paging_mode = {
+const struct paging_mode sh_paging_mode = {
.page_fault = sh_page_fault,
.invlpg = sh_invlpg,
.gva_to_gfn = sh_gva_to_gfn,
--- 2009-10-27.orig/xen/arch/x86/mm/shadow/multi.h 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/mm/shadow/multi.h 2009-10-27 12:07:32.000000000 +0100
@@ -113,7 +113,7 @@ extern void
SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, GUEST_LEVELS)
(struct vcpu *v, mfn_t mmfn);
-extern struct paging_mode
+extern const struct paging_mode
SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
--- 2009-10-27.orig/xen/arch/x86/mm/shadow/private.h 2009-09-22 11:02:53.000000000 +0200
+++ 2009-10-27/xen/arch/x86/mm/shadow/private.h 2009-10-27 12:07:32.000000000 +0100
@@ -747,7 +747,7 @@ struct sh_emulate_ctxt {
#endif
};
-struct x86_emulate_ops *shadow_init_emulation(
+const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
void shadow_continue_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
--- 2009-10-27.orig/xen/arch/x86/numa.c 2009-08-07 09:20:56.000000000 +0200
+++ 2009-10-27/xen/arch/x86/numa.c 2009-10-27 12:07:32.000000000 +0100
@@ -39,7 +39,7 @@ unsigned char apicid_to_node[MAX_LOCAL_A
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
/* Default NUMA to off for now. acpi=on required to enable it. */
int numa_off __initdata = 1;
--- 2009-10-27.orig/xen/arch/x86/oprofile/nmi_int.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/oprofile/nmi_int.c 2009-10-27 12:07:32.000000000 +0100
@@ -29,7 +29,7 @@
struct op_counter_config counter_config[OP_MAX_COUNTER];
-static struct op_x86_model_spec const * model;
+static struct op_x86_model_spec const *__read_mostly model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];
--- 2009-10-27.orig/xen/arch/x86/oprofile/op_model_p4.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/oprofile/op_model_p4.c 2009-10-27 12:07:32.000000000 +0100
@@ -109,7 +109,7 @@ static int p4_unused_cccr[NUM_UNUSED_CCC
/* p4 event codes in libop/op_event.h are indices into this table. */
-static struct p4_event_binding p4_events[NUM_EVENTS] = {
+static const struct p4_event_binding p4_events[NUM_EVENTS] = {
{ /* BRANCH_RETIRED */
0x05, 0x06,
@@ -485,7 +485,7 @@ static void pmc_setup_one_p4_counter(uns
unsigned int escr = 0;
unsigned int high = 0;
unsigned int counter_bit;
- struct p4_event_binding *ev = NULL;
+ const struct p4_event_binding *ev = NULL;
unsigned int stag;
stag = get_stagger();
--- 2009-10-27.orig/xen/arch/x86/oprofile/op_model_ppro.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/oprofile/op_model_ppro.c 2009-10-27 12:07:32.000000000 +0100
@@ -309,7 +309,7 @@ void arch_perfmon_setup_counters(void)
op_ppro_spec.num_controls = num_counters;
}
-struct op_x86_model_spec op_ppro_spec = {
+struct op_x86_model_spec __read_mostly op_ppro_spec = {
.num_counters = 2,
.num_controls = 2,
.fill_in_addresses = &ppro_fill_in_addresses,
@@ -324,7 +324,7 @@ struct op_x86_model_spec op_ppro_spec =
.save_msr = &ppro_save_msr
};
-struct op_x86_model_spec op_arch_perfmon_spec = {
+struct op_x86_model_spec __read_mostly op_arch_perfmon_spec = {
/* num_counters/num_controls filled in at runtime */
.fill_in_addresses = &ppro_fill_in_addresses,
.setup_ctrs = &ppro_setup_ctrs,
--- 2009-10-27.orig/xen/arch/x86/setup.c 2009-10-15 11:42:12.000000000 +0200
+++ 2009-10-27/xen/arch/x86/setup.c 2009-10-27 12:07:32.000000000 +0100
@@ -55,15 +55,15 @@ extern u8 boot_edid_info[128];
extern struct boot_video_info boot_vid_info;
/* opt_nosmp: If true, secondary processors are ignored. */
-static int opt_nosmp = 0;
+static int __initdata opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);
/* maxcpus: maximum number of CPUs to activate. */
-static unsigned int max_cpus = NR_CPUS;
+static unsigned int __initdata max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);
/* opt_watchdog: If true, run a watchdog NMI on each processor. */
-static int opt_watchdog = 0;
+static int __initdata opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);
/* opt_tsc_unstable: Override all tests; assume TSC is unreliable. */
@@ -94,13 +94,14 @@ boolean_param("cpuidle", xen_cpuidle);
int early_boot = 1;
-cpumask_t cpu_present_map;
+cpumask_t __read_mostly cpu_present_map;
-unsigned long xen_phys_start;
+unsigned long __read_mostly xen_phys_start;
#ifdef CONFIG_X86_32
/* Limits of Xen heap, used to initialise the allocator. */
-unsigned long xenheap_initial_phys_start, xenheap_phys_end;
+unsigned long __initdata xenheap_initial_phys_start;
+unsigned long __read_mostly xenheap_phys_end;
#endif
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table) =
boot_cpu_gdt_table;
@@ -113,15 +114,14 @@ DEFINE_PER_CPU(struct tss_struct, init_t
char __attribute__ ((__section__(".bss.stack_aligned")))
cpu0_stack[STACK_SIZE];
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
+struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
-unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-EXPORT_SYMBOL(mmu_cr4_features);
+unsigned long __read_mostly mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-int acpi_disabled;
+int __read_mostly acpi_disabled;
-int acpi_force;
-char acpi_param[10] = "";
+int __read_mostly acpi_force;
+static char __initdata acpi_param[10] = "";
static void __init parse_acpi_param(char *s)
{
/* Save the parameter so it can be propagated to domain0. */
--- 2009-10-27.orig/xen/arch/x86/srat.c 2009-09-22 11:02:53.000000000 +0200
+++ 2009-10-27/xen/arch/x86/srat.c 2009-10-27 12:07:32.000000000 +0100
@@ -20,12 +20,12 @@
#include <asm/e820.h>
#include <asm/page.h>
-static struct acpi_table_slit *acpi_slit;
+static struct acpi_table_slit *__read_mostly acpi_slit;
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+static u8 __read_mostly pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
--- 2009-10-27.orig/xen/arch/x86/tboot.c 2009-09-22 17:28:54.000000000 +0200
+++ 2009-10-27/xen/arch/x86/tboot.c 2009-10-27 12:07:32.000000000 +0100
@@ -13,7 +13,7 @@
#include <crypto/vmac.h>
/* tboot=<physical address of shared page> */
-static char opt_tboot[20] = "";
+static char __initdata opt_tboot[20] = "";
string_param("tboot", opt_tboot);
/* Global pointer to shared data; NULL means no measured launch. */
@@ -26,8 +26,8 @@ static vmac_t frametable_mac; /* MAC for
static const uuid_t tboot_shared_uuid = TBOOT_SHARED_UUID;
/* used by tboot_protect_mem_regions() and/or tboot_parse_dmar_table() */
-static uint64_t txt_heap_base, txt_heap_size;
-static uint64_t sinit_base, sinit_size;
+static uint64_t __initdata txt_heap_base, __initdata txt_heap_size;
+static uint64_t __initdata sinit_base, __initdata sinit_size;
/*
* TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
--- 2009-10-27.orig/xen/arch/x86/traps.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/traps.c 2009-10-27 12:07:32.000000000 +0100
@@ -73,9 +73,9 @@
* ignore: The NMI error is cleared and ignored.
*/
#ifdef NDEBUG
-char opt_nmi[10] = "dom0";
+static char __read_mostly opt_nmi[10] = "dom0";
#else
-char opt_nmi[10] = "fatal";
+static char __read_mostly opt_nmi[10] = "fatal";
#endif
string_param("nmi", opt_nmi);
--- 2009-10-27.orig/xen/arch/x86/x86_32/entry.S 2009-05-20 08:45:59.000000000 +0200
+++ 2009-10-27/xen/arch/x86/x86_32/entry.S 2009-10-27 12:07:32.000000000 +0100
@@ -640,7 +640,7 @@ ENTRY(setup_vm86_frame)
addl $16,%esp
ret
-.data
+.section .rodata, "a", @progbits
ENTRY(exception_table)
.long do_divide_error
--- 2009-10-27.orig/xen/arch/x86/x86_32/mm.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/arch/x86/x86_32/mm.c 2009-10-27 12:07:32.000000000 +0100
@@ -36,10 +36,10 @@ l2_pgentry_t __attribute__ ((__section__
extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES];
-unsigned int PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
-unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
+unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
+unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
-static unsigned long mpt_size;
+static unsigned long __read_mostly mpt_size;
void *alloc_xen_pagetable(void)
{
--- 2009-10-27.orig/xen/arch/x86/x86_64/entry.S 2009-05-20 08:45:59.000000000 +0200
+++ 2009-10-27/xen/arch/x86/x86_64/entry.S 2009-10-27 12:07:32.000000000 +0100
@@ -629,7 +629,7 @@ ENTRY(machine_check)
movl $TRAP_machine_check,4(%rsp)
jmp handle_ist_exception
-.data
+.section .rodata, "a", @progbits
ENTRY(exception_table)
.quad do_divide_error
--- 2009-10-27.orig/xen/arch/x86/x86_64/mm.c 2009-10-15 11:42:12.000000000 +0200
+++ 2009-10-27/xen/arch/x86/x86_64/mm.c 2009-10-27 12:11:22.000000000 +0100
@@ -43,7 +43,7 @@ unsigned long __read_mostly ma_top_mask
unsigned long __read_mostly pfn_hole_mask = 0;
unsigned int __read_mostly pfn_pdx_hole_shift = 0;
-unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
+unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
DEFINE_PER_CPU_READ_MOSTLY(void *, compat_arg_xlat);
--- 2009-10-27.orig/xen/arch/x86/x86_emulate/x86_emulate.c 2009-10-01 10:53:02.000000000 +0200
+++ 2009-10-27/xen/arch/x86/x86_emulate/x86_emulate.c 2009-10-27 12:07:32.000000000 +0100
@@ -715,7 +715,7 @@ static int read_ulong(
unsigned long *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
*val = 0;
return ops->read(seg, offset, val, bytes, ctxt);
@@ -848,7 +848,7 @@ test_cc(
static int
get_cpl(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register reg;
@@ -865,7 +865,7 @@ get_cpl(
static int
_mode_iopl(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
int cpl = get_cpl(ctxt, ops);
if ( cpl == -1 )
@@ -888,7 +888,7 @@ static int ioport_access_check(
unsigned int first_port,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
unsigned long iobmp;
struct segment_register tr;
@@ -933,7 +933,7 @@ static int ioport_access_check(
static int
in_realmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
unsigned long cr0;
int rc;
@@ -948,7 +948,7 @@ in_realmode(
static int
in_protmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
return !(in_realmode(ctxt, ops) || (ctxt->regs->eflags & EFLG_VM));
}
@@ -956,7 +956,7 @@ in_protmode(
static int
in_longmode(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
uint64_t efer;
@@ -972,7 +972,7 @@ realmode_load_seg(
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register reg;
int rc;
@@ -991,7 +991,7 @@ protmode_load_seg(
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
struct segment_register desctab, ss, segr;
struct { uint32_t a, b; } desc;
@@ -1130,7 +1130,7 @@ load_seg(
enum x86_segment seg,
uint16_t sel,
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
if ( (ops->read_segment == NULL) ||
(ops->write_segment == NULL) )
@@ -1202,7 +1202,7 @@ decode_segment(uint8_t modrm_reg)
int
x86_emulate(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+ const struct x86_emulate_ops *ops)
{
/* Shadow copy of register state. Committed on successful emulation. */
struct cpu_user_regs _regs = *ctxt->regs;
--- 2009-10-27.orig/xen/arch/x86/x86_emulate/x86_emulate.h 2008-12-10 09:14:08.000000000 +0100
+++ 2009-10-27/xen/arch/x86/x86_emulate/x86_emulate.h 2009-10-27 12:07:32.000000000 +0100
@@ -395,7 +395,7 @@ struct x86_emulate_ctxt
int
x86_emulate(
struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops);
+ const struct x86_emulate_ops *ops);
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
--- 2009-10-27.orig/xen/common/lib.c 2009-06-29 11:58:15.000000000 +0200
+++ 2009-10-27/xen/common/lib.c 2009-10-27 12:07:32.000000000 +0100
@@ -5,7 +5,7 @@
#include <asm/byteorder.h>
/* for ctype.h */
-unsigned char _ctype[] = {
+const unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
--- 2009-10-27.orig/xen/common/libelf/libelf-dominfo.c 2009-01-14 09:47:07.000000000 +0100
+++ 2009-10-27/xen/common/libelf/libelf-dominfo.c 2009-10-27 12:07:32.000000000 +0100
@@ -7,14 +7,14 @@
/* ------------------------------------------------------------------------ */
/* xen features */
-const char *elf_xen_feature_names[] = {
+static const char *const elf_xen_feature_names[] = {
[XENFEAT_writable_page_tables] = "writable_page_tables",
[XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
[XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
[XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
[XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
};
-const int elf_xen_features =
+static const int elf_xen_features =
sizeof(elf_xen_feature_names) / sizeof(elf_xen_feature_names[0]);
int elf_xen_parse_features(const char *features,
--- 2009-10-27.orig/xen/common/sched_credit.c 2009-10-02 11:40:50.000000000 +0200
+++ 2009-10-27/xen/common/sched_credit.c 2009-10-27 12:07:32.000000000 +0100
@@ -1375,7 +1375,7 @@ static void csched_tick_resume(void)
- now % MILLISECS(CSCHED_MSECS_PER_TICK) );
}
-struct scheduler sched_credit_def = {
+const struct scheduler sched_credit_def = {
.name = "SMP Credit Scheduler",
.opt_name = "credit",
.sched_id = XEN_SCHEDULER_CREDIT,
--- 2009-10-27.orig/xen/common/sched_sedf.c 2009-06-19 11:11:23.000000000 +0200
+++ 2009-10-27/xen/common/sched_sedf.c 2009-10-27 12:07:32.000000000 +0100
@@ -1456,7 +1456,7 @@ static int sedf_adjust(struct domain *p,
return 0;
}
-struct scheduler sched_sedf_def = {
+const struct scheduler sched_sedf_def = {
.name = "Simple EDF Scheduler",
.opt_name = "sedf",
.sched_id = XEN_SCHEDULER_SEDF,
--- 2009-10-27.orig/xen/common/schedule.c 2009-08-31 15:09:04.000000000 +0200
+++ 2009-10-27/xen/common/schedule.c 2009-10-27 12:07:32.000000000 +0100
@@ -56,15 +56,15 @@ static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct schedule_data, schedule_data);
-extern struct scheduler sched_sedf_def;
-extern struct scheduler sched_credit_def;
-static struct scheduler *schedulers[] = {
+extern const struct scheduler sched_sedf_def;
+extern const struct scheduler sched_credit_def;
+static const struct scheduler *__initdata schedulers[] = {
&sched_sedf_def,
&sched_credit_def,
NULL
};
-static struct scheduler ops;
+static struct scheduler __read_mostly ops;
#define SCHED_OP(fn, ...) \
(( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ ) \
--- 2009-10-27.orig/xen/drivers/char/console.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/drivers/char/console.c 2009-10-27 12:07:32.000000000 +0100
@@ -44,19 +44,19 @@ string_param("console", opt_console);
/* Char 1: CTRL+<char1> is used to switch console input between Xen and DOM0 */
/* Char 2: If this character is 'x', then do not auto-switch to DOM0 when it */
/* boots. Any other value, or omitting the char, enables auto-switch */
-static unsigned char opt_conswitch[3] = "a";
+static unsigned char __read_mostly opt_conswitch[3] = "a";
string_param("conswitch", opt_conswitch);
/* sync_console: force synchronous console output (useful for debugging). */
-static int opt_sync_console;
+static int __read_mostly opt_sync_console;
boolean_param("sync_console", opt_sync_console);
/* console_to_ring: send guest (incl. dom 0) console data to console ring. */
-static int opt_console_to_ring;
+static int __read_mostly opt_console_to_ring;
boolean_param("console_to_ring", opt_console_to_ring);
/* console_timestamps: include a timestamp prefix on every Xen console line. */
-static int opt_console_timestamps;
+static int __read_mostly opt_console_timestamps;
boolean_param("console_timestamps", opt_console_timestamps);
/* conring_size: allows a large console ring than default (16kB). */
@@ -65,10 +65,11 @@ size_param("conring_size", opt_conring_s
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char _conring[_CONRING_SIZE], *conring = _conring;
-static uint32_t conring_size = _CONRING_SIZE, conringc, conringp;
+static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
+static uint32_t __read_mostly conring_size = _CONRING_SIZE;
+static uint32_t conringc, conringp;
-static int sercon_handle = -1;
+static int __read_mostly sercon_handle = -1;
static DEFINE_SPINLOCK(console_lock);
@@ -103,10 +104,10 @@ static DEFINE_SPINLOCK(console_lock);
#define XENLOG_DEFAULT 1 /* XENLOG_WARNING */
#define XENLOG_GUEST_DEFAULT 1 /* XENLOG_WARNING */
-static int xenlog_upper_thresh = XENLOG_UPPER_THRESHOLD;
-static int xenlog_lower_thresh = XENLOG_LOWER_THRESHOLD;
-static int xenlog_guest_upper_thresh = XENLOG_GUEST_UPPER_THRESHOLD;
-static int xenlog_guest_lower_thresh = XENLOG_GUEST_LOWER_THRESHOLD;
+static int __read_mostly xenlog_upper_thresh = XENLOG_UPPER_THRESHOLD;
+static int __read_mostly xenlog_lower_thresh = XENLOG_LOWER_THRESHOLD;
+static int __read_mostly xenlog_guest_upper_thresh = XENLOG_GUEST_UPPER_THRESHOLD;
+static int __read_mostly xenlog_guest_lower_thresh = XENLOG_GUEST_LOWER_THRESHOLD;
static void parse_loglvl(char *s);
static void parse_guest_loglvl(char *s);
@@ -273,7 +274,7 @@ static void sercon_puts(const char *s)
/* CTRL-<switch_char> switches input direction between Xen and DOM0. */
#define switch_code (opt_conswitch[0]-'a'+1)
-static int xen_rx = 1; /* FALSE => serial input passed to domain 0. */
+static int __read_mostly xen_rx = 1; /* FALSE => serial input passed to domain 0. */
static void switch_serial_input(void)
{
@@ -773,10 +774,10 @@ int __printk_ratelimit(int ratelimit_ms,
}
/* minimum time in ms between messages */
-int printk_ratelimit_ms = 5 * 1000;
+static int __read_mostly printk_ratelimit_ms = 5 * 1000;
/* number of messages we send before ratelimiting */
-int printk_ratelimit_burst = 10;
+static int __read_mostly printk_ratelimit_burst = 10;
int printk_ratelimit(void)
{
--- 2009-10-27.orig/xen/drivers/char/ns16550.c 2009-08-31 15:09:04.000000000 +0200
+++ 2009-10-27/xen/drivers/char/ns16550.c 2009-10-27 12:07:32.000000000 +0100
@@ -272,7 +272,7 @@ static int ns16550_irq(struct serial_por
return ((uart->irq > 0) ? uart->irq : -1);
}
-static struct uart_driver ns16550_driver = {
+static struct uart_driver __read_mostly ns16550_driver = {
.init_preirq = ns16550_init_preirq,
.init_postirq = ns16550_init_postirq,
.endboot = ns16550_endboot,
--- 2009-10-27.orig/xen/drivers/char/serial.c 2009-08-31 15:09:04.000000000 +0200
+++ 2009-10-27/xen/drivers/char/serial.c 2009-10-27 12:07:32.000000000 +0100
@@ -18,7 +18,7 @@
/* Never drop characters, even if the async transmit buffer fills. */
/* #define SERIAL_NEVER_DROP_CHARS 1 */
-unsigned int serial_txbufsz = 16384;
+unsigned int __read_mostly serial_txbufsz = 16384;
size_param("serial_tx_buffer", serial_txbufsz);
#define mask_serial_rxbuf_idx(_i) ((_i)&(serial_rxbufsz-1))
--- 2009-10-27.orig/xen/drivers/passthrough/amd/pci_amd_iommu.c 2009-10-07 13:31:36.000000000 +0200
+++ 2009-10-27/xen/drivers/passthrough/amd/pci_amd_iommu.c 2009-10-27 12:07:32.000000000 +0100
@@ -430,7 +430,7 @@ static int amd_iommu_group_id(u8 bus, u8
return rt;
}
-struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
.init = amd_iommu_domain_init,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
--- 2009-10-27.orig/xen/drivers/passthrough/iommu.c 2009-10-22 08:18:54.000000000 +0200
+++ 2009-10-27/xen/drivers/passthrough/iommu.c 2009-10-27 12:07:32.000000000 +0100
@@ -292,7 +292,7 @@ int iommu_get_device_group(struct domain
int group_id, sdev_id;
u32 bdf;
int i = 0;
- struct iommu_ops *ops = hd->platform_ops;
+ const struct iommu_ops *ops = hd->platform_ops;
if ( !iommu_enabled || !ops || !ops->get_device_group_id )
return 0;
@@ -327,39 +327,39 @@ int iommu_get_device_group(struct domain
void iommu_update_ire_from_apic(
unsigned int apic, unsigned int reg, unsigned int value)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->update_ire_from_apic(apic, reg, value);
}
void iommu_update_ire_from_msi(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->update_ire_from_msi(msi_desc, msg);
}
void iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
ops->read_msi_from_ire(msi_desc, msg);
}
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
return ops->read_apic_from_ire(apic, reg);
}
void iommu_resume()
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
if ( iommu_enabled )
ops->resume();
}
void iommu_suspend()
{
- struct iommu_ops *ops = iommu_get_ops();
+ const struct iommu_ops *ops = iommu_get_ops();
if ( iommu_enabled )
ops->suspend();
}
--- 2009-10-27.orig/xen/drivers/passthrough/vtd/iommu.c 2009-10-27 12:04:10.000000000 +0100
+++ 2009-10-27/xen/drivers/passthrough/vtd/iommu.c 2009-10-27 12:07:32.000000000 +0100
@@ -1944,7 +1944,7 @@ static void vtd_resume(void)
}
}
-struct iommu_ops intel_iommu_ops = {
+const struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
--- 2009-10-27.orig/xen/include/asm-ia64/hvm/iommu.h 2009-03-31 17:55:50.000000000 +0200
+++ 2009-10-27/xen/include/asm-ia64/hvm/iommu.h 2009-10-27 12:07:32.000000000 +0100
@@ -8,7 +8,7 @@
#include <asm/iosapic.h>
struct iommu_ops;
-extern struct iommu_ops intel_iommu_ops;
+extern const struct iommu_ops intel_iommu_ops;
extern int intel_vtd_setup(void);
#define iommu_get_ops() (&intel_iommu_ops)
--- 2009-10-27.orig/xen/include/asm-x86/domain.h 2009-10-22 08:18:54.000000000 +0200
+++ 2009-10-27/xen/include/asm-x86/domain.h 2009-10-27 12:07:32.000000000 +0100
@@ -187,7 +187,7 @@ struct paging_domain {
struct paging_vcpu {
/* Pointers to mode-specific entry points. */
- struct paging_mode *mode;
+ const struct paging_mode *mode;
/* HVM guest: last emulate was to a pagetable */
unsigned int last_write_was_pt:1;
/* HVM guest: last write emulation succeeds */
--- 2009-10-27.orig/xen/include/asm-x86/genapic.h 2009-09-07 13:22:47.000000000 +0200
+++ 2009-10-27/xen/include/asm-x86/genapic.h 2009-10-27 12:07:32.000000000 +0100
@@ -18,7 +18,7 @@ struct mp_config_table;
struct mpc_config_processor;
struct genapic {
- char *name;
+ const char *name;
int (*probe)(void);
/* When one of the next two hooks returns 1 the genapic
@@ -48,9 +48,9 @@ struct genapic {
APICFUNC(mps_oem_check), \
APICFUNC(acpi_madt_oem_check)
-extern struct genapic *genapic;
-extern struct genapic apic_x2apic_phys;
-extern struct genapic apic_x2apic_cluster;
+extern const struct genapic *genapic;
+extern const struct genapic apic_x2apic_phys;
+extern const struct genapic apic_x2apic_cluster;
void init_apic_ldr_flat(void);
void clustered_apic_check_flat(void);
--- 2009-10-27.orig/xen/include/asm-x86/hap.h 2009-06-05 11:59:48.000000000 +0200
+++ 2009-10-27/xen/include/asm-x86/hap.h 2009-10-27 12:07:32.000000000 +0100
@@ -97,11 +97,6 @@ int hap_track_dirty_vram(struct domain
unsigned long nr,
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
-extern struct paging_mode hap_paging_real_mode;
-extern struct paging_mode hap_paging_protected_mode;
-extern struct paging_mode hap_paging_pae_mode;
-extern struct paging_mode hap_paging_long_mode;
-
#endif /* XEN_HAP_H */
/*
--- 2009-10-27.orig/xen/include/asm-x86/hvm/iommu.h 2008-10-17 08:29:02.000000000 +0200
+++ 2009-10-27/xen/include/asm-x86/hvm/iommu.h 2009-10-27 12:07:32.000000000 +0100
@@ -2,12 +2,12 @@
#define __ASM_X86_HVM_IOMMU_H__
struct iommu_ops;
-extern struct iommu_ops intel_iommu_ops;
-extern struct iommu_ops amd_iommu_ops;
+extern const struct iommu_ops intel_iommu_ops;
+extern const struct iommu_ops amd_iommu_ops;
extern int intel_vtd_setup(void);
extern int amd_iov_detect(void);
-static inline struct iommu_ops *iommu_get_ops(void)
+static inline const struct iommu_ops *iommu_get_ops(void)
{
switch ( boot_cpu_data.x86_vendor )
{
--- 2009-10-27.orig/xen/include/xen/ctype.h 2005-11-17 15:51:06.000000000 +0100
+++ 2009-10-27/xen/include/xen/ctype.h 2009-10-27 12:07:32.000000000 +0100
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
--- 2009-10-27.orig/xen/include/xen/hvm/iommu.h 2009-09-07 13:22:47.000000000 +0200
+++ 2009-10-27/xen/include/xen/hvm/iommu.h 2009-10-27 12:07:32.000000000 +0100
@@ -43,7 +43,7 @@ struct hvm_iommu {
struct page_info *root_table;
/* iommu_ops */
- struct iommu_ops *platform_ops;
+ const struct iommu_ops *platform_ops;
};
#endif /* __XEN_HVM_IOMMU_H__ */