To improve the chances that relevant messages fit into the ring
buffer, allocate a dynamic (larger) one in more cases, and make the
default allocation size depend on both the number of CPUs and the
log level. Also free the static buffer if a dynamic one was obtained.
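
(Illustrative sketch only, not part of the patch: the new sizing in
console_init_postirq() gives each present CPU 512 << xenlog_lower_thresh
bytes, rounded down to a power of two. default_conring_size() below is a
hypothetical helper mirroring that arithmetic; the threshold value 2 is
assumed purely for the example.)

    #include <stdio.h>

    /* Hypothetical helper mirroring the sizing added to
     * console_init_postirq(): each present CPU contributes
     * 512 << thresh bytes, rounded down to a power of two. */
    static unsigned int default_conring_size(unsigned int cpus,
                                             unsigned int thresh)
    {
        unsigned int size = cpus << (9 + thresh);

        while (size & (size - 1))   /* round down to a power of two */
            size &= size - 1;
        return size;
    }

    int main(void)
    {
        /* e.g. 8 CPUs at an assumed threshold of 2 -> 16384 bytes */
        printf("%u\n", default_conring_size(8, 2));
        return 0;
    }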
In order for "xm dmesg" to retrieve larger buffers, eliminate
pyxc_readconsolering()'s 32k limitation resulting from the use of a
statically allocated buffer.
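
(Again just a sketch, not the patch itself: the loop added below reads
the ring incrementally into a growing heap buffer until a read no longer
fills it. read_chunk() is a made-up stand-in for xc_readconsolering(),
and the tiny initial size merely forces the growth path.)

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Made-up stand-in for xc_readconsolering(): drains a static
     * "console ring". *count is the buffer space on entry and the
     * number of bytes copied on return; *index is the read cursor. */
    static const char ring[] = "line 1\nline 2\nline 3\n";

    static int read_chunk(char *buf, unsigned int *count,
                          unsigned int *index)
    {
        unsigned int avail = sizeof(ring) - 1 - *index;

        if (*count > avail)
            *count = avail;
        memcpy(buf, ring + *index, *count);
        *index += *count;
        return 0;
    }

    /* Whenever a read fills the buffer completely, assume more data
     * is pending: enlarge the buffer and append another read. */
    static char *read_all(unsigned int *total)
    {
        unsigned int index = 0, size = 8, got = size;
        char *str = malloc(size), *ptr;

        if (!str || read_chunk(str, &got, &index) < 0)
            got = 0;

        while (str && got == size) {
            unsigned int more = size - 1;       /* roughly double */

            if (size + more < size)             /* overflow guard */
                break;
            ptr = realloc(str, size + more);
            if (!ptr)
                break;
            str = ptr;
            size += more;
            more = size - got;
            if (read_chunk(str + got, &more, &index) < 0)
                break;
            got += more;
        }
        *total = got;
        return str;
    }

    int main(void)
    {
        unsigned int n;
        char *s = read_all(&n);

        if (s)
            fwrite(s, 1, n, stdout);
        free(s);
        return 0;
    }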
Finally, on x86 suppress most per-CPU boot time messages by default
(most of them can be re-enabled with the new command line option
"cpuinfo"; some others are now printed more than once only when there
are inconsistencies between CPUs). This reduces both boot time (notably
when a graphical console is in use) and pressure on the console ring
and serial transmit buffers.
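
(Sketch of the "report once, complain only on inconsistency" pattern
used below, e.g. by the g_disabled latch in hvm_asid_init().
report_feature() is a hypothetical example, not code from the patch.)

    #include <stdio.h>

    /* Remember the first CPU's value in a static; later CPUs are only
     * reported (as a warning) when they disagree with the first one. */
    static void report_feature(unsigned int cpu, int enabled)
    {
        static int g_enabled = -1;      /* -1: nothing reported yet */

        if (enabled != g_enabled) {
            printf("%sCPU%u: feature %sabled\n",
                   g_enabled < 0 ? "" : "WARNING: ", cpu,
                   enabled ? "en" : "dis");
            if (g_enabled < 0)          /* latch only the first value */
                g_enabled = enabled;
        }
    }

    int main(void)
    {
        report_feature(0, 1);   /* prints once */
        report_feature(1, 1);   /* silent: consistent */
        report_feature(2, 0);   /* warns: inconsistent */
        return 0;
    }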
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- 2010-03-02.orig/tools/python/xen/lowlevel/xc/xc.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/tools/python/xen/lowlevel/xc/xc.c 2010-03-16 16:29:22.000000000 +0100
@@ -1062,14 +1062,16 @@ static PyObject *pyxc_readconsolering(Xc
PyObject *kwds)
{
unsigned int clear = 0, index = 0, incremental = 0;
- char _str[32768], *str = _str;
- unsigned int count = 32768;
+ unsigned int count = 16384 + 1, size = count;
+ char *str = malloc(size), *ptr;
+ PyObject *obj;
int ret;
static char *kwd_list[] = { "clear", "index", "incremental", NULL };
if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwd_list,
- &clear, &index, &incremental) )
+ &clear, &index, &incremental) ||
+ !str )
return NULL;
ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
@@ -1077,7 +1079,30 @@ static PyObject *pyxc_readconsolering(Xc
if ( ret < 0 )
return pyxc_error_to_exception();
- return PyString_FromStringAndSize(str, count);
+ while ( !incremental && count == size )
+ {
+ size += count - 1;
+ if ( size < count )
+ break;
+
+ ptr = realloc(str, size);
+ if ( !ptr )
+ break;
+
+ str = ptr + count;
+ count = size - count;
+ ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
+ 1, &index);
+ if ( ret < 0 )
+ break;
+
+ count += str - ptr;
+ str = ptr;
+ }
+
+ obj = PyString_FromStringAndSize(str, count);
+ free(str);
+ return obj;
}
--- 2010-03-02.orig/xen/arch/ia64/xen/xen.lds.S 2009-07-13 13:05:40.000000000 +0200
+++ 2010-03-02/xen/arch/ia64/xen/xen.lds.S 2010-03-16 16:43:48.000000000 +0100
@@ -223,7 +223,12 @@ SECTIONS
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - LOAD_OFFSET)
- { *(.bss) *(COMMON) }
+ {
+ . = ALIGN(PAGE_SIZE);
+ *(.bss.page_aligned)
+ *(.bss)
+ *(COMMON)
+ }
_end = .;
--- 2010-03-02.orig/xen/arch/x86/cpu/amd.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/amd.c 2010-03-16 10:54:10.000000000 +0100
@@ -493,8 +493,9 @@ static void __devinit init_amd(struct cp
}
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits;
- printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
- cpu, c->x86_max_cores, cpu_core_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU %d(%d) -> Core %d\n",
+ cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
--- 2010-03-02.orig/xen/arch/x86/cpu/common.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/common.c 2010-03-16 10:53:55.000000000 +0100
@@ -59,6 +59,9 @@ static struct cpu_dev * this_cpu = &defa
integer_param("cachesize", cachesize_override);
+int __cpuinitdata opt_cpu_info;
+boolean_param("cpuinfo", opt_cpu_info);
+
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
@@ -97,8 +100,10 @@ void __cpuinit display_cacheinfo(struct
if (n >= 0x80000005) {
cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache
%dK (%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L1 I cache %dK (%d bytes/line),"
+ " D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
c->x86_cache_size=(ecx>>24)+(edx>>24);
}
@@ -121,8 +126,9 @@ void __cpuinit display_cacheinfo(struct
c->x86_cache_size = l2size;
- printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
@@ -495,8 +501,9 @@ void __cpuinit detect_ht(struct cpuinfo_
index_msb = get_count_order(c->x86_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
@@ -507,16 +514,22 @@ void __cpuinit detect_ht(struct cpuinfo_
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ if (opt_cpu_info && c->x86_max_cores > 1)
+ printk("CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
}
#endif
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(unsigned int cpu)
{
- char *vendor = NULL;
+ const struct cpuinfo_x86 *c = cpu_data + cpu;
+ const char *vendor = NULL;
+
+ if (!opt_cpu_info)
+ return;
+
+ printk("CPU%u: ", cpu);
if (c->x86_vendor < X86_VENDOR_NUM)
vendor = this_cpu->c_vendor;
@@ -578,7 +591,8 @@ void __cpuinit cpu_init(void)
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ if (opt_cpu_info)
+ printk("Initializing CPU#%d\n", cpu);
if (cpu_has_pat)
wrmsrl(MSR_IA32_CR_PAT, host_pat);
--- 2010-03-02.orig/xen/arch/x86/cpu/intel_cacheinfo.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/intel_cacheinfo.c 2010-03-16 10:54:50.000000000 +0100
@@ -415,21 +415,23 @@ unsigned int __cpuinit init_intel_cachei
l3 = new_l3;
}
- if (trace)
- printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
- if (l1d)
- printk(", L1 D cache: %dK\n", l1d);
- else
- printk("\n");
+ if (opt_cpu_info) {
+ if (trace)
+ printk("CPU: Trace cache: %dK uops", trace);
+ else if ( l1i )
+ printk("CPU: L1 I cache: %dK", l1i);
+
+ if (l1d)
+ printk(", L1 D cache: %dK\n", l1d);
+ else
+ printk("\n");
- if (l2)
- printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+ if (l2)
+ printk("CPU: L2 cache: %dK\n", l2);
- if (l3)
- printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+ if (l3)
+ printk("CPU: L3 cache: %dK\n", l3);
+ }
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/amd_f10.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/amd_f10.c 2010-03-16 09:59:18.000000000 +0100
@@ -83,15 +83,12 @@ amd_f10_handler(struct mc_info *mi, uint
}
/* AMD Family10 machine check */
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!amd_k8_mcheck_init(c))
- return 0;
+ if (amd_k8_mcheck_init(c) == mcheck_none)
+ return mcheck_none;
x86_mce_callback_register(amd_f10_handler);
- printk("CPU%i: AMD Family%xh machine check reporting enabled\n",
- smp_processor_id(), c->x86);
-
- return 1;
+ return mcheck_amd_famXX;
}
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/amd_k8.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/amd_k8.c 2010-03-16 09:28:25.000000000 +0100
@@ -76,14 +76,14 @@ static void k8_machine_check(struct cpu_
}
/* AMD K8 machine check */
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c)
{
uint32_t i;
enum mcequirk_amd_flags quirkflag;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
quirkflag = mcequirk_lookup_amd_quirkdata(c);
@@ -102,9 +102,6 @@ int amd_k8_mcheck_init(struct cpuinfo_x8
}
set_in_cr4(X86_CR4_MCE);
- if (c->x86 < 0x10 || c->x86 > 0x11)
- printk("CPU%i: AMD K8 machine check reporting enabled\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k8;
}
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/k7.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/k7.c 2010-03-16 09:28:04.000000000 +0100
@@ -68,14 +68,14 @@ static fastcall void k7_machine_check(st
/* AMD K7 machine check */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
x86_mce_vector_register(k7_machine_check);
@@ -93,8 +93,6 @@ int amd_k7_mcheck_init(struct cpuinfo_x8
}
set_in_cr4 (X86_CR4_MCE);
- printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k7;
}
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/mce.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/mce.c 2010-03-16 09:59:41.000000000 +0100
@@ -562,9 +562,9 @@ void mcheck_mca_clearbanks(cpu_banks_t b
}
}
-static int amd_mcheck_init(struct cpuinfo_x86 *ci)
+static enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci)
{
- int rc = 0;
+ enum mcheck_type rc = mcheck_none;
switch (ci->x86) {
case 6:
@@ -628,7 +628,9 @@ int mce_firstbank(struct cpuinfo_x86 *c)
/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
- int inited = 0, i, broadcast;
+ int i, broadcast;
+ enum mcheck_type inited = mcheck_none;
+ static enum mcheck_type g_type = mcheck_unset;
static int broadcast_check;
if (mce_disabled == 1) {
@@ -694,9 +696,37 @@ void mcheck_init(struct cpuinfo_x86 *c)
if (g_mcg_cap & MCG_CTL_P)
rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
set_poll_bankmask(c);
- if (!inited)
- printk(XENLOG_INFO "CPU%i: No machine check initialization\n",
- smp_processor_id());
+
+ if (inited != g_type) {
+ char prefix[20];
+ static const char *const type_str[] = {
+ [mcheck_amd_famXX] = "AMD",
+ [mcheck_amd_k7] = "AMD K7",
+ [mcheck_amd_k8] = "AMD K8",
+ [mcheck_intel] = "Intel"
+ };
+
+ snprintf(prefix, ARRAY_SIZE(prefix),
+ g_type != mcheck_unset ? XENLOG_WARNING "CPU%i: "
+ : XENLOG_INFO,
+ smp_processor_id());
+ BUG_ON(inited >= ARRAY_SIZE(type_str));
+ switch (inited) {
+ default:
+ printk("%s%s machine check reporting enabled\n",
+ prefix, type_str[inited]);
+ break;
+ case mcheck_amd_famXX:
+ printk("%s%s Fam%xh machine check reporting enabled\n",
+ prefix, type_str[inited], c->x86);
+ break;
+ case mcheck_none:
+ printk("%sNo machine check initialization\n", prefix);
+ break;
+ }
+
+ g_type = inited;
+ }
}
u64 mce_cap_init(void)
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/mce.h 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/mce.h 2010-03-16 09:25:22.000000000 +0100
@@ -28,13 +28,21 @@ extern int mce_verbosity;
printk(s, ##a); \
} while (0)
+enum mcheck_type {
+ mcheck_unset = -1,
+ mcheck_none,
+ mcheck_amd_famXX,
+ mcheck_amd_k7,
+ mcheck_amd_k8,
+ mcheck_intel
+};
/* Init functions */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c);
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c);
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c);
-int intel_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c);
void intel_mcheck_timer(struct cpuinfo_x86 *c);
void mce_intel_feature_init(struct cpuinfo_x86 *c);
--- 2010-03-02.orig/xen/arch/x86/cpu/mcheck/mce_intel.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/cpu/mcheck/mce_intel.c 2010-03-16 17:11:44.000000000 +0100
@@ -141,8 +141,9 @@ static void intel_init_thermal(struct cp
l = apic_read (APIC_LVTTHMR);
apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk (KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
+ if (opt_cpu_info)
+ printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n",
+ cpu, tm2 ? "TM2" : "TM1");
return;
}
#endif /* CONFIG_X86_MCE_THERMAL */
@@ -946,7 +947,8 @@ static void intel_init_cmci(struct cpuin
int cpu = smp_processor_id();
if (!mce_available(c) || !cmci_support) {
- mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
+ if (opt_cpu_info)
+ mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
return;
}
@@ -1068,11 +1070,9 @@ static void mce_init(void)
}
/* p4/p6 family have similar MCA initialization process */
-int intel_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c)
{
_mce_cap_init(c);
- mce_printk(MCE_QUIET, "Intel machine check reporting enabled on CPU#%d.\n",
- smp_processor_id());
/* machine check is available */
x86_mce_vector_register(intel_machine_check);
@@ -1085,7 +1085,7 @@ int intel_mcheck_init(struct cpuinfo_x86
mce_set_owner();
open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
- return 1;
+ return mcheck_intel;
}
int intel_mce_wrmsr(uint32_t msr, uint64_t val)
--- 2010-03-02.orig/xen/arch/x86/hvm/asid.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/hvm/asid.c 2010-03-15 17:41:13.000000000 +0100
@@ -59,6 +59,7 @@ static DEFINE_PER_CPU(struct hvm_asid_da
void hvm_asid_init(int nasids)
{
+ static s8 g_disabled = -1;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
/*
@@ -72,8 +73,12 @@ void hvm_asid_init(int nasids)
data->max_asid = nasids - 1;
data->disabled = (nasids <= 1);
- printk("HVM: ASIDs %s \n",
- (data->disabled ? "disabled." : "enabled."));
+ if ( g_disabled != data->disabled )
+ {
+ printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
+ if ( g_disabled < 0 )
+ g_disabled = data->disabled;
+ }
/* Zero indicates 'invalid generation', so we start the count at one. */
data->core_asid_generation = 1;
--- 2010-03-02.orig/xen/arch/x86/setup.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/setup.c 2010-03-16 10:55:07.000000000 +0100
@@ -269,8 +269,8 @@ void __devinit srat_detect_node(int cpu)
node = 0;
numa_set_node(cpu, node);
- if ( acpi_numa > 0 )
- printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
+ if ( opt_cpu_info && acpi_numa > 0 )
+ printk("CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}
/*
--- 2010-03-02.orig/xen/arch/x86/smpboot.c 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/arch/x86/smpboot.c 2010-03-16 09:16:52.000000000 +0100
@@ -877,7 +877,9 @@ static int __devinit do_boot_cpu(int api
start_eip = setup_trampoline();
/* So we see what's up */
- printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+ if (opt_cpu_info)
+ printk("Booting processor %d/%d eip %lx\n",
+ cpu, apicid, start_eip);
stack_start.esp = prepare_idle_stack(cpu);
@@ -960,8 +962,7 @@ static int __devinit do_boot_cpu(int api
if (cpu_isset(cpu, cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
- printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(cpu);
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
@@ -1062,8 +1063,7 @@ static void __init smp_boot_cpus(unsigne
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
- printk("CPU%d: ", 0);
- print_cpu_info(&cpu_data[0]);
+ print_cpu_info(0);
boot_cpu_physical_apicid = get_apic_id();
x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
--- 2010-03-02.orig/xen/drivers/char/console.c 2010-03-16 16:18:39.000000000 +0100
+++ 2010-03-02/xen/drivers/char/console.c 2010-03-16 16:41:10.000000000 +0100
@@ -65,7 +65,12 @@ size_param("conring_size", opt_conring_s
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
+static char
+#if _CONRING_SIZE >= PAGE_SIZE
+ __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
+#endif
+ _conring[_CONRING_SIZE];
+static char *__read_mostly conring = _conring;
static uint32_t __read_mostly conring_size = _CONRING_SIZE;
static uint32_t conringc, conringp;
@@ -595,6 +600,8 @@ void __init console_init_postirq(void)
serial_init_postirq();
+ if ( !opt_conring_size )
+ opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
/* Round size down to a power of two. */
while ( opt_conring_size & (opt_conring_size - 1) )
opt_conring_size &= opt_conring_size - 1;
@@ -618,6 +625,8 @@ void __init console_init_postirq(void)
spin_unlock_irq(&console_lock);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
+
+ init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
}
void __init console_endboot(void)
--- 2010-03-02.orig/xen/include/asm-x86/processor.h 2010-03-16 16:18:34.000000000 +0100
+++ 2010-03-02/xen/include/asm-x86/processor.h 2010-03-15 17:55:08.000000000 +0100
@@ -194,10 +194,11 @@ extern struct cpuinfo_x86 cpu_data[];
extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
+extern int opt_cpu_info;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void setup_clear_cpu_cap(unsigned int);
-extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void print_cpu_info(unsigned int cpu);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void dodgy_tsc(void);