- make the log levels used consistent in a few places
- remove trailing newlines, dots, and commas from message text
- remove explicitly specified function names from message text (a representative before/after is shown below)
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
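For illustration, the first hunk (emulate.c) makes this kind of change; the before/after is quoted from the patch below, and the rationale assumed here (it is not spelled out in the patch itself) is that the HVM_DBG_LOG() macro already prefixes the calling function's name and terminates the line, so repeating them in the format string is redundant:

    /* before: function name embedded in the message, trailing ".\n" */
    HVM_DBG_LOG(DBG_LEVEL_1, "get_effective_addr_modrm64(): prefix = %x, "
                "length = %d, operand[0,1] = %x %x.\n", prefix, *size, operand [0],
                operand [1]);

    /* after: same log level, message text only, no trailing punctuation */
    HVM_DBG_LOG(DBG_LEVEL_1, "prefix = %x, length = %d, operand[0,1] = %x %x",
                prefix, *size, operand[0], operand[1]);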
Index: 2007-05-14/xen/arch/x86/hvm/svm/emulate.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/emulate.c 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/emulate.c 2007-05-14 14:33:28.000000000 +0200
@@ -145,9 +145,8 @@ unsigned long get_effective_addr_modrm64
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- HVM_DBG_LOG(DBG_LEVEL_1, "get_effective_addr_modrm64(): prefix = %x, "
- "length = %d, operand[0,1] = %x %x.\n", prefix, *size, operand [0],
- operand [1]);
+ HVM_DBG_LOG(DBG_LEVEL_1, "prefix = %x, length = %d, operand[0,1] = %x %x",
+ prefix, *size, operand[0], operand[1]);
if ((NULL == size) || (NULL == operand) || (1 > *size))
{
Index: 2007-05-14/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/svm.c 2007-05-14 14:33:08.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/svm.c 2007-05-14 14:33:28.000000000 +0200
@@ -135,7 +135,7 @@ static inline int long_mode_do_msr_write
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
ecx, msr_content);
switch ( ecx )
@@ -394,7 +394,7 @@ int svm_vmcb_restore(struct vcpu *v, str
* If different, make a shadow. Check if the PDBR is valid
* first.
*/
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
@@ -1532,7 +1532,7 @@ static int svm_set_cr0(unsigned long val
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long old_base_mfn;
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
/* ET is reserved and should be always be 1. */
value |= X86_CR0_ET;
@@ -1557,11 +1557,11 @@ static int svm_set_cr0(unsigned long val
{
if ( !svm_cr4_pae_is_set(v) )
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}
@@ -1654,7 +1654,7 @@ static void mov_from_cr(int cr, int gp,
set_reg(gp, value, regs, vmcb);
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
}
@@ -1672,8 +1672,8 @@ static int mov_to_cr(int gpreg, int cr,
HVMTRACE_2D(CR_WRITE, v, cr, value);
- HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
- HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
+ HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx, current = %p",
+ cr, value, v);
switch ( cr )
{
Index: 2007-05-14/xen/arch/x86/hvm/vioapic.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vioapic.c 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vioapic.c 2007-05-14 14:33:28.000000000 +0200
@@ -99,7 +99,7 @@ static unsigned long vioapic_read(struct
struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
uint32_t result;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_read addr %lx\n", addr);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);
addr &= 0xff;
@@ -183,8 +183,7 @@ static void vioapic_write_indirect(
{
uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_write_indirect "
- "change redir index %x val %lx\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "change redir index %x val %lx",
redir_index, val);
if ( redir_index >= VIOAPIC_NUM_PINS )
@@ -252,8 +251,7 @@ static void ioapic_inj_irq(
uint8_t trig_mode,
uint8_t delivery_mode)
{
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
- "irq %d trig %d delive mode %d\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
vector, trig_mode, delivery_mode);
switch ( delivery_mode )
@@ -275,8 +273,8 @@ static uint32_t ioapic_get_delivery_bitm
uint32_t mask = 0;
struct vcpu *v;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
- "dest %d dest_mode %d\n", dest, dest_mode);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "dest %d dest_mode %d",
+ dest, dest_mode);
if ( dest_mode == 0 ) /* Physical mode. */
{
@@ -304,7 +302,7 @@ static uint32_t ioapic_get_delivery_bitm
}
out:
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask mask %x\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "mask %x",
mask);
return mask;
}
@@ -331,14 +329,13 @@ static void vioapic_deliver(struct hvm_h
HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
"dest=%x dest_mode=%x delivery_mode=%x "
- "vector=%x trig_mode=%x\n",
+ "vector=%x trig_mode=%x",
dest, dest_mode, delivery_mode, vector, trig_mode);
deliver_bitmask = ioapic_get_delivery_bitmask(vioapic, dest, dest_mode);
if ( !deliver_bitmask )
{
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
- "no target on destination\n");
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "no target on destination");
return;
}
@@ -364,7 +361,7 @@ static void vioapic_deliver(struct hvm_h
else
{
HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
- "mask=%x vector=%x delivery_mode=%x\n",
+ "mask=%x vector=%x delivery_mode=%x",
deliver_bitmask, vector, dest_LowestPrio);
}
break;
@@ -412,7 +409,7 @@ void vioapic_irq_positive_edge(struct do
struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
union vioapic_redir_entry *ent;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_irq_positive_edge irq %x", irq);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);
ASSERT(irq < VIOAPIC_NUM_PINS);
ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
Index: 2007-05-14/xen/arch/x86/hvm/vlapic.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vlapic.c 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vlapic.c 2007-05-14 14:33:28.000000000 +0200
@@ -171,7 +171,7 @@ uint32_t vlapic_get_ppr(struct vlapic *v
ppr = isrv & 0xf0;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
- "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x.",
+ "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
vlapic, ppr, isr, isrv);
return ppr;
@@ -211,7 +211,7 @@ static int vlapic_match_dest(struct vcpu
struct vlapic *target = vcpu_vlapic(v);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
- "dest_mode 0x%x, short_hand 0x%x\n",
+ "dest_mode 0x%x, short_hand 0x%x",
target, source, dest, dest_mode, short_hand);
switch ( short_hand )
@@ -270,14 +270,14 @@ static int vlapic_accept_irq(struct vcpu
if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "level trig mode repeatedly for vector %d\n", vector);
+ "level trig mode repeatedly for vector %d", vector);
break;
}
if ( trig_mode )
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "level trig mode for vector %d\n", vector);
+ "level trig mode for vector %d", vector);
vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
}
@@ -399,7 +399,7 @@ static void vlapic_ipi(struct vlapic *vl
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
- "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x.",
+ "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
icr_high, icr_low, short_hand, dest,
trig_mode, level, dest_mode, delivery_mode, vector);
@@ -437,7 +437,7 @@ static uint32_t vlapic_get_tmcct(struct
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
"timer initial count %d, timer current count %d, "
- "offset %"PRId64".",
+ "offset %"PRId64,
tmict, tmcct, counter_passed);
return tmcct;
@@ -454,7 +454,7 @@ static void vlapic_set_tdcr(struct vlapi
vlapic->hw.timer_divisor = 1 << (val & 7);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
- "vlapic_set_tdcr timer_divisor: %d.",
vlapic->hw.timer_divisor);
+ "timer_divisor: %d", vlapic->hw.timer_divisor);
}
static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
@@ -493,7 +493,7 @@ static unsigned long vlapic_read(struct
/* some bugs on kernel cause read this with byte*/
if ( len != 4 )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "read with len=0x%lx, should be 4 instead.\n",
+ "read with len=0x%lx, should be 4 instead",
len);
alignment = offset & 0x3;
@@ -522,7 +522,7 @@ static unsigned long vlapic_read(struct
}
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
- "and the result is 0x%lx.", offset, len, result);
+ "and the result is 0x%lx", offset, len, result);
return result;
@@ -539,7 +539,7 @@ static void vlapic_write(struct vcpu *v,
if ( offset != 0xb0 )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "offset 0x%x with length 0x%lx, and value is 0x%lx.",
+ "offset 0x%x with length 0x%lx, and value is 0x%lx",
offset, len, val);
/*
@@ -713,7 +713,7 @@ void vlapic_msr_set(struct vlapic *vlapi
vlapic->hw.apic_base_msr = value;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "apic base msr is 0x%016"PRIx64".", vlapic->hw.apic_base_msr);
+ "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}
int vlapic_accept_pic_intr(struct vcpu *v)
@@ -913,7 +913,7 @@ int vlapic_init(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
- HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);
+ HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
vlapic->regs_page = alloc_domheap_page(NULL);
if ( vlapic->regs_page == NULL )
Index: 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/vmx/vmx.c 2007-05-14 14:33:24.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/vmx/vmx.c 2007-05-14 14:33:28.000000000 +0200
@@ -111,10 +111,11 @@ static void vmx_save_host_msrs(void)
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
u64 msr_content = 0;
+ u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
- switch ( (u32)regs->ecx ) {
+ switch ( ecx ) {
case MSR_EFER:
msr_content = v->arch.hvm_vmx.efer;
break;
@@ -157,7 +158,7 @@ static inline int long_mode_do_msr_read(
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: 0x%"PRIx64, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
regs->eax = (u32)(msr_content >> 0);
regs->edx = (u32)(msr_content >> 32);
@@ -173,8 +174,7 @@ static inline int long_mode_do_msr_write
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
- HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
- ecx, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
switch ( ecx )
{
@@ -262,7 +262,7 @@ static inline int long_mode_do_msr_write
return 1;
uncanonical_address:
- HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
+ HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
gp_fault:
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
return 0;
@@ -577,7 +577,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
* If different, make a shadow. Check if the PDBR is valid
* first.
*/
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
/* current!=vcpu as not called by arch_vmx_do_launch */
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
@@ -2024,7 +2024,7 @@ static int vmx_set_cr0(unsigned long val
unsigned long old_cr0;
unsigned long old_base_mfn;
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
/* ET is reserved and should be always be 1. */
value |= X86_CR0_ET;
@@ -2073,12 +2073,12 @@ static int vmx_set_cr0(unsigned long val
if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
- "with EFER.LME set but not CR4.PAE\n");
+ "with EFER.LME set but not CR4.PAE");
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
}
else
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
v->arch.hvm_vmx.efer |= EFER_LMA;
vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value |= VM_ENTRY_IA32E_MODE;
@@ -2139,7 +2139,7 @@ static int vmx_set_cr0(unsigned long val
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Transfering control to vmxassist %%eip 0x%lx\n", eip);
+ "Transfering control to vmxassist %%eip 0x%lx", eip);
return 0; /* do not update eip! */
}
}
@@ -2147,12 +2147,12 @@ static int vmx_set_cr0(unsigned long val
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Enabling CR0.PE at %%eip 0x%lx\n", eip);
+ "Enabling CR0.PE at %%eip 0x%lx", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Restoring to %%eip 0x%lx\n", eip);
+ "Restoring to %%eip 0x%lx", eip);
return 0; /* do not update eip! */
}
}
@@ -2310,7 +2310,7 @@ static int mov_to_cr(int gp, int cr, str
if ( unlikely(vmx_long_mode_enabled(v)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
- "EFER.LMA is set\n");
+ "EFER.LMA is set");
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
}
}
@@ -2440,8 +2440,7 @@ static inline int vmx_do_msr_read(struct
u32 ecx = regs->ecx, eax, edx;
struct vcpu *v = current;
- HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
- ecx, (u32)regs->eax, (u32)regs->edx);
+ HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);
switch (ecx) {
case MSR_IA32_TIME_STAMP_COUNTER: