There is a lot of code duplication in the hvm_memory_event_*() functions. That
should be abstracted out into a common function hvm_memory_event(), taking
enough extra arguments that it can be called from each of the (now very small)
specific hvm_memory_event_*() functions.
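Something along these lines, as a rough sketch only (the argument list is just
a suggestion: the relevant HVM param value, the mem_event reason, the new/old
values, and an optional gla):

int hvm_memory_event(long p, uint32_t reason, unsigned long value,
                     unsigned long old, bool_t gla_valid, unsigned long gla)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    mem_event_request_t req;
    int rc;

    /* No listener registered for this event: nothing to do. */
    if ( !(p & HVMPME_MODE_MASK) )
        return 0;

    /* Listener only asked to hear about actual changes of value. */
    if ( (p & HVMPME_onchangeonly) && (value == old) )
        return 1;

    rc = mem_event_check_ring(d);
    if ( rc )
        return rc;

    memset(&req, 0, sizeof(req));
    req.type = MEM_EVENT_TYPE_ACCESS;
    req.reason = reason;
    req.gfn = value;
    req.vcpu_id = v->vcpu_id;

    if ( gla_valid )
    {
        req.offset = gla & ((1 << PAGE_SHIFT) - 1);
        req.gla = gla;
        req.gla_valid = 1;
    }

    /* Synchronous listeners want the vcpu paused until they respond. */
    if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
    {
        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
        vcpu_pause_nosync(v);
    }

    mem_event_put_request(d, &req);
    return 1;
}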
I also note some coding style issues:
- The opening brace of a function definition belongs on a new line.
- The long if (...) { ... } in hvm_memory_event_*() is better expressed as
if ( !... ) return 0; the main meat of the function can then follow without
being indented inside an if block (the sketch above already reads that way).
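With a common helper like that, each specific hvm_memory_event_*() collapses to
a trivial wrapper, e.g. (again only a sketch, using the argument order assumed
above):

int hvm_memory_event_cr0(unsigned long value, unsigned long old)
{
    return hvm_memory_event(
        current->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR0],
        MEM_EVENT_REASON_CR0, value, old, 0, 0);
}

int hvm_memory_event_int3(unsigned long gla)
{
    uint32_t pfec = PFEC_page_present;
    /* Note: this now translates gla->gfn even when no listener is armed;
     * push the translation into the helper if that overhead matters. */
    unsigned long gfn = paging_gva_to_gfn(current, gla, &pfec);

    return hvm_memory_event(
        current->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3],
        MEM_EVENT_REASON_INT3, gfn, 0, 1, gla);
}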
Also, your inline patches are being word-wrapped by your email client, so they
won't apply. We will apply your patches as a series, rather than from the
all-in-one attachment you also posted, so the inline patches do need to apply
cleanly.
-- Keir
On 04/01/2011 22:07, "Joe Epstein" <jepstein98@xxxxxxxxx> wrote:
> * Allows a memory event listener to register for events on changes to
> CR0, CR3, and CR4, as well as INT3 instructions, as a part of the
> mem_access mechanism. These events can be either synchronous or
> asynchronous.
>
> * For INT3, the logic works independent of a debugger, and so both can
> be supported.
>
> * The presence and type of listener are stored and accessed through
> HVM params.
>
> * Changed the event mask handling to ensure that the right events are
> captured based on the listeners.
>
> * Added the ability to inject HW/SW traps into a VCPU when it next
> resumes (rather than try to modify the existing IRQ injection
> code paths). Only one trap to inject can be outstanding at a time.
>
> Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>
>
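For reference, from the tools side a listener would arm one of these params
with something like the following; this is a minimal sketch only, assuming the
existing libxc xc_set_hvm_param() wrapper and the HVMPME_* flags defined
further down:

#include <xenctrl.h>

/* Hypothetical helper: ask for synchronous CR3 events, reported only
 * when the register value actually changes. */
static int enable_cr3_events(xc_interface *xch, domid_t dom)
{
    return xc_set_hvm_param(xch, dom, HVM_PARAM_MEMORY_EVENT_CR3,
                            HVMPME_mode_sync | HVMPME_onchangeonly);
}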
> diff -r 3500724e8052 -r c1866aff7a5f xen/include/asm-x86/hvm/hvm.h
> --- a/xen/include/asm-x86/hvm/hvm.h Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/include/asm-x86/hvm/hvm.h Tue Jan 04 12:35:49 2011 -0800
> @@ -370,4 +370,12 @@
> int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
> int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
>
> +/* Called for current VCPU on crX changes by guest */
> +int hvm_memory_event_cr0(unsigned long value, unsigned long old);
> +int hvm_memory_event_cr3(unsigned long value, unsigned long old);
> +int hvm_memory_event_cr4(unsigned long value, unsigned long old);
> +
> +/* Called for current VCPU on int3 */
> +int hvm_memory_event_int3(unsigned long gla);
> +
> #endif /* __ASM_X86_HVM_HVM_H__ */
> diff -r 3500724e8052 -r c1866aff7a5f xen/include/asm-x86/hvm/vcpu.h
> --- a/xen/include/asm-x86/hvm/vcpu.h Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/include/asm-x86/hvm/vcpu.h Tue Jan 04 12:35:49 2011 -0800
> @@ -114,6 +114,11 @@
> /* We may write up to m128 as a number of device-model transactions. */
> paddr_t mmio_large_write_pa;
> unsigned int mmio_large_write_bytes;
> +
> + /* Pending hw/sw interrupt */
> + int inject_trap; /* -1 for nothing to inject */
> + int inject_error_code;
> + unsigned long inject_cr2;
> };
>
> #endif /* __ASM_X86_HVM_VCPU_H__ */
> diff -r 3500724e8052 -r c1866aff7a5f xen/include/public/hvm/hvm_op.h
> --- a/xen/include/public/hvm/hvm_op.h Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/include/public/hvm/hvm_op.h Tue Jan 04 12:35:49 2011 -0800
> @@ -198,4 +198,23 @@
> };
> typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
> +
> +#define HVMOP_inject_trap 14
> +/* Inject a trap into a VCPU, which will get taken up on the next
> + * scheduling of it */
> +struct xen_hvm_inject_trap {
> + /* Domain to be queried. */
> + domid_t domid;
> + /* VCPU */
> + uint32_t vcpuid;
> + /* Trap number */
> + uint32_t trap;
> + /* Error code, or -1 to skip */
> + uint32_t error_code;
> + /* CR2 for page faults */
> + uint64_t cr2;
> +};
> +typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
> +
> #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
> diff -r 3500724e8052 -r c1866aff7a5f xen/include/public/hvm/params.h
> --- a/xen/include/public/hvm/params.h Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/include/public/hvm/params.h Tue Jan 04 12:35:49 2011 -0800
> @@ -124,6 +124,19 @@
> */
> #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
>
> -#define HVM_NR_PARAMS 20
> +/* Enable blocking memory events, async or sync (pause vcpu until response)
> + * onchangeonly indicates messages only on a change of value */
> +#define HVM_PARAM_MEMORY_EVENT_CR0 20
> +#define HVM_PARAM_MEMORY_EVENT_CR3 21
> +#define HVM_PARAM_MEMORY_EVENT_CR4 22
> +#define HVM_PARAM_MEMORY_EVENT_INT3 23
> +
> +#define HVMPME_MODE_MASK (3 << 0)
> +#define HVMPME_mode_disabled 0
> +#define HVMPME_mode_async 1
> +#define HVMPME_mode_sync 2
> +#define HVMPME_onchangeonly (1 << 2)
> +
> +#define HVM_NR_PARAMS 24
>
> #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
> diff -r 3500724e8052 -r c1866aff7a5f xen/include/public/mem_event.h
> --- a/xen/include/public/mem_event.h Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/include/public/mem_event.h Tue Jan 04 12:35:49 2011 -0800
> @@ -37,6 +37,10 @@
> /* Reasons for the memory event request */
> #define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */
> #define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */
> +#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */
> +#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */
> +#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */
> +#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
>
> typedef struct mem_event_shared_page {
> uint32_t port;
> diff -r 3500724e8052 -r c1866aff7a5f xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/arch/x86/hvm/hvm.c Tue Jan 04 12:35:49 2011 -0800
> @@ -309,6 +309,15 @@
> return; /* bail */
> }
> }
> +
> + /* Inject pending hw/sw trap */
> + if (v->arch.hvm_vcpu.inject_trap != -1)
> + {
> + hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
> + v->arch.hvm_vcpu.inject_error_code,
> + v->arch.hvm_vcpu.inject_cr2);
> + v->arch.hvm_vcpu.inject_trap = -1;
> + }
> }
>
> static void hvm_init_ioreq_page(
> @@ -949,6 +958,8 @@
> spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
> INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
>
> + v->arch.hvm_vcpu.inject_trap = -1;
> +
> #ifdef CONFIG_COMPAT
> rc = setup_compat_arg_xlat(v);
> if ( rc != 0 )
> @@ -3216,10 +3227,34 @@
> case HVM_PARAM_ACPI_IOPORTS_LOCATION:
> rc = pmtimer_change_ioport(d, a.value);
> break;
> + case HVM_PARAM_MEMORY_EVENT_INT3:
> + if ( a.value & HVMPME_onchangeonly )
> + rc = -EINVAL;
> + break;
> }
>
> - if ( rc == 0 )
> + if ( rc == 0 )
> + {
> d->arch.hvm_domain.params[a.index] = a.value;
> +
> + switch( a.index )
> + {
> + case HVM_PARAM_MEMORY_EVENT_INT3:
> + {
> + domain_pause(d);
> + domain_unpause(d); /* Causes guest to latch new status */
> + break;
> + }
> + case HVM_PARAM_MEMORY_EVENT_CR3:
> + {
> + for_each_vcpu ( d, v )
> + hvm_funcs.update_guest_cr(v, 0); /* Latches new CR3 mask through CR0 code */
> + break;
> + }
> + }
> +
> + }
> +
> }
> else
> {
> @@ -3631,6 +3666,41 @@
> break;
> }
>
> + case HVMOP_inject_trap:
> + {
> + xen_hvm_inject_trap_t tr;
> + struct domain *d;
> + struct vcpu *v;
> +
> + if ( copy_from_guest(&tr, arg, 1 ) )
> + return -EFAULT;
> +
> + rc = rcu_lock_target_domain_by_id(tr.domid, &d);
> + if ( rc != 0 )
> + return rc;
> +
> + rc = -EINVAL;
> + if ( !is_hvm_domain(d) )
> + goto param_fail8;
> +
> + rc = -ENOENT;
> + if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
> + goto param_fail8;
> +
> + if ( v->arch.hvm_vcpu.inject_trap != -1 )
> + rc = -EBUSY;
> + else
> + {
> + v->arch.hvm_vcpu.inject_trap = tr.trap;
> + v->arch.hvm_vcpu.inject_error_code = tr.error_code;
> + v->arch.hvm_vcpu.inject_cr2 = tr.cr2;
> + }
> +
> + param_fail8:
> + rcu_unlock_domain(d);
> + break;
> + }
> +
> default:
> {
> gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
> @@ -3671,6 +3741,161 @@
> return rc;
> }
>
> +int hvm_memory_event_cr0(unsigned long value, unsigned long old) {
> + struct vcpu* v = current;
> + struct domain *d = v->domain;
> + mem_event_request_t req;
> +
> + long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR0];
> +
> + if ( p & HVMPME_MODE_MASK )
> + {
> + int rc;
> +
> + if ( (p & HVMPME_onchangeonly) && value == old )
> + return 1;
> +
> + rc = mem_event_check_ring(d);
> + if ( rc )
> + return rc;
> +
> + memset(&req, 0, sizeof(req));
> + req.type = MEM_EVENT_TYPE_ACCESS;
> + req.reason = MEM_EVENT_REASON_CR0;
> +
> + if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
> + {
> + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
> + vcpu_pause_nosync(v);
> + }
> +
> + req.gfn = value;
> + req.vcpu_id = v->vcpu_id;
> +
> + mem_event_put_request(d, &req);
> +
> + return 1;
> + }
> + return 0;
> +}
> +
> +int hvm_memory_event_cr3(unsigned long value, unsigned long old) {
> + struct vcpu* v = current;
> + struct domain *d = v->domain;
> + mem_event_request_t req;
> +
> + long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3];
> +
> + if ( p & HVMPME_MODE_MASK )
> + {
> + int rc;
> +
> + if ( (p & HVMPME_onchangeonly) && value == old )
> + return 1;
> +
> + rc = mem_event_check_ring(d);
> + if ( rc )
> + return rc;
> +
> + memset(&req, 0, sizeof(req));
> + req.type = MEM_EVENT_TYPE_ACCESS;
> + req.reason = MEM_EVENT_REASON_CR3;
> +
> + if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
> + {
> + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
> + vcpu_pause_nosync(v);
> + }
> +
> + req.gfn = value;
> + req.vcpu_id = v->vcpu_id;
> +
> + mem_event_put_request(d, &req);
> +
> + return 1;
> + }
> + return 0;
> +}
> +
> +int hvm_memory_event_cr4(unsigned long value, unsigned long old) {
> + struct vcpu* v = current;
> + struct domain *d = v->domain;
> + mem_event_request_t req;
> +
> + long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR4];
> +
> + if ( p & HVMPME_MODE_MASK )
> + {
> + int rc;
> +
> + if ( (p & HVMPME_onchangeonly) && value == old )
> + return 1;
> +
> + rc = mem_event_check_ring(d);
> + if ( rc )
> + return rc;
> +
> + memset(&req, 0, sizeof(req));
> + req.type = MEM_EVENT_TYPE_ACCESS;
> + req.reason = MEM_EVENT_REASON_CR4;
> +
> + if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
> + {
> + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
> + vcpu_pause_nosync(v);
> + }
> +
> + req.gfn = value;
> + req.vcpu_id = v->vcpu_id;
> +
> + mem_event_put_request(d, &req);
> +
> + return 1;
> + }
> + return 0;
> +}
> +
> +int hvm_memory_event_int3(unsigned long gla) {
> + struct vcpu* v = current;
> + struct domain *d = v->domain;
> + mem_event_request_t req;
> +
> + long p = d->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
> +
> + if ( p & HVMPME_MODE_MASK )
> + {
> + uint32_t pfec = PFEC_page_present;
> + unsigned long gfn;
> + int rc;
> +
> + rc = mem_event_check_ring(d);
> + if ( rc )
> + return rc;
> +
> + gfn = paging_gva_to_gfn(current, gla, &pfec);
> +
> + memset(&req, 0, sizeof(req));
> + req.type = MEM_EVENT_TYPE_ACCESS;
> + req.reason = MEM_EVENT_REASON_INT3;
> +
> + if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
> + {
> + req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
> + vcpu_pause_nosync(v);
> + }
> +
> + req.gfn = gfn;
> + req.offset = gla & ((1 << PAGE_SHIFT) - 1);
> + req.gla = gla;
> + req.gla_valid = 1;
> + req.vcpu_id = v->vcpu_id;
> +
> + mem_event_put_request(d, &req);
> +
> + return 1;
> + }
> + return 0;
> +}
>
> /*
> * Local variables:
> diff -r 3500724e8052 -r c1866aff7a5f xen/arch/x86/hvm/vmx/vmcs.c
> --- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Jan 04 12:35:49 2011 -0800
> @@ -1082,7 +1082,9 @@
> hvm_asid_flush_vcpu(v);
> }
>
> - debug_state = v->domain->debugger_attached;
> + debug_state = v->domain->debugger_attached
> + || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
> +
> if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
> {
> v->arch.hvm_vcpu.debug_state_latch = debug_state;
> diff -r 3500724e8052 -r c1866aff7a5f xen/arch/x86/hvm/vmx/vmx.c
> --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 04 12:34:06 2011 -0800
> +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 04 12:35:49 2011 -0800
> @@ -1064,12 +1064,16 @@
>
> if ( paging_mode_hap(v->domain) )
> {
> - /* We manage GUEST_CR3 when guest CR0.PE is zero. */
> + /* We manage GUEST_CR3 when guest CR0.PE is zero or when cr3 memevents are on */
> uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
> CPU_BASED_CR3_STORE_EXITING);
> v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
> if ( !hvm_paging_enabled(v) )
> v->arch.hvm_vmx.exec_control |= cr3_ctls;
> +
> + if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
> + v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
> +
> vmx_update_cpu_exec_control(v);
>
> /* Changing CR0.PE can change some bits in real CR4. */
> @@ -1252,9 +1256,12 @@
> unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
> struct vcpu *curr = current;
>
> + int type = X86_EVENTTYPE_HW_EXCEPTION;
> +
> switch ( trap )
> {
> case TRAP_debug:
> + type = X86_EVENTTYPE_SW_EXCEPTION;
> if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
> {
> __restore_debug_registers(curr);
> @@ -1269,6 +1276,9 @@
> domain_pause_for_debugger();
> return;
> }
> +
> + type = X86_EVENTTYPE_SW_EXCEPTION;
> + __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
> }
>
> if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
> @@ -1279,7 +1289,7 @@
> error_code = 0;
> }
>
> - __vmx_inject_exception(trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
> + __vmx_inject_exception(trap, type, error_code);
>
> if ( trap == TRAP_page_fault )
> HVMTRACE_LONG_2D(PF_INJECT, error_code,
> @@ -1565,6 +1575,8 @@
> unsigned long value;
> struct vcpu *v = current;
> struct vlapic *vlapic = vcpu_vlapic(v);
> + int rc = 0;
> + unsigned long old;
>
> switch ( gp )
> {
> @@ -1589,13 +1601,25 @@
> switch ( cr )
> {
> case 0:
> - return !hvm_set_cr0(value);
> + old = v->arch.hvm_vcpu.guest_cr[0];
> + rc = !hvm_set_cr0(value);
> + if (rc)
> + hvm_memory_event_cr0(value, old);
> + return rc;
>
> case 3:
> - return !hvm_set_cr3(value);
> + old = v->arch.hvm_vcpu.guest_cr[3];
> + rc = !hvm_set_cr3(value);
> + if (rc)
> + hvm_memory_event_cr3(value, old);
> + return rc;
>
> case 4:
> - return !hvm_set_cr4(value);
> + old = v->arch.hvm_vcpu.guest_cr[4];
> + rc = !hvm_set_cr4(value);
> + if (rc)
> + hvm_memory_event_cr4(value, old);
> + return rc;
>
> case 8:
> vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
> @@ -1676,11 +1700,17 @@
> cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
> mov_from_cr(cr, gp, regs);
> break;
> - case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
> + case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
> + {
> + unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
> v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
> vmx_update_guest_cr(v, 0);
> +
> + hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
> +
> HVMTRACE_0D(CLTS);
> break;
> + }
> case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
> value = v->arch.hvm_vcpu.guest_cr[0];
> /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
> @@ -2350,13 +2380,29 @@
> goto exit_and_crash;
> domain_pause_for_debugger();
> break;
> - case TRAP_int3:
> - if ( !v->domain->debugger_attached )
> - goto exit_and_crash;
> - update_guest_eip(); /* Safe: INT3 */
> - current->arch.gdbsx_vcpu_event = TRAP_int3;
> - domain_pause_for_debugger();
> - break;
> + case TRAP_int3:
> + {
> + if ( v->domain->debugger_attached )
> + {
> + update_guest_eip(); /* Safe: INT3 */
> + current->arch.gdbsx_vcpu_event = TRAP_int3;
> + domain_pause_for_debugger();
> + break;
> + }
> + else {
> + int handled = hvm_memory_event_int3(regs->eip);
> +
> + if ( handled < 0 )
> + {
> + vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
> + break;
> + }
> + else if ( handled )
> + break;
> + }
> +
> + goto exit_and_crash;
> + }
> case TRAP_no_device:
> vmx_fpu_dirty_intercept();
> break;
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel