# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID bd6d4a499e47c6a90ff0e06d242f7c2b47b12e08
# Parent 21f8c507da293d8c707071bafffeb2e9100f3922
[HVM] MMIO/PIO fixes and cleanups.
1. Fix MMIO/PIO cross page boundary copy for MOVS and OUTS/INS
handling.
2. Clean up send_mmio_req/send_pio_req interface.
3. Clean up handle_mmio.
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Xiaowei Yang <xiaowei.yang@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
---
xen/arch/x86/hvm/intercept.c | 98 ++--
xen/arch/x86/hvm/io.c | 37 -
xen/arch/x86/hvm/platform.c | 772 ++++++++++++++++++--------------------
xen/arch/x86/hvm/svm/svm.c | 38 +
xen/arch/x86/hvm/vmx/vmx.c | 86 ++--
xen/arch/x86/mm/shadow/multi.c | 2
xen/include/asm-x86/hvm/io.h | 42 --
xen/include/asm-x86/hvm/support.h | 4
8 files changed, 535 insertions(+), 544 deletions(-)
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/hvm/intercept.c Wed Oct 18 15:13:41 2006 +0100
@@ -61,49 +61,39 @@ static inline void hvm_mmio_access(struc
hvm_mmio_read_t read_handler,
hvm_mmio_write_t write_handler)
{
- ioreq_t *req;
- vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
unsigned int tmp1, tmp2;
unsigned long data;
- if (vio == NULL) {
- printk("vlapic_access: bad shared page\n");
- domain_crash_synchronous();
- }
-
- req = &vio->vp_ioreq;
-
- switch (req->type) {
+ switch ( p->type ) {
case IOREQ_TYPE_COPY:
{
- int sign = (req->df) ? -1 : 1, i;
-
- if (!req->pdata_valid) {
- if (req->dir == IOREQ_READ){
- req->u.data = read_handler(v, req->addr, req->size);
- } else { /* req->dir != IOREQ_READ */
- write_handler(v, req->addr, req->size, req->u.data);
- }
- } else { /* !req->pdata_valid */
- if (req->dir == IOREQ_READ) {
- for (i = 0; i < req->count; i++) {
+ if ( !p->pdata_valid ) {
+ if ( p->dir == IOREQ_READ )
+ p->u.data = read_handler(v, p->addr, p->size);
+ else /* p->dir == IOREQ_WRITE */
+ write_handler(v, p->addr, p->size, p->u.data);
+ } else { /* !p->pdata_valid */
+ int i, sign = (p->df) ? -1 : 1;
+
+ if ( p->dir == IOREQ_READ ) {
+ for ( i = 0; i < p->count; i++ ) {
data = read_handler(v,
- req->addr + (sign * i * req->size),
- req->size);
- (void)hvm_copy_to_guest_virt(
- (unsigned long)p->u.pdata + (sign * i * req->size),
+ p->addr + (sign * i * p->size),
+ p->size);
+ (void)hvm_copy_to_guest_phys(
+ (unsigned long)p->u.pdata + (sign * i * p->size),
&data,
p->size);
}
- } else { /* !req->dir == IOREQ_READ */
- for (i = 0; i < req->count; i++) {
- (void)hvm_copy_from_guest_virt(
+ } else {/* p->dir == IOREQ_WRITE */
+ for ( i = 0; i < p->count; i++ ) {
+ (void)hvm_copy_from_guest_phys(
&data,
- (unsigned long)p->u.pdata + (sign * i * req->size),
+ (unsigned long)p->u.pdata + (sign * i * p->size),
p->size);
write_handler(v,
- req->addr + (sign * i * req->size),
- req->size, data);
+ p->addr + (sign * i * p->size),
+ p->size, data);
}
}
}
@@ -111,44 +101,44 @@ static inline void hvm_mmio_access(struc
}
case IOREQ_TYPE_AND:
- tmp1 = read_handler(v, req->addr, req->size);
- if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 & (unsigned long) req->u.data;
- write_handler(v, req->addr, req->size, tmp2);
- }
- req->u.data = tmp1;
+ tmp1 = read_handler(v, p->addr, p->size);
+ if ( p->dir == IOREQ_WRITE ) {
+ tmp2 = tmp1 & (unsigned long) p->u.data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->u.data = tmp1;
break;
case IOREQ_TYPE_OR:
- tmp1 = read_handler(v, req->addr, req->size);
- if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 | (unsigned long) req->u.data;
- write_handler(v, req->addr, req->size, tmp2);
- }
- req->u.data = tmp1;
+ tmp1 = read_handler(v, p->addr, p->size);
+ if ( p->dir == IOREQ_WRITE ) {
+ tmp2 = tmp1 | (unsigned long) p->u.data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->u.data = tmp1;
break;
case IOREQ_TYPE_XOR:
- tmp1 = read_handler(v, req->addr, req->size);
- if (req->dir == IOREQ_WRITE) {
- tmp2 = tmp1 ^ (unsigned long) req->u.data;
- write_handler(v, req->addr, req->size, tmp2);
- }
- req->u.data = tmp1;
+ tmp1 = read_handler(v, p->addr, p->size);
+ if ( p->dir == IOREQ_WRITE ) {
+ tmp2 = tmp1 ^ (unsigned long) p->u.data;
+ write_handler(v, p->addr, p->size, tmp2);
+ }
+ p->u.data = tmp1;
break;
case IOREQ_TYPE_XCHG:
- /*
+ /*
* Note that we don't need to be atomic here since VCPU is accessing
* its own local APIC.
*/
- tmp1 = read_handler(v, req->addr, req->size);
- write_handler(v, req->addr, req->size, (unsigned long) req->u.data);
- req->u.data = tmp1;
+ tmp1 = read_handler(v, p->addr, p->size);
+ write_handler(v, p->addr, p->size, (unsigned long) p->u.data);
+ p->u.data = tmp1;
break;
default:
- printk("error ioreq type for local APIC %x\n", req->type);
+ printk("hvm_mmio_access: error ioreq type %x\n", p->type);
domain_crash_synchronous();
break;
}
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/hvm/io.c Wed Oct 18 15:13:41 2006 +0100
@@ -369,18 +369,18 @@ static void hvm_pio_assist(struct cpu_us
{
if ( pio_opp->flags & REPZ )
regs->ecx -= p->count;
+
if ( p->dir == IOREQ_READ )
{
- regs->edi += sign * p->count * p->size;
if ( pio_opp->flags & OVERLAP )
{
- unsigned long addr = regs->edi;
- if (hvm_realmode(current))
- addr += regs->es << 4;
- if (sign > 0)
- addr -= p->size;
- (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+ unsigned long addr = pio_opp->addr;
+ if ( hvm_paging_enabled(current) )
+ (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+ else
+ (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
}
+ regs->edi += sign * p->count * p->size;
}
else /* p->dir == IOREQ_WRITE */
{
@@ -485,19 +485,22 @@ static void hvm_mmio_assist(struct cpu_u
case INSTR_MOVS:
sign = p->df ? -1 : 1;
+
+ if (mmio_opp->flags & REPZ)
+ regs->ecx -= p->count;
+
+ if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
+ unsigned long addr = mmio_opp->addr;
+
+ if (hvm_paging_enabled(current))
+ (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+ else
+ (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
+ }
+
regs->esi += sign * p->count * p->size;
regs->edi += sign * p->count * p->size;
- if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
- unsigned long addr = regs->edi;
-
- if (sign > 0)
- addr -= p->size;
- (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
- }
-
- if (mmio_opp->flags & REPZ)
- regs->ecx -= p->count;
break;
case INSTR_STOS:
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c Wed Oct 18 15:13:41 2006 +0100
@@ -30,6 +30,7 @@
#include <asm/regs.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/io.h>
#include <public/hvm/ioreq.h>
#include <xen/lib.h>
@@ -39,10 +40,13 @@
#define DECODE_success 1
#define DECODE_failure 0
+#define mk_operand(size_reg, index, seg, flag) \
+ (((size_reg) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
+
#if defined (__x86_64__)
static inline long __get_reg_value(unsigned long reg, int size)
{
- switch(size) {
+ switch ( size ) {
case BYTE_64:
return (char)(reg & 0xFF);
case WORD:
@@ -59,8 +63,8 @@ static inline long __get_reg_value(unsig
long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
- if (size == BYTE) {
- switch (index) {
+ if ( size == BYTE ) {
+ switch ( index ) {
case 0: /* %al */
return (char)(regs->rax & 0xFF);
case 1: /* %cl */
@@ -84,7 +88,7 @@ long get_reg_value(int size, int index,
/* NOTREACHED */
}
- switch (index) {
+ switch ( index ) {
case 0: return __get_reg_value(regs->rax, size);
case 1: return __get_reg_value(regs->rcx, size);
case 2: return __get_reg_value(regs->rdx, size);
@@ -109,7 +113,7 @@ long get_reg_value(int size, int index,
#elif defined (__i386__)
static inline long __get_reg_value(unsigned long reg, int size)
{
- switch(size) {
+ switch ( size ) {
case WORD:
return (short)(reg & 0xFFFF);
case LONG:
@@ -122,8 +126,8 @@ static inline long __get_reg_value(unsig
long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
- if (size == BYTE) {
- switch (index) {
+ if ( size == BYTE ) {
+ switch ( index ) {
case 0: /* %al */
return (char)(regs->eax & 0xFF);
case 1: /* %cl */
@@ -146,7 +150,7 @@ long get_reg_value(int size, int index,
}
}
- switch (index) {
+ switch ( index ) {
case 0: return __get_reg_value(regs->eax, size);
case 1: return __get_reg_value(regs->ecx, size);
case 2: return __get_reg_value(regs->edx, size);
@@ -163,19 +167,21 @@ long get_reg_value(int size, int index,
#endif
static inline unsigned char *check_prefix(unsigned char *inst,
- struct instruction *thread_inst,
- unsigned char *rex_p)
-{
- while (1) {
- switch (*inst) {
+ struct hvm_io_op *mmio_op,
+ unsigned char *op_size,
+ unsigned char *rex_p)
+{
+ while ( 1 ) {
+ switch ( *inst ) {
/* rex prefix for em64t instructions */
case 0x40 ... 0x4e:
*rex_p = *inst;
break;
case 0xf3: /* REPZ */
- thread_inst->flags = REPZ;
+ mmio_op->flags = REPZ;
break;
case 0xf2: /* REPNZ */
- thread_inst->flags = REPNZ;
+ mmio_op->flags = REPNZ;
break;
case 0xf0: /* LOCK */
break;
@@ -185,10 +191,10 @@ static inline unsigned char *check_prefi
case 0x26: /* ES */
case 0x64: /* FS */
case 0x65: /* GS */
- thread_inst->seg_sel = *inst;
+ //mmio_op->seg_sel = *inst;
break;
case 0x66: /* 32bit->16bit */
- thread_inst->op_size = WORD;
+ *op_size = WORD;
break;
case 0x67:
break;
@@ -199,7 +205,7 @@ static inline unsigned char *check_prefi
}
}
-static inline unsigned long get_immediate(int op16,const unsigned char *inst,
- int op_size)
+static inline unsigned long get_immediate(int op16, const unsigned char *inst,
+ int op_size)
{
int mod, reg, rm;
unsigned long val = 0;
@@ -210,14 +216,14 @@ static inline unsigned long get_immediat
rm = *inst & 7;
inst++; //skip ModR/M byte
- if (mod != 3 && rm == 4) {
+ if ( mod != 3 && rm == 4 ) {
inst++; //skip SIB byte
}
- switch(mod) {
+ switch ( mod ) {
case 0:
- if (rm == 5 || rm == 4) {
- if (op16)
+ if ( rm == 5 || rm == 4 ) {
+ if ( op16 )
inst = inst + 2; //disp16, skip 2 bytes
else
inst = inst + 4; //disp32, skip 4 bytes
@@ -227,17 +233,17 @@ static inline unsigned long get_immediat
inst++; //disp8, skip 1 byte
break;
case 2:
- if (op16)
+ if ( op16 )
inst = inst + 2; //disp16, skip 2 bytes
else
inst = inst + 4; //disp32, skip 4 bytes
break;
}
- if (op_size == QUAD)
+ if ( op_size == QUAD )
op_size = LONG;
- for (i = 0; i < op_size; i++) {
+ for ( i = 0; i < op_size; i++ ) {
val |= (*inst++ & 0xff) << (8 * i);
}
@@ -257,7 +263,7 @@ static inline int get_index(const unsign
rex_b = rex & 1;
//Only one operand in the instruction is register
- if (mod == 3) {
+ if ( mod == 3 ) {
return (rm + (rex_b << 3));
} else {
return (reg + (rex_r << 3));
@@ -265,53 +271,52 @@ static inline int get_index(const unsign
return 0;
}
-static void init_instruction(struct instruction *mmio_inst)
-{
- mmio_inst->instr = 0;
- mmio_inst->op_size = 0;
- mmio_inst->immediate = 0;
- mmio_inst->seg_sel = 0;
-
- mmio_inst->operand[0] = 0;
- mmio_inst->operand[1] = 0;
-
- mmio_inst->flags = 0;
-}
-
-#define GET_OP_SIZE_FOR_BYTE(op_size) \
+static void init_instruction(struct hvm_io_op *mmio_op)
+{
+ mmio_op->instr = 0;
+
+ mmio_op->flags = 0;
+ //mmio_op->seg_sel = 0;
+
+ mmio_op->operand[0] = 0;
+ mmio_op->operand[1] = 0;
+ mmio_op->immediate = 0;
+}
+
+#define GET_OP_SIZE_FOR_BYTE(size_reg) \
do { \
- if (rex) \
- op_size = BYTE_64; \
+ if ( rex ) \
+ (size_reg) = BYTE_64; \
else \
- op_size = BYTE; \
- } while(0)
+ (size_reg) = BYTE; \
+ } while( 0 )
#define GET_OP_SIZE_FOR_NONEBYTE(op_size) \
do { \
- if (rex & 0x8) \
- op_size = QUAD; \
- else if (op_size != WORD) \
- op_size = LONG; \
- } while(0)
+ if ( rex & 0x8 ) \
+ (op_size) = QUAD; \
+ else if ( (op_size) != WORD ) \
+ (op_size) = LONG; \
+ } while( 0 )
/*
* Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
*/
-static int mem_acc(unsigned char size, struct instruction *instr)
-{
- instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
+static inline int mem_acc(unsigned char size, struct hvm_io_op *mmio)
+{
+ mmio->operand[0] = mk_operand(size, 0, 0, MEMORY);
+ mmio->operand[1] = mk_operand(size, 0, 0, REGISTER);
return DECODE_success;
}
/*
* Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
*/
-static int acc_mem(unsigned char size, struct instruction *instr)
-{
- instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
- instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
+static inline int acc_mem(unsigned char size, struct hvm_io_op *mmio)
+{
+ mmio->operand[0] = mk_operand(size, 0, 0, REGISTER);
+ mmio->operand[1] = mk_operand(size, 0, 0, MEMORY);
return DECODE_success;
}
@@ -319,12 +324,12 @@ static int acc_mem(unsigned char size, s
* Decode mem,reg operands (as in <opcode> r32/16, m32/16)
*/
static int mem_reg(unsigned char size, unsigned char *opcode,
- struct instruction *instr, unsigned char rex)
+ struct hvm_io_op *mmio_op, unsigned char rex)
{
int index = get_index(opcode + 1, rex);
- instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(size, index, 0, REGISTER);
+ mmio_op->operand[0] = mk_operand(size, 0, 0, MEMORY);
+ mmio_op->operand[1] = mk_operand(size, index, 0, REGISTER);
return DECODE_success;
}
@@ -332,263 +337,265 @@ static int mem_reg(unsigned char size, u
* Decode reg,mem operands (as in <opcode> m32/16, r32/16)
*/
static int reg_mem(unsigned char size, unsigned char *opcode,
- struct instruction *instr, unsigned char rex)
+ struct hvm_io_op *mmio_op, unsigned char rex)
{
int index = get_index(opcode + 1, rex);
- instr->operand[0] = mk_operand(size, index, 0, REGISTER);
- instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
+ mmio_op->operand[0] = mk_operand(size, index, 0, REGISTER);
+ mmio_op->operand[1] = mk_operand(size, 0, 0, MEMORY);
return DECODE_success;
}
-static int hvm_decode(int realmode, unsigned char *opcode, struct instruction *instr)
+static int hvm_decode(int realmode, unsigned char *opcode,
+ struct hvm_io_op *mmio_op, unsigned char *op_size)
{
unsigned char size_reg = 0;
unsigned char rex = 0;
int index;
- init_instruction(instr);
-
- opcode = check_prefix(opcode, instr, &rex);
-
- if (realmode) { /* meaning is reversed */
- if (instr->op_size == WORD)
- instr->op_size = LONG;
- else if (instr->op_size == LONG)
- instr->op_size = WORD;
- else if (instr->op_size == 0)
- instr->op_size = WORD;
- }
-
- switch (*opcode) {
+ *op_size = 0;
+ init_instruction(mmio_op);
+
+ opcode = check_prefix(opcode, mmio_op, op_size, &rex);
+
+ if ( realmode ) { /* meaning is reversed */
+ if ( *op_size == WORD )
+ *op_size = LONG;
+ else if ( *op_size == LONG )
+ *op_size = WORD;
+ else if ( *op_size == 0 )
+ *op_size = WORD;
+ }
+
+ switch ( *opcode ) {
case 0x0A: /* or r8, m8 */
- instr->instr = INSTR_OR;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_OR;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_reg(size_reg, opcode, instr, rex);
+ return mem_reg(size_reg, opcode, mmio_op, rex);
case 0x0B: /* or m32/16, r32/16 */
- instr->instr = INSTR_OR;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_OR;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x20: /* and r8, m8 */
- instr->instr = INSTR_AND;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_AND;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return reg_mem(size_reg, opcode, instr, rex);
+ return reg_mem(size_reg, opcode, mmio_op, rex);
case 0x21: /* and r32/16, m32/16 */
- instr->instr = INSTR_AND;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_AND;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return reg_mem(*op_size, opcode, mmio_op, rex);
case 0x22: /* and m8, r8 */
- instr->instr = INSTR_AND;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_AND;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_reg(size_reg, opcode, instr, rex);
+ return mem_reg(size_reg, opcode, mmio_op, rex);
case 0x23: /* and m32/16, r32/16 */
- instr->instr = INSTR_AND;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_AND;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x2B: /* sub m32/16, r32/16 */
- instr->instr = INSTR_SUB;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_SUB;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x30: /* xor r8, m8 */
- instr->instr = INSTR_XOR;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_XOR;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return reg_mem(size_reg, opcode, instr, rex);
+ return reg_mem(size_reg, opcode, mmio_op, rex);
case 0x31: /* xor r32/16, m32/16 */
- instr->instr = INSTR_XOR;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_XOR;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return reg_mem(*op_size, opcode, mmio_op, rex);
case 0x32: /* xor m8, r8*/
- instr->instr = INSTR_XOR;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_XOR;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_reg(size_reg, opcode, instr, rex);
+ return mem_reg(size_reg, opcode, mmio_op, rex);
case 0x39: /* cmp r32/16, m32/16 */
- instr->instr = INSTR_CMP;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_CMP;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return reg_mem(*op_size, opcode, mmio_op, rex);
case 0x3A: /* cmp r8, r8/m8 */
- instr->instr = INSTR_CMP;
- GET_OP_SIZE_FOR_BYTE(instr->op_size);
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_CMP;
+ *op_size = BYTE;
+ GET_OP_SIZE_FOR_BYTE(size_reg);
+ return reg_mem(size_reg, opcode, mmio_op, rex);
case 0x3B: /* cmp m32/16, r32/16 */
- instr->instr = INSTR_CMP;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_CMP;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x80:
case 0x81:
case 0x83:
- {
- unsigned char ins_subtype = (opcode[1] >> 3) & 7;
-
- if (opcode[0] == 0x80) {
- GET_OP_SIZE_FOR_BYTE(size_reg);
- instr->op_size = BYTE;
- } else if (opcode[0] == 0x81) {
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- size_reg = instr->op_size;
- } else if (opcode[0] == 0x83) {
- GET_OP_SIZE_FOR_NONEBYTE(size_reg);
- instr->op_size = size_reg;
- }
-
- /* opcode 0x83 always has a single byte operand */
- if (opcode[0] == 0x83)
- instr->immediate =
- (signed char)get_immediate(realmode, opcode+1, BYTE);
- else
- instr->immediate =
- get_immediate(realmode, opcode+1, instr->op_size);
-
- instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
- instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
-
- switch (ins_subtype) {
- case 7: /* cmp $imm, m32/16 */
- instr->instr = INSTR_CMP;
- return DECODE_success;
-
- case 1: /* or $imm, m32/16 */
- instr->instr = INSTR_OR;
- return DECODE_success;
-
- default:
- printk("%x/%x, This opcode isn't handled yet!\n",
- *opcode, ins_subtype);
- return DECODE_failure;
- }
- }
+ {
+ unsigned char ins_subtype = (opcode[1] >> 3) & 7;
+
+ if ( opcode[0] == 0x80 ) {
+ *op_size = BYTE;
+ GET_OP_SIZE_FOR_BYTE(size_reg);
+ } else {
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ size_reg = *op_size;
+ }
+
+ /* opcode 0x83 always has a single byte operand */
+ if ( opcode[0] == 0x83 )
+ mmio_op->immediate =
+ (signed char)get_immediate(realmode, opcode + 1, BYTE);
+ else
+ mmio_op->immediate =
+ get_immediate(realmode, opcode + 1, *op_size);
+
+ mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
+ mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
+
+ switch ( ins_subtype ) {
+ case 7: /* cmp $imm, m32/16 */
+ mmio_op->instr = INSTR_CMP;
+ return DECODE_success;
+
+ case 1: /* or $imm, m32/16 */
+ mmio_op->instr = INSTR_OR;
+ return DECODE_success;
+
+ default:
+ printk("%x/%x, This opcode isn't handled yet!\n",
+ *opcode, ins_subtype);
+ return DECODE_failure;
+ }
+ }
case 0x84: /* test m8, r8 */
- instr->instr = INSTR_TEST;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_TEST;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_reg(size_reg, opcode, instr, rex);
+ return mem_reg(size_reg, opcode, mmio_op, rex);
case 0x85: /* text m16/32, r16/32 */
- instr->instr = INSTR_TEST;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_TEST;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x87: /* xchg {r/m16|r/m32}, {m/r16|m/r32} */
- instr->instr = INSTR_XCHG;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- if (((*(opcode+1)) & 0xc7) == 5)
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_XCHG;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ if ( ((*(opcode+1)) & 0xc7) == 5 )
+ return reg_mem(*op_size, opcode, mmio_op, rex);
else
- return mem_reg(instr->op_size, opcode, instr, rex);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0x88: /* mov r8, m8 */
- instr->instr = INSTR_MOV;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_MOV;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return reg_mem(size_reg, opcode, instr, rex);
+ return reg_mem(size_reg, opcode, mmio_op, rex);
case 0x89: /* mov r32/16, m32/16 */
- instr->instr = INSTR_MOV;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return reg_mem(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_MOV;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return reg_mem(*op_size, opcode, mmio_op, rex);
case 0x8A: /* mov m8, r8 */
- instr->instr = INSTR_MOV;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_MOV;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_reg(size_reg, opcode, instr, rex);
+ return mem_reg(size_reg, opcode, mmio_op, rex);
case 0x8B: /* mov m32/16, r32/16 */
- instr->instr = INSTR_MOV;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_reg(instr->op_size, opcode, instr, rex);
+ mmio_op->instr = INSTR_MOV;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_reg(*op_size, opcode, mmio_op, rex);
case 0xA0: /* mov <addr>, al */
- instr->instr = INSTR_MOV;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_MOV;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return mem_acc(size_reg, instr);
+ return mem_acc(size_reg, mmio_op);
case 0xA1: /* mov <addr>, ax/eax */
- instr->instr = INSTR_MOV;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return mem_acc(instr->op_size, instr);
+ mmio_op->instr = INSTR_MOV;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return mem_acc(*op_size, mmio_op);
case 0xA2: /* mov al, <addr> */
- instr->instr = INSTR_MOV;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_MOV;
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- return acc_mem(size_reg, instr);
+ return acc_mem(size_reg, mmio_op);
case 0xA3: /* mov ax/eax, <addr> */
- instr->instr = INSTR_MOV;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- return acc_mem(instr->op_size, instr);
+ mmio_op->instr = INSTR_MOV;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ return acc_mem(*op_size, mmio_op);
case 0xA4: /* movsb */
- instr->instr = INSTR_MOVS;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_MOVS;
+ *op_size = BYTE;
return DECODE_success;
case 0xA5: /* movsw/movsl */
- instr->instr = INSTR_MOVS;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_MOVS;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
return DECODE_success;
case 0xAA: /* stosb */
- instr->instr = INSTR_STOS;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_STOS;
+ *op_size = BYTE;
return DECODE_success;
case 0xAB: /* stosw/stosl */
- instr->instr = INSTR_STOS;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_STOS;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
return DECODE_success;
case 0xAC: /* lodsb */
- instr->instr = INSTR_LODS;
- instr->op_size = BYTE;
+ mmio_op->instr = INSTR_LODS;
+ *op_size = BYTE;
return DECODE_success;
case 0xAD: /* lodsw/lodsl */
- instr->instr = INSTR_LODS;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_LODS;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
return DECODE_success;
case 0xC6:
- if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
- instr->instr = INSTR_MOV;
- instr->op_size = BYTE;
-
- instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
- instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
- instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+ if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm8, m8 */
+ mmio_op->instr = INSTR_MOV;
+ *op_size = BYTE;
+
+ mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
+ mmio_op->immediate =
+ get_immediate(realmode, opcode + 1, *op_size);
+ mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
return DECODE_success;
} else
return DECODE_failure;
case 0xC7:
- if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
- instr->instr = INSTR_MOV;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
-
- instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
- instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
- instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+ if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm16/32, m16/32 */
+ mmio_op->instr = INSTR_MOV;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+
+ mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
+ mmio_op->immediate =
+ get_immediate(realmode, opcode + 1, *op_size);
+ mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
return DECODE_success;
} else
@@ -596,20 +603,21 @@ static int hvm_decode(int realmode, unsi
case 0xF6:
case 0xF7:
- if (((opcode[1] >> 3) & 7) == 0) { /* test $imm8/16/32, m8/16/32 */
- instr->instr = INSTR_TEST;
-
- if (opcode[0] == 0xF6) {
+ if ( ((opcode[1] >> 3) & 7) == 0 ) { /* test $imm8/16/32, m8/16/32 */
+ mmio_op->instr = INSTR_TEST;
+
+ if ( opcode[0] == 0xF6 ) {
+ *op_size = BYTE;
GET_OP_SIZE_FOR_BYTE(size_reg);
- instr->op_size = BYTE;
} else {
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- size_reg = instr->op_size;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ size_reg = *op_size;
}
- instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
- instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
- instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
+ mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
+ mmio_op->immediate =
+ get_immediate(realmode, opcode + 1, *op_size);
+ mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
return DECODE_success;
} else
@@ -623,59 +631,59 @@ static int hvm_decode(int realmode, unsi
return DECODE_failure;
}
- switch (*++opcode) {
+ switch ( *++opcode ) {
case 0xB6: /* movzx m8, r16/r32/r64 */
- instr->instr = INSTR_MOVZX;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_MOVZX;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
index = get_index(opcode + 1, rex);
- instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
+ mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
+ mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
return DECODE_success;
case 0xB7: /* movzx m16/m32, r32/r64 */
- instr->instr = INSTR_MOVZX;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_MOVZX;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
index = get_index(opcode + 1, rex);
- if (rex & 0x8)
- instr->operand[0] = mk_operand(LONG, 0, 0, MEMORY);
+ if ( rex & 0x8 )
+ mmio_op->operand[0] = mk_operand(LONG, 0, 0, MEMORY);
else
- instr->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
+ mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
+ mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
return DECODE_success;
case 0xBE: /* movsx m8, r16/r32/r64 */
- instr->instr = INSTR_MOVSX;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_MOVSX;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
index = get_index(opcode + 1, rex);
- instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
+ mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
+ mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
return DECODE_success;
case 0xBF: /* movsx m16, r32/r64 */
- instr->instr = INSTR_MOVSX;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
+ mmio_op->instr = INSTR_MOVSX;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
index = get_index(opcode + 1, rex);
- instr->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
- instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
+ mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
+ mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
return DECODE_success;
case 0xA3: /* bt r32, m32 */
- instr->instr = INSTR_BT;
+ mmio_op->instr = INSTR_BT;
index = get_index(opcode + 1, rex);
- instr->op_size = LONG;
- instr->operand[0] = mk_operand(instr->op_size, index, 0, REGISTER);
- instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+ *op_size = LONG;
+ mmio_op->operand[0] = mk_operand(*op_size, index, 0, REGISTER);
+ mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
return DECODE_success;
case 0xBA:
- if (((opcode[1] >> 3) & 7) == 4) /* BT $imm8, m16/32/64 */
+ if ( ((opcode[1] >> 3) & 7) == 4 ) /* BT $imm8, m16/32/64 */
{
- instr->instr = INSTR_BT;
- GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
- instr->immediate =
- (signed char)get_immediate(realmode, opcode+1, BYTE);
- instr->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
- instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
+ mmio_op->instr = INSTR_BT;
+ GET_OP_SIZE_FOR_NONEBYTE(*op_size);
+ mmio_op->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
+ mmio_op->immediate =
+ (signed char)get_immediate(realmode, opcode + 1, BYTE);
+ mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
return DECODE_success;
}
else
@@ -692,9 +700,9 @@ static int hvm_decode(int realmode, unsi
int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
- if (inst_len > MAX_INST_LEN || inst_len <= 0)
+ if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
return 0;
- if (hvm_copy_from_guest_virt(buf, guest_eip, inst_len))
+ if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len) )
return 0;
return inst_len;
}
@@ -723,8 +731,8 @@ void hvm_prod_vcpu(struct vcpu *v)
vcpu_unblock(v);
}
-void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
- unsigned long count, int size, long value, int dir, int pvalid)
+void send_pio_req(unsigned long port, unsigned long count, int size,
+ long value, int dir, int df, int pvalid)
{
struct vcpu *v = current;
vcpu_iodata_t *vio;
@@ -753,7 +761,7 @@ void send_pio_req(struct cpu_user_regs *
p->size = size;
p->addr = port;
p->count = count;
- p->df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
+ p->df = df;
p->io_count++;
@@ -775,21 +783,18 @@ void send_pio_req(struct cpu_user_regs *
hvm_send_assist_req(v);
}
-static void send_mmio_req(
- unsigned char type, unsigned long gpa,
- unsigned long count, int size, long value, int dir, int pvalid)
+static void send_mmio_req(unsigned char type, unsigned long gpa,
+ unsigned long count, int size, long value,
+ int dir, int df, int pvalid)
{
struct vcpu *v = current;
vcpu_iodata_t *vio;
ioreq_t *p;
- struct cpu_user_regs *regs;
-
- if (size == 0 || count == 0) {
+
+ if ( size == 0 || count == 0 ) {
printf("null mmio request? type %d, gpa %lx, count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
type, gpa, count, size, value, dir, pvalid);
}
-
- regs = ¤t->arch.hvm_vcpu.io_op.io_context;
vio = get_vio(v->domain, v->vcpu_id);
if (vio == NULL) {
@@ -809,7 +814,7 @@ static void send_mmio_req(
p->size = size;
p->addr = gpa;
p->count = count;
- p->df = regs->eflags & EF_DF ? 1 : 0;
+ p->df = df;
p->io_count++;
@@ -830,58 +835,58 @@ static void send_mmio_req(
hvm_send_assist_req(v);
}
-static void mmio_operands(int type, unsigned long gpa, struct instruction
*inst,
- struct hvm_io_op *mmio_opp, struct cpu_user_regs
*regs)
+static void mmio_operands(int type, unsigned long gpa,
+ struct hvm_io_op *mmio_op,
+ unsigned char op_size)
{
unsigned long value = 0;
- int index, size_reg;
-
- size_reg = operand_size(inst->operand[0]);
-
- mmio_opp->flags = inst->flags;
- mmio_opp->instr = inst->instr;
- mmio_opp->operand[0] = inst->operand[0]; /* source */
- mmio_opp->operand[1] = inst->operand[1]; /* destination */
- mmio_opp->immediate = inst->immediate;
-
- if (inst->operand[0] & REGISTER) { /* dest is memory */
- index = operand_index(inst->operand[0]);
+ int df, index, size_reg;
+ struct cpu_user_regs *regs = &mmio_op->io_context;
+
+ df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
+
+ size_reg = operand_size(mmio_op->operand[0]);
+
+ if ( mmio_op->operand[0] & REGISTER ) { /* dest is memory */
+ index = operand_index(mmio_op->operand[0]);
value = get_reg_value(size_reg, index, 0, regs);
- send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
- } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
- value = inst->immediate;
- send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
- } else if (inst->operand[0] & MEMORY) { /* dest is register */
+ send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
+ } else if ( mmio_op->operand[0] & IMMEDIATE ) { /* dest is memory */
+ value = mmio_op->immediate;
+ send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
+ } else if ( mmio_op->operand[0] & MEMORY ) { /* dest is register */
/* send the request and wait for the value */
- if ( (inst->instr == INSTR_MOVZX) || (inst->instr == INSTR_MOVSX) )
- send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, 0);
+ if ( (mmio_op->instr == INSTR_MOVZX) ||
+ (mmio_op->instr == INSTR_MOVSX) )
+ send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, df, 0);
else
- send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0);
+ send_mmio_req(type, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
} else {
- printk("mmio_operands: invalid operand\n");
+ printk("%s: invalid dest mode.\n", __func__);
domain_crash_synchronous();
}
}
#define GET_REPEAT_COUNT() \
- (mmio_inst.flags & REPZ ? (realmode ? regs->ecx & 0xFFFF : regs->ecx) : 1)
-
-void handle_mmio(unsigned long va, unsigned long gpa)
+ (mmio_op->flags & REPZ ? (realmode ? regs->ecx & 0xFFFF : regs->ecx) : 1)
+
+void handle_mmio(unsigned long gpa)
{
unsigned long inst_addr;
- struct hvm_io_op *mmio_opp;
+ struct hvm_io_op *mmio_op;
struct cpu_user_regs *regs;
- struct instruction mmio_inst;
- unsigned char inst[MAX_INST_LEN];
- int i, realmode, ret, inst_len;
+ unsigned char inst[MAX_INST_LEN], op_size;
+ int i, realmode, df, inst_len;
struct vcpu *v = current;
- mmio_opp = &v->arch.hvm_vcpu.io_op;
- regs = &mmio_opp->io_context;
+ mmio_op = &v->arch.hvm_vcpu.io_op;
+ regs = &mmio_op->io_context;
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
hvm_store_cpu_guest_regs(v, regs, NULL);
+
+ df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
inst_len = hvm_instruction_length(regs, hvm_guest_x86_mode(v));
if ( inst_len <= 0 )
@@ -891,25 +896,21 @@ void handle_mmio(unsigned long va, unsig
}
realmode = hvm_realmode(v);
- if (realmode)
+ if ( realmode )
inst_addr = (regs->cs << 4) + regs->eip;
else
inst_addr = regs->eip;
memset(inst, 0, MAX_INST_LEN);
- ret = inst_copy_from_guest(inst, inst_addr, inst_len);
- if (ret != inst_len) {
+ if ( inst_copy_from_guest(inst, inst_addr, inst_len) != inst_len ) {
printk("handle_mmio: failed to copy instruction\n");
domain_crash_synchronous();
}
- init_instruction(&mmio_inst);
-
- if (hvm_decode(realmode, inst, &mmio_inst) == DECODE_failure) {
+ if ( hvm_decode(realmode, inst, mmio_op, &op_size) == DECODE_failure ) {
printk("handle_mmio: failed to decode instruction\n");
- printk("mmio opcode: va 0x%lx, gpa 0x%lx, len %d:",
- va, gpa, inst_len);
- for (i = 0; i < inst_len; i++)
+ printk("mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
+ for ( i = 0; i < inst_len; i++ )
printk(" %02x", inst[i] & 0xFF);
printk("\n");
domain_crash_synchronous();
@@ -917,24 +918,23 @@ void handle_mmio(unsigned long va, unsig
regs->eip += inst_len; /* advance %eip */
- switch (mmio_inst.instr) {
+ switch ( mmio_op->instr ) {
case INSTR_MOV:
- mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
+ mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
break;
case INSTR_MOVS:
{
unsigned long count = GET_REPEAT_COUNT();
- unsigned long size = mmio_inst.op_size;
- int sign = regs->eflags & EF_DF ? -1 : 1;
+ int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
unsigned long addr = 0;
- int dir;
+ int dir, size = op_size;
ASSERT(count);
/* determine non-MMIO address */
- if (realmode) {
- if (((regs->es << 4) + (regs->edi & 0xFFFF)) == va) {
+ if ( realmode ) {
+ if ( ((regs->es << 4) + (regs->edi & 0xFFFF)) == gpa ) {
dir = IOREQ_WRITE;
addr = (regs->ds << 4) + (regs->esi & 0xFFFF);
} else {
@@ -942,7 +942,7 @@ void handle_mmio(unsigned long va, unsig
addr = (regs->es << 4) + (regs->edi & 0xFFFF);
}
} else {
- if (va == regs->edi) {
+ if ( gpa == regs->edi ) {
dir = IOREQ_WRITE;
addr = regs->esi;
} else {
@@ -951,58 +951,61 @@ void handle_mmio(unsigned long va, unsig
}
}
- mmio_opp->flags = mmio_inst.flags;
- mmio_opp->instr = mmio_inst.instr;
-
- if (addr & (size - 1))
- DPRINTK("Unaligned ioport access: %lx, %ld\n", addr, size);
+ if ( addr & (size - 1) )
+ DPRINTK("Unaligned ioport access: %lx, %d\n", addr, size);
/*
* In case of a movs spanning multiple pages, we break the accesses
* up into multiple pages (the device model works with non-continguous
* physical guest pages). To copy just one page, we adjust %ecx and
- * do not advance %eip so that the next "rep movs" copies the next
page.
+ * do not advance %eip so that the next rep;movs copies the next page.
* Unaligned accesses, for example movsl starting at PGSZ-2, are
* turned into a single copy where we handle the overlapping memory
* copy ourself. After this copy succeeds, "rep movs" is executed
* again.
*/
- if ((addr & PAGE_MASK) != ((addr + sign * (size - 1)) & PAGE_MASK)) {
+ if ( (addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK) ) {
unsigned long value = 0;
DPRINTK("Single io request in a movs crossing page boundary.\n");
- mmio_opp->flags |= OVERLAP;
-
- regs->eip -= inst_len; /* do not advance %eip */
-
- if (dir == IOREQ_WRITE)
- (void)hvm_copy_from_guest_virt(&value, addr, size);
- send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
+ mmio_op->flags |= OVERLAP;
+
+ if ( dir == IOREQ_WRITE ) {
+ if ( hvm_paging_enabled(v) )
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
+ else
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
+ } else
+ mmio_op->addr = addr;
+
+ if ( count != 1 )
+ regs->eip -= inst_len; /* do not advance %eip */
+
+ send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, df, 0);
} else {
- if ((addr & PAGE_MASK) != ((addr + sign * (count * size - 1)) &
PAGE_MASK)) {
+ unsigned long last_addr = sign > 0 ? addr + count * size - 1
+ : addr - (count - 1) * size;
+
+ if ( (addr & PAGE_MASK) != (last_addr & PAGE_MASK) )
+ {
regs->eip -= inst_len; /* do not advance %eip */
- if (sign > 0) {
+ if ( sign > 0 )
count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
- } else {
- /* We need to make sure we advance to the point
- where the next request will be on a different
- page. If we're going down, that means
- advancing until one byte before the start of
- the page, hence +1. */
- count = ((addr + 1) & ~PAGE_MASK) / size;
- }
+ else
+ count = (addr & ~PAGE_MASK) / size + 1;
}
ASSERT(count);
- send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
+
+ send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, df, 1);
}
break;
}
case INSTR_MOVZX:
case INSTR_MOVSX:
- mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
+ mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
break;
case INSTR_STOS:
@@ -1010,10 +1013,8 @@ void handle_mmio(unsigned long va, unsig
* Since the destination is always in (contiguous) mmio space we don't
* need to break it up into pages.
*/
- mmio_opp->flags = mmio_inst.flags;
- mmio_opp->instr = mmio_inst.instr;
send_mmio_req(IOREQ_TYPE_COPY, gpa,
- GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax,
IOREQ_WRITE, 0);
+ GET_REPEAT_COUNT(), op_size, regs->eax, IOREQ_WRITE, df,
0);
break;
case INSTR_LODS:
@@ -1021,87 +1022,70 @@ void handle_mmio(unsigned long va, unsig
* Since the source is always in (contiguous) mmio space we don't
* need to break it up into pages.
*/
- mmio_opp->flags = mmio_inst.flags;
- mmio_opp->instr = mmio_inst.instr;
send_mmio_req(IOREQ_TYPE_COPY, gpa,
- GET_REPEAT_COUNT(), mmio_inst.op_size, 0, IOREQ_READ, 0);
+ GET_REPEAT_COUNT(), op_size, 0, IOREQ_READ, df, 0);
break;
case INSTR_OR:
- mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs);
+ mmio_operands(IOREQ_TYPE_OR, gpa, mmio_op, op_size);
break;
case INSTR_AND:
- mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs);
+ mmio_operands(IOREQ_TYPE_AND, gpa, mmio_op, op_size);
break;
case INSTR_XOR:
- mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs);
+ mmio_operands(IOREQ_TYPE_XOR, gpa, mmio_op, op_size);
break;
case INSTR_CMP: /* Pass through */
case INSTR_TEST:
case INSTR_SUB:
- mmio_opp->flags = mmio_inst.flags;
- mmio_opp->instr = mmio_inst.instr;
- mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
- mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
- mmio_opp->immediate = mmio_inst.immediate;
-
/* send the request and wait for the value */
- send_mmio_req(IOREQ_TYPE_COPY, gpa, 1,
- mmio_inst.op_size, 0, IOREQ_READ, 0);
+ send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
break;
case INSTR_BT:
+ {
+ unsigned long value = 0;
+ int index, size;
+
+ if ( mmio_op->operand[0] & REGISTER )
{
- unsigned long value = 0;
- int index, size;
-
- mmio_opp->instr = mmio_inst.instr;
- mmio_opp->operand[0] = mmio_inst.operand[0]; /* bit offset */
- mmio_opp->operand[1] = mmio_inst.operand[1]; /* bit base */
-
- if ( mmio_inst.operand[0] & REGISTER )
- {
- index = operand_index(mmio_inst.operand[0]);
- size = operand_size(mmio_inst.operand[0]);
- value = get_reg_value(size, index, 0, regs);
- }
- else if ( mmio_inst.operand[0] & IMMEDIATE )
- {
- mmio_opp->immediate = mmio_inst.immediate;
- value = mmio_inst.immediate;
- }
- send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
- mmio_inst.op_size, 0, IOREQ_READ, 0);
- break;
- }
+ index = operand_index(mmio_op->operand[0]);
+ size = operand_size(mmio_op->operand[0]);
+ value = get_reg_value(size, index, 0, regs);
+ }
+ else if ( mmio_op->operand[0] & IMMEDIATE )
+ {
+ mmio_op->immediate = mmio_op->immediate;
+ value = mmio_op->immediate;
+ }
+ send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
+ op_size, 0, IOREQ_READ, df, 0);
+ break;
+ }
case INSTR_XCHG:
- mmio_opp->flags = mmio_inst.flags;
- mmio_opp->instr = mmio_inst.instr;
- mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
- mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
- if ( mmio_inst.operand[0] & REGISTER ) {
+ if ( mmio_op->operand[0] & REGISTER ) {
long value;
- unsigned long operand = mmio_inst.operand[0];
+ unsigned long operand = mmio_op->operand[0];
value = get_reg_value(operand_size(operand),
operand_index(operand), 0,
regs);
/* send the request and wait for the value */
send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
- mmio_inst.op_size, value, IOREQ_WRITE, 0);
+ op_size, value, IOREQ_WRITE, df, 0);
} else {
/* the destination is a register */
long value;
- unsigned long operand = mmio_inst.operand[1];
+ unsigned long operand = mmio_op->operand[1];
value = get_reg_value(operand_size(operand),
operand_index(operand), 0,
regs);
/* send the request and wait for the value */
send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
- mmio_inst.op_size, value, IOREQ_WRITE, 0);
+ op_size, value, IOREQ_WRITE, df, 0);
}
break;
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Oct 18 15:13:41 2006 +0100
@@ -59,8 +59,6 @@ extern int inst_copy_from_guest(unsigned
int inst_len);
extern uint32_t vlapic_update_ppr(struct vlapic *vlapic);
extern asmlinkage void do_IRQ(struct cpu_user_regs *);
-extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
- unsigned long count, int size, long value, int dir,
int pvalid);
extern void svm_dump_inst(unsigned long eip);
extern int svm_dbg_on;
void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
@@ -1410,7 +1408,7 @@ static void svm_io_instruction(struct vc
struct cpu_user_regs *regs;
struct hvm_io_op *pio_opp;
unsigned int port;
- unsigned int size, dir;
+ unsigned int size, dir, df;
ioio_info_t info;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1429,6 +1427,8 @@ static void svm_io_instruction(struct vc
port = info.fields.port; /* port used to be addr */
dir = info.fields.type; /* direction */
+ df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
+
if (info.fields.sz32)
size = 4;
else if (info.fields.sz16)
@@ -1445,7 +1445,7 @@ static void svm_io_instruction(struct vc
if (info.fields.str)
{
unsigned long addr, count;
- int sign = regs->eflags & EF_DF ? -1 : 1;
+ int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
if (!svm_get_io_address(v, regs, dir, &count, &addr))
{
@@ -1475,25 +1475,37 @@ static void svm_io_instruction(struct vc
unsigned long value = 0;
pio_opp->flags |= OVERLAP;
-
- if (dir == IOREQ_WRITE)
- (void)hvm_copy_from_guest_virt(&value, addr, size);
-
- send_pio_req(regs, port, 1, size, value, dir, 0);
+ pio_opp->addr = addr;
+
+ if (dir == IOREQ_WRITE) /* OUTS */
+ {
+ if (hvm_paging_enabled(current))
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
+ else
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
+ }
+
+ if (count == 1)
+ regs->eip = vmcb->exitinfo2;
+
+ send_pio_req(port, 1, size, value, dir, df, 0);
}
else
{
- if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK))
+ unsigned long last_addr = sign > 0 ? addr + count * size - 1
+ : addr - (count - 1) * size;
+
+ if ((addr & PAGE_MASK) != (last_addr & PAGE_MASK))
{
if (sign > 0)
count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
else
- count = (addr & ~PAGE_MASK) / size;
+ count = (addr & ~PAGE_MASK) / size + 1;
}
else
regs->eip = vmcb->exitinfo2;
- send_pio_req(regs, port, count, size, addr, dir, 1);
+ send_pio_req(port, count, size, addr, dir, df, 1);
}
}
else
@@ -1507,7 +1519,7 @@ static void svm_io_instruction(struct vc
if (port == 0xe9 && dir == IOREQ_WRITE && size == 1)
hvm_print_line(v, regs->eax); /* guest debug output */
- send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
+ send_pio_req(port, 1, size, regs->eax, dir, df, 0);
}
}
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Oct 18 15:13:41 2006 +0100
@@ -1041,14 +1041,20 @@ static void vmx_vmexit_do_invlpg(unsigne
}
-static int check_for_null_selector(unsigned long eip)
+static int check_for_null_selector(unsigned long eip, int inst_len, int dir)
{
unsigned char inst[MAX_INST_LEN];
unsigned long sel;
- int i, inst_len;
+ int i;
int inst_copy_from_guest(unsigned char *, unsigned long, int);
- inst_len = __get_instruction_length(); /* Safe: INS/OUTS */
+ /* INS can only use ES segment register, and it can't be overridden */
+ if ( dir == IOREQ_READ )
+ {
+ __vmread(GUEST_ES_SELECTOR, &sel);
+ return sel == 0 ? 1 : 0;
+ }
+
memset(inst, 0, MAX_INST_LEN);
if ( inst_copy_from_guest(inst, eip, inst_len) != inst_len )
{
@@ -1093,18 +1099,13 @@ static int check_for_null_selector(unsig
return 0;
}
-extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
- unsigned long count, int size, long value,
- int dir, int pvalid);
-
static void vmx_io_instruction(unsigned long exit_qualification,
unsigned long inst_len)
{
struct cpu_user_regs *regs;
struct hvm_io_op *pio_opp;
- unsigned long eip, cs, eflags;
- unsigned long port, size, dir;
- int vm86;
+ unsigned long port, size;
+ int dir, df, vm86;
pio_opp = &current->arch.hvm_vcpu.io_op;
pio_opp->instr = INSTR_PIO;
@@ -1116,28 +1117,26 @@ static void vmx_io_instruction(unsigned
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
hvm_store_cpu_guest_regs(current, regs, NULL);
- eip = regs->eip;
- cs = regs->cs;
- eflags = regs->eflags;
-
- vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
-
- HVM_DBG_LOG(DBG_LEVEL_IO,
- "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
+ vm86 = regs->eflags & X86_EFLAGS_VM ? 1 : 0;
+ df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
+
+ HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
"exit_qualification = %lx",
- vm86, cs, eip, exit_qualification);
-
- if (test_bit(6, &exit_qualification))
+ vm86, regs->cs, (unsigned long)regs->eip, exit_qualification);
+
+ if ( test_bit(6, &exit_qualification) )
port = (exit_qualification >> 16) & 0xFFFF;
else
port = regs->edx & 0xffff;
- TRACE_VMEXIT(1, port);
+
+ TRACE_VMEXIT(1,port);
+
size = (exit_qualification & 7) + 1;
dir = test_bit(3, &exit_qualification); /* direction */
- if (test_bit(4, &exit_qualification)) { /* string instruction */
+ if ( test_bit(4, &exit_qualification) ) { /* string instruction */
unsigned long addr, count = 1;
- int sign = regs->eflags & EF_DF ? -1 : 1;
+ int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
__vmread(GUEST_LINEAR_ADDRESS, &addr);
@@ -1145,10 +1144,10 @@ static void vmx_io_instruction(unsigned
* In protected mode, guest linear address is invalid if the
* selector is null.
*/
- if (!vm86 && check_for_null_selector(eip))
+ if ( !vm86 && check_for_null_selector(regs->eip, inst_len, dir) )
addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
- if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
+ if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */
pio_opp->flags |= REPZ;
count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
}
@@ -1157,30 +1156,45 @@ static void vmx_io_instruction(unsigned
* Handle string pio instructions that cross pages or that
* are unaligned. See the comments in hvm_domain.c/handle_mmio()
*/
- if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
+ if ( (addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK) ) {
unsigned long value = 0;
pio_opp->flags |= OVERLAP;
- if (dir == IOREQ_WRITE)
- (void)hvm_copy_from_guest_virt(&value, addr, size);
- send_pio_req(regs, port, 1, size, value, dir, 0);
+
+ if ( dir == IOREQ_WRITE ) /* OUTS */
+ {
+ if ( hvm_paging_enabled(current) )
+ (void)hvm_copy_from_guest_virt(&value, addr, size);
+ else
+ (void)hvm_copy_from_guest_phys(&value, addr, size);
+ } else
+ pio_opp->addr = addr;
+
+ if ( count == 1 )
+ regs->eip += inst_len;
+
+ send_pio_req(port, 1, size, value, dir, df, 0);
} else {
- if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK))
{
- if (sign > 0)
+ unsigned long last_addr = sign > 0 ? addr + count * size - 1
+ : addr - (count - 1) * size;
+
+ if ( (addr & PAGE_MASK) != (last_addr & PAGE_MASK) )
+ {
+ if ( sign > 0 )
count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
else
- count = (addr & ~PAGE_MASK) / size;
+ count = (addr & ~PAGE_MASK) / size + 1;
} else
regs->eip += inst_len;
- send_pio_req(regs, port, count, size, addr, dir, 1);
+ send_pio_req(port, count, size, addr, dir, df, 1);
}
} else {
- if (port == 0xe9 && dir == IOREQ_WRITE && size == 1)
+ if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
hvm_print_line(current, regs->eax); /* guest debug output */
regs->eip += inst_len;
- send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
+ send_pio_req(port, 1, size, regs->eax, dir, df, 0);
}
}
diff -r 21f8c507da29 -r bd6d4a499e47 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Oct 18 15:13:41 2006 +0100
@@ -2880,7 +2880,7 @@ static int sh_page_fault(struct vcpu *v,
shadow_audit_tables(v);
reset_early_unshadow(v);
shadow_unlock(d);
- handle_mmio(va, gpa);
+ handle_mmio(gpa);
return EXCRET_fault_fixed;
not_a_shadow_fault:
diff -r 21f8c507da29 -r bd6d4a499e47 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/include/asm-x86/hvm/io.h Wed Oct 18 15:13:41 2006 +0100
@@ -24,11 +24,6 @@
#include <asm/hvm/vioapic.h>
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
-
-#define MAX_OPERAND_NUM 2
-
-#define mk_operand(size_reg, index, seg, flag) \
- (((size_reg) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
#define operand_size(operand) \
((operand >> 24) & 0xFF)
@@ -70,29 +65,23 @@
#define INSTR_XCHG 14
#define INSTR_SUB 15
-struct instruction {
- __s8 instr; /* instruction type */
- __s16 op_size; /* the operand's bit size, e.g. 16-bit or 32-bit */
- __u64 immediate;
- __u16 seg_sel; /* segmentation selector */
- __u32 operand[MAX_OPERAND_NUM]; /* order is AT&T assembly */
- __u32 flags;
-};
-
#define MAX_INST_LEN 15 /* Maximum instruction length = 15 bytes */
struct hvm_io_op {
- int flags;
- int instr; /* instruction */
- unsigned long operand[2]; /* operands */
- unsigned long immediate; /* immediate portion */
- struct cpu_user_regs io_context; /* current context */
+ unsigned int instr; /* instruction */
+ unsigned int flags;
+ unsigned long addr; /* virt addr for overlap PIO/MMIO */
+ struct {
+ unsigned int operand[2]; /* operands */
+ unsigned long immediate; /* immediate portion */
+ };
+ struct cpu_user_regs io_context; /* current context */
};
#define MAX_IO_HANDLER 8
-#define VMX_PORTIO 0
-#define VMX_MMIO 1
+#define HVM_PORTIO 0
+#define HVM_MMIO 1
typedef int (*intercept_action_t)(ioreq_t *);
typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
@@ -131,16 +120,17 @@ extern int register_io_handler(unsigned
static inline int hvm_portio_intercept(ioreq_t *p)
{
- return hvm_io_intercept(p, VMX_PORTIO);
+ return hvm_io_intercept(p, HVM_PORTIO);
}
-int hvm_mmio_intercept(ioreq_t *p);
+extern int hvm_mmio_intercept(ioreq_t *p);
+extern int hvm_buffered_io_intercept(ioreq_t *p);
static inline int register_portio_handler(unsigned long addr,
unsigned long size,
intercept_action_t action)
{
- return register_io_handler(addr, size, action, VMX_PORTIO);
+ return register_io_handler(addr, size, action, HVM_PORTIO);
}
#if defined(__i386__) || defined(__x86_64__)
@@ -150,7 +140,9 @@ static inline int irq_masked(unsigned lo
}
#endif
-extern void handle_mmio(unsigned long, unsigned long);
+extern void send_pio_req(unsigned long port, unsigned long count, int size,
+ long value, int dir, int df, int pvalid);
+extern void handle_mmio(unsigned long gpa);
extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
extern void hvm_io_assist(struct vcpu *v);
extern void pic_irq_request(void *data, int level);
diff -r 21f8c507da29 -r bd6d4a499e47 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Oct 18 14:46:48 2006 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Oct 18 15:13:41 2006 +0100
@@ -142,10 +142,6 @@ int hvm_copy_from_guest_virt(void *buf,
int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
void hvm_setup_platform(struct domain* d);
-int hvm_mmio_intercept(ioreq_t *p);
-int hvm_io_intercept(ioreq_t *p, int type);
-int hvm_buffered_io_intercept(ioreq_t *p);
-void hvm_hooks_assist(struct vcpu *v);
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|