FYI,
While tracing a virtio-blk crash, I found that cpu_physical_memory_map()
in upstream-qemu+xen behaves differently from the other configurations.
This prevents the virtio ring code bundled in qemu from working properly.
When a caller requests more than two guest-physical pages, the function
maps as much of the range into host-virtual memory as it can.
In kvm+qemu the region is always contiguous in host-virtual memory,
so this works perfectly.
In xen+qemu, however, the mapping is sometimes fragmented and only partial.
According to the comment on cpu_physical_memory_map(), it does not
guarantee that the whole requested range is mapped. However, the virtio
backend drivers in qemu expect all requested guest-physical pages to be
mapped contiguously into host-virtual memory.
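
To make that contract concrete, a caller that tolerated partial mappings
would have to loop over the range itself, roughly like the sketch below
(qemu-internal context assumed; copy_guest_range() and its error handling
are made up for illustration, only the two cpu_physical_memory_* calls are
the real functions from this tree):

/* Sketch: read 'len' bytes of guest memory at 'addr' into 'buf',
 * accepting that each map call may cover only a contiguous prefix
 * of what was asked for. */
static int copy_guest_range(void *buf, target_phys_addr_t addr,
                            target_phys_addr_t len)
{
    uint8_t *dst = buf;

    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

        if (p == NULL || plen == 0) {
            return -1;                      /* unmappable, e.g. MMIO */
        }
        /* plen may come back smaller than requested. */
        memcpy(dst, p, plen);
        cpu_physical_memory_unmap(p, plen, 0 /* is_write */,
                                  0 /* access_len */);

        dst  += plen;
        addr += plen;
        len  -= plen;
    }
    return 0;
}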
# Sorry, no patch; I have no good idea how to fix this right now.
Thanks,
Takeshi
qemu-dm-v14/hw/virtio.c:
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
    size_t num_sg, int is_write)
{
    unsigned int i;
    target_phys_addr_t len;
    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            /* Partial or failed mapping: len comes back shorter than
             * iov_len when cpu_physical_memory_map() could not map the
             * whole range contiguously. */
            error_report("virtio: trying to map MMIO memory");
            exit(1); /* BOMB!! -- qemu exits here */
        }
    }
}
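
Not a patch, but to illustrate the kind of change that would be needed on
the caller side, something like the sketch below could split one request
across several iovec entries (the function name and the 'max' capacity
argument are hypothetical; existing callers size sg[] to exactly num_sg
entries, so this alone is not enough):

/* Sketch only, not a proposed patch: map one guest-physical range into
 * as many iovec entries as needed. */
static int virtqueue_map_one(struct iovec *sg, unsigned int max,
                             target_phys_addr_t addr,
                             target_phys_addr_t len, int is_write)
{
    unsigned int n = 0;

    while (len > 0 && n < max) {
        target_phys_addr_t plen = len;

        sg[n].iov_base = cpu_physical_memory_map(addr, &plen, is_write);
        if (sg[n].iov_base == NULL || plen == 0) {
            return -1;              /* real MMIO / unmappable memory */
        }
        sg[n].iov_len = plen;       /* may be shorter than requested */
        addr += plen;
        len  -= plen;
        n++;
    }
    return len == 0 ? (int)n : -1;  /* -1: ran out of iovec slots */
}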
qemu-dm-v14/exec.c, cpu_physical_memory_map(), around lines 3978-4017:
    while (len > 0) {
        /* (snip) */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
            /* KVM returns host-virtual addresses sequentially here,
             * but Xen does not. */
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* This break triggers especially on xen+upstream-qemu:
             * the new page is not contiguous with what has already
             * been mapped, so only a shortened range is returned. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
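
The contiguity test above is easy to demonstrate outside qemu. The toy
program below has no qemu or Xen dependency; it simulates per-page host
pointers and applies the same "ret + done != ptr" check, accepting pages
only while each one starts exactly where the previous one ended:

/* Standalone illustration of the contiguity check in
 * cpu_physical_memory_map().  All addresses are simulated. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096

int main(void)
{
    static uint8_t ram[4 * PAGE_SIZE];

    /* KVM-like case: guest pages map back-to-back in host-virtual. */
    uint8_t *kvm_pages[4] = { ram, ram + PAGE_SIZE,
                              ram + 2 * PAGE_SIZE, ram + 3 * PAGE_SIZE };
    /* Xen-like case: the third page lands elsewhere (fragmented). */
    uint8_t *xen_pages[4] = { ram, ram + PAGE_SIZE,
                              ram + 3 * PAGE_SIZE, ram + 2 * PAGE_SIZE };

    uint8_t **cases[2] = { kvm_pages, xen_pages };
    const char *name[2] = { "kvm-like", "xen-like" };

    for (int c = 0; c < 2; c++) {
        uint8_t *ret = NULL;
        size_t done = 0;

        for (int i = 0; i < 4; i++) {
            uint8_t *ptr = cases[c][i];
            if (done == 0) {
                ret = ptr;
            } else if (ret + done != ptr) {
                break;      /* same test that truncates the mapping */
            }
            done += PAGE_SIZE;
        }
        printf("%s: mapped %zu of %d bytes\n", name[c], done, 4 * PAGE_SIZE);
    }
    return 0;
}

With the fragmented layout only the first two pages are accepted, which is
exactly the shortened *plen that virtqueue_map_sg() then refuses.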
-- 
Takeshi HASEGAWA <hasegaw@xxxxxxxxx>