diff --git a/hw/pass-through.c b/hw/pass-through.c
index 014144e..d27aeea 100644
--- a/hw/pass-through.c
+++ b/hw/pass-through.c
@@ -1379,9 +1379,17 @@ static void pt_ioport_map(PCIDevice *d, int i,
 
     if (e_phys != -1) {
         /* Create new mapping */
-        ret = xc_domain_ioport_mapping(xc_handle, domid, e_phys,
-                  assigned_device->bases[i].access.pio_base, e_size,
-                  DPCI_ADD_MAPPING);
+        if ( vga_skip_ioport_map(d) )
+        {
+            assigned_device->bases[i].e_physbase = -1;
+        }
+        else
+        {
+            ret = xc_domain_ioport_mapping(xc_handle, domid, e_phys,
+                      assigned_device->bases[i].access.pio_base, e_size,
+                      DPCI_ADD_MAPPING);
+        }
+
         if ( ret != 0 )
         {
             PT_LOG("Error: create new mapping failed!\n");
diff --git a/hw/pass-through.h b/hw/pass-through.h
index e59eb52..19652c4 100644
--- a/hw/pass-through.h
+++ b/hw/pass-through.h
@@ -411,6 +411,11 @@ int pt_pci_host_write(int bus, int dev, int fn, u32 addr, u32 val, int len);
 void intel_pch_init(PCIBus *bus);
 int register_vga_regions(struct pt_dev *real_device);
 int unregister_vga_regions(struct pt_dev *real_device);
+int vga_skip_ioport_map(PCIDevice *d);
+int igd_register_vga_regions(struct pt_dev *real_device);
+int igd_unregister_vga_regions(struct pt_dev *real_device);
+int ati_register_vga_regions(struct pt_dev *real_device);
+int ati_unregister_vga_regions(struct pt_dev *real_device);
 int setup_vga_pt(struct pt_dev *real_device);
 PCIBus *intel_pci_bridge_init(PCIBus *bus, int devfn, uint16_t vid,
         uint16_t did, const char *name, uint16_t revision);
diff --git a/hw/pci.h b/hw/pci.h
index e4cc79a..4aa0373 100644
--- a/hw/pci.h
+++ b/hw/pci.h
@@ -54,6 +54,8 @@ extern target_phys_addr_t pci_mem_base;
 
 #define PCI_VENDOR_ID_CIRRUS 0x1013
 
+#define PCI_VENDOR_ID_ATI 0x1002
+
 #define PCI_VENDOR_ID_IBM 0x1014
 #define PCI_DEVICE_ID_IBM_OPENPIC2 0xffff
 
diff --git a/hw/pt-graphics.c b/hw/pt-graphics.c
index 5dfcca2..b8f5a3a 100644
--- a/hw/pt-graphics.c
+++ b/hw/pt-graphics.c
@@ -8,11 +8,213 @@
 #include
 #include
+#include
 #include
 
 extern int gfx_passthru;
 extern int igd_passthru;
 
+/*********************************/
+/* Code for ATI GFX Passthru     */
+/*********************************/
+/* ATI VBIOS Working Mechanism
+ *
+ * Generally there are three memory resources (two MMIO and one PIO)
+ * associated with modern ATI gfx. The VBIOS uses special tricks to figure
+ * out the BARs, instead of using regular PCI config space reads.
+ *
+ * (1) The VBIOS relies on I/O port 0x3C3 to retrieve the PIO BAR.
+ * (2) The VBIOS maintains a shadow copy of the PCI config space. It
+ *     retrieves the MMIO BARs from this shadow copy by sending I/O requests
+ *     to the first two registers of the PIO BAR (MMINDEX and MMDATA). The
+ *     workflow is as follows: MMINDEX (register 0) is written with an index
+ *     value specifying the register the VBIOS wants to access. The shadowed
+ *     data can then be read/written through MMDATA (register 1). For the
+ *     two MMIO BARs, the index values are 0x4010 and 0x4014 respectively.
+ */
+
+#define ATI_BAR1_INDEX      0       //MMIO BAR1
+#define ATI_BAR2_INDEX      1       //MMIO BAR2
+#define ATI_BAR5_INDEX      4       //PIO BAR == BAR5
+
+#define ATI_BAR1_MMINDEX    0x4010  //data written to MMINDEX for MMIO BAR1
+#define ATI_BAR2_MMINDEX    0x4014  //data written to MMINDEX for MMIO BAR2
+
+struct ati_gfx_info {
+    int initialized;                /* initialized already? */
+
+    /* PIO */
+    uint32_t host_pio_base;         /* host base addr of PIO */
+    uint32_t guest_pio_base;        /* guest base addr of PIO */
+    uint32_t pio_size;              /* PIO size */
+
+    /* MMIO */
+    uint32_t guest_mmio_base1;      /* guest base addr of MMIO 1 */
+    uint32_t guest_mmio_base2;      /* guest base addr of MMIO 2 */
+
+    /* PIO MMINDEX access recording */
+    uint32_t pre_mmindex_data;      /* previous data written to MMINDEX */
+};
+
+static struct ati_gfx_info gfx_info;
+
+/* Convert a guest PIO port to the corresponding host PIO port */
+static uint16_t gport_to_hport(uint16_t gport)
+{
+    return (gport - gfx_info.guest_pio_base) + gfx_info.host_pio_base;
+}
+
+/* Read a host PIO port */
+static uint32_t ati_hw_in(uint16_t hport)
+{
+    unsigned val;
+
+    ioperm(gfx_info.host_pio_base, gfx_info.pio_size, 1);
+    asm volatile ("in %1,%0":"=a"(val):"Nd"(hport));
+    ioperm(gfx_info.host_pio_base, gfx_info.pio_size, 0);
+
+    return val;
+}
+
+/* Write data to a host PIO port */
+static void ati_hw_out(uint16_t hport, uint32_t data)
+{
+    ioperm(gfx_info.host_pio_base, gfx_info.pio_size, 1);
+    asm volatile ("out %1, %0"::"Nd"(hport),"a"(data));
+    ioperm(gfx_info.host_pio_base, gfx_info.pio_size, 0);
+}
+
+static uint32_t ati_io_regs_read(void *opaque, uint32_t addr)
+{
+    uint32_t val;
+
+    val = ati_hw_in(gport_to_hport(addr));
+
+    /* tweak the value if the VBIOS is reading MMIO BAR1 or BAR2 */
+    if ( addr == (gfx_info.guest_pio_base + 4) )
+    {
+        switch ( gfx_info.pre_mmindex_data )
+        {
+        case ATI_BAR1_MMINDEX:
+            val = gfx_info.guest_mmio_base1 | (val & 0x0000000f);
+            break;
+        case ATI_BAR2_MMINDEX:
+            val = gfx_info.guest_mmio_base2 | (val & 0x0000000f);
+            break;
+        default:
+            break;
+        }
+    }
+
+    return val;
+}
+
+static void ati_io_regs_write(void *opaque, uint32_t addr, uint32_t val)
+{
+    ati_hw_out(gport_to_hport(addr), val);
+
+    /* book keeping */
+    if ( addr == gfx_info.guest_pio_base )
+        gfx_info.pre_mmindex_data = val;
+}
+
+static void ati_gfx_init(struct pt_dev *assigned)
+{
+    PCIDevice *dev = (PCIDevice *)&assigned->dev;
+
+    register_ioport_read(dev->io_regions[ATI_BAR5_INDEX].addr,
+        dev->io_regions[ATI_BAR5_INDEX].size, 4, ati_io_regs_read, assigned);
+
+    register_ioport_write(dev->io_regions[ATI_BAR5_INDEX].addr,
+        dev->io_regions[ATI_BAR5_INDEX].size, 4, ati_io_regs_write, assigned);
+
+    /* initialize IO registers */
+    gfx_info.guest_pio_base = dev->io_regions[ATI_BAR5_INDEX].addr;
+    gfx_info.pio_size = dev->io_regions[ATI_BAR5_INDEX].size;
+    gfx_info.host_pio_base = assigned->bases[ATI_BAR5_INDEX].access.pio_base;
+
+    gfx_info.guest_mmio_base1 = dev->io_regions[ATI_BAR1_INDEX].addr;
+    gfx_info.guest_mmio_base2 = dev->io_regions[ATI_BAR2_INDEX].addr;
+    gfx_info.initialized = 1;
+
+    PT_LOG("guest_pio_bar = 0x%x, host_pio_bar = 0x%x, pio_size=0x%x "
+        "guest_mmio_bar1=0x%x, guest_mmio_bar2=0x%x\n",
+        gfx_info.guest_pio_base, gfx_info.host_pio_base, gfx_info.pio_size,
+        gfx_info.guest_mmio_base1, gfx_info.guest_mmio_base2);
+}
+
+static uint32_t ati_legacy_io_read(void *opaque, uint32_t addr)
+{
+    struct pt_dev *assigned_device = opaque;
+    PCIDevice *dev = (PCIDevice *)&assigned_device->dev;
+    uint32_t val = 0xFF;
+
+    switch( addr )
+    {
+    case 0x3c3:
+        val = dev->io_regions[ATI_BAR5_INDEX].addr >> 8;
+        /* Intercept GFX IO registers. This is supposed to happen in
+         * ati_register_vga_regions(), but we cannot get the guest phys
+         * I/O BAR there. */
+        if ( !gfx_info.initialized )
+            ati_gfx_init(assigned_device);
+        break;
+    default:
+        PT_LOG("ERROR: port 0x%x I/O read not handled\n", addr);
+        break;
+    }
+
+    return val;
+}
+
+static void ati_legacy_io_write(void *opaque, uint32_t addr, uint32_t val)
+{
+    PT_LOG("ERROR: port 0x%x I/O write not handled\n", addr);
+}
+
+int ati_register_vga_regions(struct pt_dev *real_device)
+{
+    PCIDevice *dev = (PCIDevice *)&real_device->dev;
+    int ret = 0;
+
+    /* We need to intercept VBIOS accesses to port 0x3C3, which returns the
+     * device's port I/O BAR. The rest of the legacy I/O ports are allowed
+     * direct access.
+     */
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
+               0x3C0, 0x3, DPCI_ADD_MAPPING);
+
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C4,
+               0x3C4, 0x1C, DPCI_ADD_MAPPING);
+
+    register_ioport_read(0x3c3, 1, 1, ati_legacy_io_read, real_device);
+    register_ioport_write(0x3c3, 1, 1, ati_legacy_io_write, real_device);
+
+    /* initialized on the first port 0x3C3 access, in ati_gfx_init() */
+    gfx_info.initialized = 0;
+
+    return ret;
+}
+
+int ati_unregister_vga_regions(struct pt_dev *real_device)
+{
+    int ret = 0;
+
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
+               0x3C0, 0x3, DPCI_REMOVE_MAPPING);
+
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C4,
+               0x3C4, 0x1C, DPCI_REMOVE_MAPPING);
+
+    gfx_info.initialized = 0;
+
+    return ret;
+}
+
+/*********************************/
+/* Code for Intel IGD Passthru   */
+/*********************************/
 static int pch_map_irq(PCIDevice *pci_dev, int irq_num)
 {
     PT_LOG("pch_map_irq called\n");
@@ -88,6 +290,77 @@ uint32_t igd_pci_read(PCIDevice *pci_dev, int config_addr, int len)
     return val;
 }
 
+int igd_register_vga_regions(struct pt_dev *real_device)
+{
+    u32 vendor_id, igd_opregion;
+    int ret = 0;
+
+    /* legacy I/O ports 0x3C0 -- 0x3E0 */
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
+               0x3C0, 0x20, DPCI_ADD_MAPPING);
+
+    /* 1:1 map ASL Storage register value */
+    vendor_id = pt_pci_host_read(0, 2, 0, 0, 2);
+    igd_opregion = pt_pci_host_read(0, 2, 0, 0xfc, 4);
+    if ( (vendor_id == 0x8086) && igd_opregion )
+    {
+        ret |= xc_domain_memory_mapping(xc_handle, domid,
+                igd_opregion >> XC_PAGE_SHIFT,
+                igd_opregion >> XC_PAGE_SHIFT,
+                2,
+                DPCI_ADD_MAPPING);
+        PT_LOG("register_vga: igd_opregion = %x\n", igd_opregion);
+    }
+
+    return ret;
+}
+
+int igd_unregister_vga_regions(struct pt_dev *real_device)
+{
+    u32 vendor_id, igd_opregion;
+    int ret = 0;
+
+    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
+               0x3C0, 0x20, DPCI_REMOVE_MAPPING);
+
+    vendor_id = pt_pci_host_read(0, 2, 0, 0, 2);
+    igd_opregion = pt_pci_host_read(0, 2, 0, 0xfc, 4);
+    if ( (vendor_id == 0x8086) && igd_opregion )
+    {
+        ret |= xc_domain_memory_mapping(xc_handle, domid,
+                igd_opregion >> XC_PAGE_SHIFT,
+                igd_opregion >> XC_PAGE_SHIFT,
+                2,
+                DPCI_REMOVE_MAPPING);
+    }
+
+    return ret;
+}
+/*********************************/
+/* Generic Code for GFX Passthru */
+/*********************************/
+/* This function decides whether the I/O port map should be skipped */
+int vga_skip_ioport_map(PCIDevice *d)
+{
+    struct pt_dev *dev = (struct pt_dev *)d;
+    int skip = 0;
+
+    if ( !gfx_passthru || dev->pci_dev->device_class != 0x0300 )
+        return 0;
+
+    switch( dev->pci_dev->vendor_id )
+    {
+    case PCI_VENDOR_ID_ATI:
+    case PCI_VENDOR_ID_AMD:
+        skip = 1;
+        break;
+    default:
+        skip = 0;
+        break;
+    }
+
+    return skip;
+}
 /*
  * register VGA resources for the domain with assigned gfx
  */
@@ -99,29 +372,31 @@ int register_vga_regions(struct pt_dev *real_device)
     if ( !gfx_passthru || real_device->pci_dev->device_class != 0x0300 )
         return ret;
 
+    /* legacy I/O ports 0x3B0 - 0x3BC */
     ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3B0,
                0x3B0, 0xC, DPCI_ADD_MAPPING);
-
-    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
-               0x3C0, 0x20, DPCI_ADD_MAPPING);
-
+
+    /* legacy video MMIO range 0xA0000 - 0xBFFFF */
     ret |= xc_domain_memory_mapping(xc_handle, domid,
             0xa0000 >> XC_PAGE_SHIFT,
             0xa0000 >> XC_PAGE_SHIFT,
             0x20,
             DPCI_ADD_MAPPING);
 
-    /* 1:1 map ASL Storage register value */
-    vendor_id = pt_pci_host_read(0, 2, 0, 0, 2);
-    igd_opregion = pt_pci_host_read(0, 2, 0, 0xfc, 4);
-    if ( (vendor_id == 0x8086) && igd_opregion )
+    /* Other VGA regions are vendor specific */
+    switch( real_device->pci_dev->vendor_id )
     {
-        ret |= xc_domain_memory_mapping(xc_handle, domid,
-                igd_opregion >> XC_PAGE_SHIFT,
-                igd_opregion >> XC_PAGE_SHIFT,
-                2,
-                DPCI_ADD_MAPPING);
-        PT_LOG("register_vga: igd_opregion = %x\n", igd_opregion);
+    case PCI_VENDOR_ID_INTEL:
+        ret = igd_register_vga_regions(real_device);
+        break;
+    case PCI_VENDOR_ID_ATI:
+    case PCI_VENDOR_ID_AMD:
+        ret = ati_register_vga_regions(real_device);
+        break;
+    default:
+        PT_LOG("gfx card not supported by Xen passthru!\n");
+        ret = 1;
+        break;
     }
 
     if ( ret != 0 )
@@ -135,33 +410,36 @@ int register_vga_regions(struct pt_dev *real_device)
  */
 int unregister_vga_regions(struct pt_dev *real_device)
 {
-    u32 vendor_id, igd_opregion;
     int ret = 0;
 
     if ( !gfx_passthru || real_device->pci_dev->device_class != 0x0300 )
         return ret;
 
+    /* legacy I/O ports 0x3B0 - 0x3BC */
     ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3B0,
                0x3B0, 0xC, DPCI_REMOVE_MAPPING);
 
-    ret |= xc_domain_ioport_mapping(xc_handle, domid, 0x3C0,
-               0x3C0, 0x20, DPCI_REMOVE_MAPPING);
-
+    /* legacy video MMIO range 0xA0000 - 0xBFFFF */
     ret |= xc_domain_memory_mapping(xc_handle, domid,
             0xa0000 >> XC_PAGE_SHIFT,
             0xa0000 >> XC_PAGE_SHIFT,
             20,
             DPCI_REMOVE_MAPPING);
 
-    vendor_id = pt_pci_host_read(0, 2, 0, 0, 2);
-    igd_opregion = pt_pci_host_read(0, 2, 0, 0xfc, 4);
-    if ( (vendor_id == 0x8086) && igd_opregion )
+    /* Other VGA regions are vendor specific */
+    switch( real_device->pci_dev->vendor_id )
     {
-        ret |= xc_domain_memory_mapping(xc_handle, domid,
-                igd_opregion >> XC_PAGE_SHIFT,
-                igd_opregion >> XC_PAGE_SHIFT,
-                2,
-                DPCI_REMOVE_MAPPING);
+    case PCI_VENDOR_ID_INTEL:
+        ret = igd_unregister_vga_regions(real_device);
+        break;
+    case PCI_VENDOR_ID_ATI:
+    case PCI_VENDOR_ID_AMD:
+        ret = ati_unregister_vga_regions(real_device);
+        break;
+    default:
+        PT_LOG("gfx card not supported by Xen passthru!\n");
+        ret = 1;
+        break;
    }
 
     if ( ret != 0 )
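--
Note (not part of the patch): the key trick in ati_io_regs_read above is that a read of MMDATA (PIO offset 4) is rewritten whenever the previously latched MMINDEX value selects one of the shadowed MMIO BARs, so the guest VBIOS sees its own BAR bases rather than the host's, while the low flag bits of the host value are preserved. The standalone sketch below only illustrates that fixup; fixup_mmdata and all the addresses in it are hypothetical example values, not code from the patch.

/* Standalone illustration of the MMDATA BAR fixup done in ati_io_regs_read.
 * All values below are made up for demonstration purposes. */
#include <stdint.h>
#include <stdio.h>

#define ATI_BAR1_MMINDEX 0x4010 /* shadow index of MMIO BAR1 */
#define ATI_BAR2_MMINDEX 0x4014 /* shadow index of MMIO BAR2 */

/* guest-visible MMIO bases (hypothetical example values) */
static const uint32_t guest_mmio_base1 = 0xf0000000;
static const uint32_t guest_mmio_base2 = 0xfbe00000;

/* Rewrite a shadowed BAR read: substitute the guest base address while
 * keeping the host value's low 4 flag bits, as the patch does. */
static uint32_t fixup_mmdata(uint32_t mmindex, uint32_t host_val)
{
    switch ( mmindex )
    {
    case ATI_BAR1_MMINDEX:
        return guest_mmio_base1 | (host_val & 0x0000000f);
    case ATI_BAR2_MMINDEX:
        return guest_mmio_base2 | (host_val & 0x0000000f);
    default:
        return host_val; /* any other shadowed register passes through */
    }
}

int main(void)
{
    /* pretend the host BAR1 lives at 0xd0000000 with the prefetchable bit set */
    uint32_t host_bar1 = 0xd0000008;

    printf("host BAR1 = 0x%08x, guest sees 0x%08x\n",
           (unsigned)host_bar1,
           (unsigned)fixup_mmdata(ATI_BAR1_MMINDEX, host_bar1));
    return 0;
}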