On Tue, Feb 24, 2009 at 09:18:46AM +1100, Simon Horman wrote:
> On Mon, Feb 23, 2009 at 03:31:30AM -0800, Keir Fraser wrote:
> > On 22/02/2009 22:55, "Simon Horman" <horms@xxxxxxxxxxxx> wrote:
> >
> > >> The another solution is expanding GSI to 127. I don't sure it is
> > >> possible, but sharing virtual GSI will not occur.
> > >
> > > That thought crossed my mind too, I will investigate further.
> > > But I think that ideally it would need to be expanded to 143
> > > as the first 16 GSI are currently reserved for ISA.
> >
> > Can't xend or qemu do a search of PCI slot space to pick a virtual
> > devfn:intx that doesn't conflict (or that purposely does conflict, if there
> > are any cases where you want that)? 32 non-legacy GSIs should be enough to
> > avoid any aliasing if a bit of effort is put in to avoid it.
>
> That was what I had in mind in my original proposal.
> I'll try coding it up and see what it looks like.
As per the patch below I tried coding this up.
Unfortunately in my test cases it seems that an rtl8139 ioemu device
doesn't work unless it's given the same girq that the original mapping
yielded. I guess this is because the patch below isn't sufficient to wire
up the girq for the ioemu case. I am investigating why.
Index: xen-unstable.hg/xen/arch/x86/hvm/irq.c
===================================================================
--- xen-unstable.hg.orig/xen/arch/x86/hvm/irq.c 2009-03-02 15:02:28.000000000
+1100
+++ xen-unstable.hg/xen/arch/x86/hvm/irq.c 2009-03-02 15:09:51.000000000
+1100
@@ -26,6 +26,127 @@
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
+#define GSI_F_PASS_THROUGH 0x80
+#define GSI_F_MASK 0x7f
+
+/* Return 1 if @gsi is currently mapped to any devintx slot that carries
+ * the GSI_F_PASS_THROUGH flag, 0 otherwise.
+ * Caller must hold d->arch.hvm_domain.irq_lock (hvm_irq is read here).
+ */
+static int gsi_is_pass_through(struct hvm_irq *hvm_irq, uint8_t gsi)
+{
+    int i;
+
+    for ( i = 0; i < NR_PCI_DEVINTX; i++ )
+    {
+        if ( (hvm_irq->pci_devintx_gsi[i] & GSI_F_MASK) == gsi &&
+             hvm_irq->pci_devintx_gsi[i] & GSI_F_PASS_THROUGH )
+            return 1;
+    }
+
+    return 0;
+}
+
+/* Must be protected by d->arch.hvm_domain.irq_lock
+ * as d->arch.hvm_domain.irq is accessed
+ *
+ * Look up -- or allocate -- the guest GSI for (device, intx).
+ * Returns the GSI on success, or VIOAPIC_NUM_PINS on failure; this is
+ * the sentinel every caller tests against.
+ */
+static uint8_t __hvm_pci_intx_gsi_find(struct domain *d, uint32_t device,
+                                       uint32_t intx, uint8_t flag)
+{
+    uint8_t gsi;
+    int cnt, devintx = PCI_DEVINTX(device, intx);
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    int seed = 0; /* XXX: remove this, it is for testing only */
+
+    /* Strict bounds: device == NR_PCI_DEV would index past
+     * pci_devintx_gsi[NR_PCI_DEVINTX]. */
+    ASSERT((device < NR_PCI_DEV) && (intx < NR_PCI_INTX));
+
+    if ( (gsi = (hvm_irq->pci_devintx_gsi[devintx] & GSI_F_MASK)) )
+    {
+        /* Existing mapping: record any newly requested flag, but do NOT
+         * bump pci_gsi_cnt[] again -- re-incrementing on every lookup
+         * would inflate the count on each assert/deassert. */
+        hvm_irq->pci_devintx_gsi[devintx] = gsi | flag;
+        return gsi;
+    }
+
+    /* Find the gsi with the lowest usage count that
+     * is not used by a pass-through device
+     */
+    for ( cnt = 0; cnt < NR_PCI_DEVINTX; cnt++ )
+    {
+        for ( gsi = NR_ISAIRQS + seed; gsi < VIOAPIC_NUM_PINS; gsi++ )
+        {
+            if ( hvm_irq->pci_gsi_cnt[gsi - NR_ISAIRQS] != cnt ||
+                 gsi_is_pass_through(hvm_irq, gsi) )
+                continue;
+            goto out;
+        }
+    }
+
+    /* If this device isn't a pass-through device,
+     * then it may share a gsi with a pass-through device
+     */
+    if ( flag & GSI_F_PASS_THROUGH )
+        goto err;
+
+    for ( cnt = 0; cnt < NR_PCI_DEVINTX; cnt++ )
+    {
+        for ( gsi = NR_ISAIRQS; gsi < VIOAPIC_NUM_PINS; gsi++ )
+        {
+            if ( hvm_irq->pci_gsi_cnt[gsi - NR_ISAIRQS] != cnt )
+                continue;
+            goto out;
+        }
+    }
+
+err:
+    gdprintk(XENLOG_ERR, "HVM: No GSI available for dev=%u intx=%u\n",
+             device, intx);
+    /* Return the sentinel callers actually check for; the previous
+     * NR_PCI_DEVINTX (128) was never tested by any caller, so failures
+     * went undetected and indexed arrays out of bounds. */
+    return VIOAPIC_NUM_PINS;
+
+out:
+    hvm_irq->pci_devintx_gsi[devintx] = gsi | flag;
+    hvm_irq->pci_gsi_cnt[gsi - NR_ISAIRQS]++;
+    return gsi;
+}
+
+/* Must be protected by d->arch.hvm_domain.irq_lock
+ * as d->arch.hvm_domain.irq is accessed
+ *
+ * NOTE(review): the header declares this routine protected by the
+ * domain's event_lock, and irq_load_pci() takes d->event_lock (not
+ * irq_lock) around the call -- confirm which lock actually guards
+ * hvm_domain.irq on all call paths.
+ *
+ * Non-pass-through lookup/allocation wrapper; returns the guest GSI,
+ * or the error sentinel from __hvm_pci_intx_gsi_find() on failure.
+ */
+uint8_t hvm_pci_intx_gsi_find(struct domain *d, uint32_t device, uint32_t intx)
+{
+    uint8_t gsi;
+
+    gsi = __hvm_pci_intx_gsi_find(d, device, intx, 0);
+
+    dprintk(XENLOG_G_INFO, "%s(%d, %u, %u) = %u\n",
+            __func__, d->domain_id, device, intx, gsi);
+    return gsi;
+}
+
+/* Bind (device, intx) to a guest GSI for a pass-through device, marking
+ * the GSI so non-pass-through devices avoid sharing it where possible.
+ * Takes irq_lock itself; returns the GSI, or the error sentinel from
+ * __hvm_pci_intx_gsi_find() if no GSI is available.
+ */
+uint8_t hvm_pci_intx_gsi_bind_pt(struct domain *d, uint32_t device,
+                                 uint32_t intx)
+{
+    uint8_t gsi;
+
+    spin_lock(&d->arch.hvm_domain.irq_lock);
+    gsi = __hvm_pci_intx_gsi_find(d, device, intx, GSI_F_PASS_THROUGH);
+    spin_unlock(&d->arch.hvm_domain.irq_lock);
+
+    dprintk(XENLOG_G_INFO, "%s(%d, %u, %u) = %u\n",
+            __func__, d->domain_id, device, intx, gsi);
+    return gsi;
+}
+
+/* Release the pass-through GSI binding for (device, intx): clear the
+ * devintx mapping and drop the GSI usage count.
+ * Takes irq_lock itself; returns the GSI that was bound (or the error
+ * sentinel if the lookup failed).
+ */
+uint8_t hvm_pci_intx_gsi_unbind_pt(struct domain *d, uint32_t device,
+                                   uint32_t intx)
+{
+    uint8_t gsi;
+    int devintx = PCI_DEVINTX(device, intx);
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    spin_lock(&d->arch.hvm_domain.irq_lock);
+    gsi = __hvm_pci_intx_gsi_find(d, device, intx, GSI_F_PASS_THROUGH);
+    /* Guard against a failed lookup: indexing pci_gsi_cnt[] with the
+     * error sentinel would read/write out of bounds. */
+    if ( (gsi >= NR_ISAIRQS) && (gsi < VIOAPIC_NUM_PINS) )
+    {
+        hvm_irq->pci_devintx_gsi[devintx] = 0;
+        hvm_irq->pci_gsi_cnt[gsi - NR_ISAIRQS]--;
+    }
+    spin_unlock(&d->arch.hvm_domain.irq_lock);
+
+    dprintk(XENLOG_G_INFO, "%s(%d, %u, %u) = %u\n",
+            __func__, d->domain_id, device, intx, gsi);
+    return gsi;
+}
+
static void __hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx)
{
@@ -37,7 +158,11 @@ static void __hvm_pci_intx_assert(
if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
return;
- gsi = hvm_pci_intx_gsi(device, intx);
+ gsi = hvm_pci_intx_gsi_find(d, device, intx);
+ /* XXX: Need better error handling */
+ if ( gsi == VIOAPIC_NUM_PINS )
+ return;
+
if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
vioapic_irq_positive_edge(d, gsi);
@@ -70,7 +195,10 @@ static void __hvm_pci_intx_deassert(
if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
return;
- gsi = hvm_pci_intx_gsi(device, intx);
+ gsi = hvm_pci_intx_gsi_find(d, device, intx);
+ /* XXX: Need better error handling */
+ if ( gsi == VIOAPIC_NUM_PINS )
+ return;
--hvm_irq->gsi_assert_count[gsi];
link = hvm_pci_intx_link(device, intx);
@@ -472,7 +600,9 @@ static int irq_load_pci(struct domain *d
if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
{
/* Direct GSI assert */
- gsi = hvm_pci_intx_gsi(dev, intx);
+ spin_lock(&d->event_lock);
+ gsi = hvm_pci_intx_gsi_find(d, dev, intx);
+ spin_unlock(&d->event_lock);
hvm_irq->gsi_assert_count[gsi]++;
/* PCI-ISA bridge assert */
link = hvm_pci_intx_link(dev, intx);
Index: xen-unstable.hg/xen/include/asm-x86/hvm/irq.h
===================================================================
--- xen-unstable.hg.orig/xen/include/asm-x86/hvm/irq.h 2009-03-02
15:02:28.000000000 +1100
+++ xen-unstable.hg/xen/include/asm-x86/hvm/irq.h 2009-03-02
15:02:34.000000000 +1100
@@ -27,6 +27,13 @@
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
+#define NR_PCI_DEV 32
+#define NR_PCI_INTX 4
+#define NR_PCI_DEVINTX (NR_PCI_DEV*NR_PCI_INTX)
+/* Parenthesize macro arguments: without this, PCI_DEVINTX(d + 1, i)
+ * would misexpand to ((d + 1 * 4) + i). */
+#define PCI_DEVINTX(dev, intx) (((dev) * 4) + (intx))
+
+#define NR_PCI_GSI (VIOAPIC_NUM_PINS - NR_ISAIRQS)
+
+
struct hvm_irq {
/*
* Virtual interrupt wires for a single PCI bus.
@@ -88,10 +95,20 @@ struct hvm_irq {
u8 round_robin_prev_vcpu;
struct hvm_irq_dpci *dpci;
+
+ /* Map guest pci device to guest gsi */
+ uint8_t pci_devintx_gsi[NR_PCI_DEVINTX];
+ /* PCI devices using a GSI */
+ uint8_t pci_gsi_cnt[NR_PCI_GSI];
};
-#define hvm_pci_intx_gsi(dev, intx) \
- (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
+/* Must be protected by domain's event_lock as hvm_irq_dpci is accessed */
+uint8_t hvm_pci_intx_gsi_find(struct domain *d, uint32_t device, uint32_t
intx);
+uint8_t hvm_pci_intx_gsi_bind_pt(struct domain *d, uint32_t device,
+ uint32_t intx);
+uint8_t hvm_pci_intx_gsi_unbind_pt(struct domain *d, uint32_t device,
+ uint32_t intx);
+
#define hvm_pci_intx_link(dev, intx) \
(((dev) + (intx)) & 3)
Index: xen-unstable.hg/xen/drivers/passthrough/io.c
===================================================================
--- xen-unstable.hg.orig/xen/drivers/passthrough/io.c 2009-03-02
15:02:28.000000000 +1100
+++ xen-unstable.hg/xen/drivers/passthrough/io.c 2009-03-02
15:02:34.000000000 +1100
@@ -129,7 +129,6 @@ int pt_irq_create_bind_vtd(
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
- guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
hvm_irq_dpci->link_cnt[link]++;
@@ -140,6 +139,13 @@ int pt_irq_create_bind_vtd(
return -ENOMEM;
}
+ guest_gsi = hvm_pci_intx_gsi_bind_pt(d, device, intx);
+ if (guest_gsi == VIOAPIC_NUM_PINS)
+ {
+ spin_unlock(&d->event_lock);
+ return -ENODEV;
+ }
+
digl->device = device;
digl->intx = intx;
digl->gsi = guest_gsi;
@@ -189,6 +195,7 @@ int pt_irq_create_bind_vtd(
hvm_irq_dpci->girq[guest_gsi].intx = 0;
hvm_irq_dpci->girq[guest_gsi].device = 0;
hvm_irq_dpci->girq[guest_gsi].valid = 0;
+ hvm_pci_intx_gsi_unbind_pt(d, device, intx);
list_del(&digl->list);
hvm_irq_dpci->link_cnt[link]--;
spin_unlock(&d->event_lock);
@@ -217,13 +224,8 @@ int pt_irq_destroy_bind_vtd(
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
- guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
- gdprintk(XENLOG_INFO,
- "pt_irq_destroy_bind_vtd: machine_gsi=%d "
- "guest_gsi=%d, device=%d, intx=%d.\n",
- machine_gsi, guest_gsi, device, intx);
spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
@@ -234,6 +236,7 @@ int pt_irq_destroy_bind_vtd(
return -EINVAL;
}
+ guest_gsi = hvm_pci_intx_gsi_unbind_pt(d, device, intx);
hvm_irq_dpci->link_cnt[link]--;
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
@@ -268,6 +271,10 @@ int pt_irq_destroy_bind_vtd(
}
spin_unlock(&d->event_lock);
gdprintk(XENLOG_INFO,
+ "pt_irq_destroy_bind_vtd: machine_gsi=%d "
+ "guest_gsi=%d, device=%d, intx=%d.\n",
+ machine_gsi, guest_gsi, device, intx);
+ gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|