[Xen-devel] [PATCH] Enable PCI passthrough with stub domain.

To: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] Enable PCI passthrough with stub domain.
From: Shohei Fujiwara <fujiwara-sxa@xxxxxxxxxxxxxxx>
Date: Fri, 09 Jan 2009 19:08:41 +0900

This patch enables PCI passthrough with a stub domain.

PCI passthrough with a stub domain has failed in the past.
The primary reason is that hypercalls issued by qemu in the stub domain are rejected.

This patch allows qemu in the stub domain to make the hypercalls needed
for PCI passthrough. For security, a hypercall is rejected if its target
domain is not the domain that the stub domain serves.
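
Conceptually, each IS_PRIV() check touched by this patch becomes a per-target
check: the caller must either be fully privileged (dom0) or be the stub domain
whose device-model target is exactly the domain being operated on. Below is a
minimal sketch of that rule; it paraphrases the IS_PRIV_FOR() check for
illustration only, and the struct fields are assumptions rather than the exact
definitions in the tree:

    /* Sketch of the per-target privilege rule applied by this patch.
     * Not the real Xen definitions; field names are illustrative. */
    struct domain {
        int            is_privileged;  /* full (dom0-style) privilege */
        struct domain *target;         /* domain this stub domain serves,
                                        * or NULL for ordinary domains */
    };

    /* The caller may act on domain t if it is privileged, or if t is
     * the one domain it was built to serve. */
    static int is_priv_for(const struct domain *caller, const struct domain *t)
    {
        return caller->is_privileged ||
               (caller->target != NULL && caller->target == t);
    }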

To use PCI passthrough, please configure the pciback driver to be
permissive, because qemu has its own virtualization code for PCI
passthrough. For example:

    # cd /sys/bus/pci/drivers/pciback
    # echo "0000:0d:00.0" > permissive
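
For reference, once the device is permissive and this patch is applied, the
device model running in the stub domain can issue the physdev operations
itself for its target domain. The sketch below shows roughly what such a call
looks like through libxc; the wrapper name and signature are written from
memory of this tree, so please treat them as assumptions rather than the
exact interface:

    /* Rough sketch: map a GSI to a pirq for the target domain from the
     * stub-domain device model. Assumes the old int-handle libxc interface;
     * the exact xc_physdev_map_pirq() signature may differ in your tree. */
    #include <stdio.h>
    #include <xenctrl.h>

    static int map_gsi_for_target(int target_domid, int gsi)
    {
        int xc_handle, rc, pirq = gsi;

        xc_handle = xc_interface_open();
        if (xc_handle < 0)
            return -1;

        /* With this patch the hypervisor accepts this call from the stub
         * domain, as long as target_domid is the stub domain's own target. */
        rc = xc_physdev_map_pirq(xc_handle, target_domid, gsi, &pirq);
        if (rc == 0)
            printf("GSI %d -> pirq %d for domain %d\n", gsi, pirq, target_domid);

        xc_interface_close(xc_handle);
        return rc;
    }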

Currently only Linux guests work.
Windows guests don't work yet, so I'm still debugging that.
I hope this patch will be useful for other developers in the community.

Thanks,
--
Shohei Fujiwara.


Signed-off-by: Shohei Fujiwara <fujiwara-sxa@xxxxxxxxxxxxxxx>

diff -r b9721b2766c1 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Wed Jan 07 11:25:00 2009 +0000
+++ b/xen/arch/x86/irq.c        Thu Jan 08 18:58:36 2009 +0900
@@ -857,7 +857,7 @@ int map_domain_pirq(
     if ( type == MAP_PIRQ_TYPE_MSI )
         return -EINVAL;
 
-    if ( !IS_PRIV(current->domain) )
+    if ( !IS_PRIV_FOR(current->domain, d) )
         return -EPERM;
 
     if ( pirq < 0 || pirq >= NR_IRQS || vector < 0 || vector >= NR_VECTORS )
@@ -931,7 +931,7 @@ int unmap_domain_pirq(struct domain *d, 
     if ( (pirq < 0) || (pirq >= NR_IRQS) )
         return -EINVAL;
 
-    if ( !IS_PRIV(current->domain) )
+    if ( !IS_PRIV_FOR(current->domain, d) )
         return -EINVAL;
 
     ASSERT(spin_is_locked(&pcidevs_lock));
diff -r b9721b2766c1 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Wed Jan 07 11:25:00 2009 +0000
+++ b/xen/arch/x86/physdev.c    Thu Jan 08 18:58:36 2009 +0900
@@ -34,9 +34,6 @@ static int physdev_map_pirq(struct physd
     struct msi_info _msi;
     void *map_data = NULL;
 
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
     if ( !map )
         return -EINVAL;
 
@@ -49,6 +46,14 @@ static int physdev_map_pirq(struct physd
     {
         ret = -ESRCH;
         goto free_domain;
+    }
+    else
+    {
+        if ( !IS_PRIV_FOR(current->domain, d) )
+        {
+            ret = -EPERM;
+            goto free_domain;
+        }
     }
 
     /* Verify or get vector. */
@@ -161,8 +166,8 @@ static int physdev_unmap_pirq(struct phy
     struct domain *d;
     int ret;
 
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
+    if ( !unmap )
+        return -EINVAL;
 
     if ( unmap->domid == DOMID_SELF )
         d = rcu_lock_domain(current->domain);
@@ -171,6 +176,14 @@ static int physdev_unmap_pirq(struct phy
 
     if ( d == NULL )
         return -ESRCH;
+    else
+    {
+        if ( !IS_PRIV_FOR(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
+    }
 
     spin_lock(&pcidevs_lock);
     spin_lock(&d->event_lock);
diff -r b9721b2766c1 xen/common/domctl.c
--- a/xen/common/domctl.c       Wed Jan 07 11:25:00 2009 +0000
+++ b/xen/common/domctl.c       Thu Jan 08 18:58:36 2009 +0900
@@ -193,14 +193,49 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
     long ret = 0;
     struct xen_domctl curop, *op = &curop;
 
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
     if ( copy_from_guest(op, u_domctl, 1) )
         return -EFAULT;
 
     if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
         return -EACCES;
+
+    switch ( op->cmd )
+    {
+    case XEN_DOMCTL_pin_mem_cacheattr:
+    case XEN_DOMCTL_memory_mapping:
+    case XEN_DOMCTL_ioport_mapping:
+    case XEN_DOMCTL_assign_device:
+    case XEN_DOMCTL_deassign_device:
+    case XEN_DOMCTL_bind_pt_irq:
+    case XEN_DOMCTL_unbind_pt_irq:
+    {
+        struct domain *d;
+
+        if ( op->domain != DOMID_SELF )
+        {
+            d = rcu_lock_domain_by_id(op->domain);
+
+            if ( d == NULL )
+                return -ESRCH;
+            else
+            {
+                if ( !IS_PRIV_FOR(current->domain, d) )
+                {
+                    rcu_unlock_domain(d);
+                    return -EPERM;
+                }
+                else
+                    rcu_unlock_domain(d);
+            }
+            break;
+        }
+    }
+
+    default:
+        if ( !IS_PRIV(current->domain) )
+            return -EPERM;
+        break;
+    }
 
     spin_lock(&domctl_lock);
 
