This patch allows hypercalls from qemu running in a stub domain.
xend issues the following hypercalls for PCI pass-through (the grant
path they drive in the hypervisor is sketched after the list):
XEN_DOMCTL_assign_device
XEN_DOMCTL_deassign_device
XEN_DOMCTL_ioport_permission
XEN_DOMCTL_iomem_permission
XEN_DOMCTL_irq_permission
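
For orientation, the grant side looks roughly like this. This is a
simplified sketch of Xen's existing XEN_DOMCTL_irq_permission handling
(quoted from memory, not part of this patch); it records the state
that irq_access_permitted() tests later:

    /* Simplified sketch of the existing grant path (not part of this
     * patch): dom0 records whether domain d may use pirq. */
    case XEN_DOMCTL_irq_permission:
    {
        unsigned int pirq = op->u.irq_permission.pirq;

        if ( pirq >= NR_IRQS )
            ret = -EINVAL;
        else if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);
    }
    break;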
In addition, this patch allows the stub domain to call the following
hypercalls. The hypervisor checks permission with IS_PRIV_FOR and
{ioports,iomem,irq}_access_permitted (see the sketch after the list):
XEN_DOMCTL_memory_mapping
XEN_DOMCTL_ioport_mapping
PHYSDEVOP_map_pirq
PHYSDEVOP_unmap_pirq
XEN_DOMCTL_bind_pt_irq
XEN_DOMCTL_unbind_pt_irq
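
The two permission levels combine as below. This is an illustrative
sketch only: stub_check_irq() is a made-up helper name, while the two
predicates are the ones the patch actually uses.

    /* Illustrative only, not code from this patch. */
    static int stub_check_irq(struct domain *caller, struct domain *d,
                              int machine_irq)
    {
        /* Level 1: caller must be dom0, or the stub domain whose
         * target is d. */
        if ( !IS_PRIV_FOR(caller, d) )
            return -EPERM;

        /* Level 2: dom0 must have granted d access to this IRQ in
         * advance via XEN_DOMCTL_irq_permission. */
        if ( !irq_access_permitted(d, machine_irq) )
            return -EPERM;

        return 0;
    }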
This patch also allows the stub domain to call the following hypercall.
Here the hypervisor checks permission with IS_PRIV_FOR alone (its
definition is quoted after the list for reference):
XEN_DOMCTL_pin_mem_cacheattr
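
For reference, IS_PRIV_FOR is the existing predicate from
xen/include/xen/sched.h (quoted from memory, so treat as approximate):

    /* A domain is privileged for a target if it is dom0, or if the
     * target is the domain it was built to manage (d->target, which
     * is set for stub domains). */
    #define IS_PRIV(_d)         ((_d)->is_privileged)
    #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || \
                                 ((_d)->target && (_d)->target == (_t)))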
Thanks,
--
Shohei Fujiwara
Signed-off-by: Shohei Fujiwara <fujiwara-sxa@xxxxxxxxxxxxxxx>
diff -r 5b8f9ef92e00 tools/python/xen/xend/server/pciif.py
--- a/tools/python/xen/xend/server/pciif.py Wed Mar 11 19:13:47 2009 +0000
+++ b/tools/python/xen/xend/server/pciif.py Mon Mar 23 18:19:24 2009 +0900
@@ -289,7 +289,12 @@ class PciController(DevController):
def setupOneDevice(self, domain, bus, slot, func):
""" Attach I/O resources for device to frontend domain
"""
- fe_domid = self.getDomid()
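+ # When qemu runs in a stub domain, attach the I/O resources to
+ # the target HVM domain rather than to the stub domain itself.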
+ if self.vm.info.target():
+ fe_domid = self.vm.info.target()
+ else:
+ fe_domid = self.getDomid()
try:
dev = PciDevice(domain, bus, slot, func)
@@ -366,12 +369,6 @@ class PciController(DevController):
if rc<0:
raise VmError(('pci: failed to remove msi-x iomem'))
- rc = xc.physdev_map_pirq(domid = fe_domid,
- index = dev.irq,
- pirq = dev.irq)
- if rc < 0:
- raise VmError(('pci: failed to map irq on device '+
- '%s - errno=%d')%(dev.name,rc))
if dev.irq>0:
log.debug('pci: enabling irq %d'%dev.irq)
rc = xc.domain_irq_permission(domid = fe_domid, pirq = dev.irq,
@@ -470,7 +467,10 @@ class PciController(DevController):
def cleanupOneDevice(self, domain, bus, slot, func):
""" Detach I/O resources for device from frontend domain
"""
- fe_domid = self.getDomid()
+ if self.vm.info.target():
+ fe_domid = self.vm.info.target()
+ else:
+ fe_domid = self.getDomid()
try:
dev = PciDevice(domain, bus, slot, func)
diff -r 5b8f9ef92e00 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Wed Mar 11 19:13:47 2009 +0000
+++ b/xen/arch/x86/domctl.c Tue Mar 24 15:11:11 2009 +0900
@@ -763,6 +763,13 @@ long arch_do_domctl(
goto bind_out;
ret = -ESRCH;
+
+ if ( !irq_access_permitted(d, bind->machine_irq) )
+ {
+ gdprintk(XENLOG_ERR, "dom%d: binding of irq %d is not permitted\n",
+ d->domain_id, bind->machine_irq);
+ goto bind_out;
+ }
if ( iommu_enabled )
{
spin_lock(&pcidevs_lock);
@@ -786,6 +793,14 @@ long arch_do_domctl(
if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
break;
bind = &(domctl->u.bind_pt_irq);
+
+ if ( !irq_access_permitted(d, bind->machine_irq) )
+ {
+ gdprintk(XENLOG_ERR, "dom%d: unbinding of irq %d is not permitted\n",
+ d->domain_id, bind->machine_irq);
+ rcu_unlock_domain(d);
+ break;
+ }
if ( iommu_enabled )
{
spin_lock(&pcidevs_lock);
@@ -815,13 +830,26 @@ long arch_do_domctl(
break;
ret=0;
+
+ if ( !iomem_access_permitted(d, mfn, mfn + nr_mfns - 1) )
+ {
+ gdprintk(XENLOG_ERR, "dom%d: %s of memory mapping 0x%lx "
+ "is not permitted\n",
+ d->domain_id,
+ domctl->u.memory_mapping.add_mapping ?
+ "adding" : "removing",
+ mfn);
+ rcu_unlock_domain(d);
+ ret = -ESRCH;
+ break;
+ }
+
if ( domctl->u.memory_mapping.add_mapping )
{
gdprintk(XENLOG_INFO,
"memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
gfn, mfn, nr_mfns);
- ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
for ( i = 0; i < nr_mfns; i++ )
set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
}
@@ -833,7 +861,6 @@ long arch_do_domctl(
for ( i = 0; i < nr_mfns; i++ )
clear_mmio_p2m_entry(d, gfn+i);
- ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
}
rcu_unlock_domain(d);
@@ -866,6 +893,21 @@ long arch_do_domctl(
break;
hd = domain_hvm_iommu(d);
+
+ if ( !ioports_access_permitted(d, fmp, fmp + np - 1) )
+ {
+ gdprintk(XENLOG_ERR, "dom%d: %s of ioport mapping 0x%x "
+ "is not permitted\n",
+ d->domain_id,
+ domctl->u.ioport_mapping.add_mapping ?
+ "adding" : "removing",
+ fmp);
+ rcu_unlock_domain(d);
+ break;
+ }
+ else
+ ret = 0;
+
if ( domctl->u.ioport_mapping.add_mapping )
{
gdprintk(XENLOG_INFO,
@@ -888,7 +930,6 @@ long arch_do_domctl(
g2m_ioport->np = np;
list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
}
- ret = ioports_permit_access(d, fmp, fmp + np - 1);
}
else
{
@@ -902,7 +943,6 @@ long arch_do_domctl(
xfree(g2m_ioport);
break;
}
- ret = ioports_deny_access(d, fmp, fmp + np - 1);
}
rcu_unlock_domain(d);
}
diff -r 5b8f9ef92e00 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Wed Mar 11 19:13:47 2009 +0000
+++ b/xen/arch/x86/irq.c Tue Mar 24 15:15:01 2009 +0900
@@ -910,7 +910,7 @@ int map_domain_pirq(
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
- if ( !IS_PRIV(current->domain) )
+ if ( !IS_PRIV_FOR(current->domain, d) )
return -EPERM;
if ( pirq < 0 || pirq >= NR_IRQS || vector < 0 || vector >= NR_VECTORS )
@@ -929,14 +929,6 @@ int map_domain_pirq(
dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
d->domain_id, pirq, vector);
return -EINVAL;
- }
-
- ret = irq_permit_access(d, pirq);
- if ( ret )
- {
- dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
- d->domain_id, pirq);
- return ret;
}
desc = &irq_desc[vector];
@@ -988,7 +980,7 @@ int unmap_domain_pirq(struct domain *d,
if ( (pirq < 0) || (pirq >= NR_IRQS) )
return -EINVAL;
- if ( !IS_PRIV(current->domain) )
+ if ( !IS_PRIV_FOR(current->domain, d) )
return -EINVAL;
ASSERT(spin_is_locked(&pcidevs_lock));
@@ -1003,6 +995,13 @@ int unmap_domain_pirq(struct domain *d,
goto done;
}
+ if ( !irq_access_permitted(d, pirq) )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: unmapping of pirq %d is not permitted\n",
+ d->domain_id, pirq);
+ ret = -EINVAL;
+ goto done;
+ }
forced_unbind = pirq_guest_force_unbind(d, pirq);
if ( forced_unbind )
dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
diff -r 5b8f9ef92e00 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Wed Mar 11 19:13:47 2009 +0000
+++ b/xen/arch/x86/physdev.c Mon Mar 23 17:33:17 2009 +0900
@@ -34,9 +34,6 @@ static int physdev_map_pirq(struct physd
struct msi_info _msi;
void *map_data = NULL;
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
if ( !map )
return -EINVAL;
@@ -47,6 +44,22 @@ static int physdev_map_pirq(struct physd
if ( d == NULL )
{
+ ret = -ESRCH;
+ goto free_domain;
+ }
+ else
+ {
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ {
+ ret = -EPERM;
+ goto free_domain;
+ }
+ }
+
+ if ( !irq_access_permitted(d, map->index) )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: mapping of irq %d is not permitted\n",
+ d->domain_id, map->index);
ret = -ESRCH;
goto free_domain;
}
@@ -62,7 +75,7 @@ static int physdev_map_pirq(struct physd
ret = -EINVAL;
goto free_domain;
}
- vector = domain_irq_to_vector(current->domain, map->index);
+ vector = domain_irq_to_vector(dom0, map->index);
if ( !vector )
{
dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
@@ -160,9 +173,6 @@ static int physdev_unmap_pirq(struct phy
struct domain *d;
int ret;
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
if ( unmap->domid == DOMID_SELF )
d = rcu_lock_domain(current->domain);
else
@@ -170,6 +180,14 @@ static int physdev_unmap_pirq(struct phy
if ( d == NULL )
return -ESRCH;
+ else
+ {
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
+ }
spin_lock(&pcidevs_lock);
spin_lock(&d->event_lock);
diff -r 5b8f9ef92e00 xen/common/domctl.c
--- a/xen/common/domctl.c Wed Mar 11 19:13:47 2009 +0000
+++ b/xen/common/domctl.c Mon Mar 23 18:05:20 2009 +0900
@@ -193,14 +193,46 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
long ret = 0;
struct xen_domctl curop, *op = &curop;
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
if ( copy_from_guest(op, u_domctl, 1) )
return -EFAULT;
if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
return -EACCES;
+
+ switch ( op->cmd )
+ {
+ case XEN_DOMCTL_pin_mem_cacheattr:
+ case XEN_DOMCTL_memory_mapping:
+ case XEN_DOMCTL_ioport_mapping:
+ case XEN_DOMCTL_bind_pt_irq:
+ case XEN_DOMCTL_unbind_pt_irq:
+ {
+ struct domain *d;
+
+ if ( op->domain != DOMID_SELF )
+ {
+ d = rcu_lock_domain_by_id(op->domain);
+ if ( d == NULL )
+ return -ESRCH;
+ else
+ {
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
+ else
+ rcu_unlock_domain(d);
+ }
+ break;
+ }
+ }
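+ /* op->domain == DOMID_SELF falls through to the IS_PRIV() check below. */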
+ default:
+ if ( !IS_PRIV(current->domain) )
+ return -EPERM;
+ break;
+ }
spin_lock(&domctl_lock);