
[PATCH 09/12] x86/irq: convert cpumask parameter to integer in {,p}irq_set_affinity()


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • Date: Thu, 20 Nov 2025 10:58:23 +0100
  • Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>
  • Delivery-date: Thu, 20 Nov 2025 09:59:01 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Existing callers were already generating the passed cpumask using
cpumask_of(), so it only ever contained a single target CPU.  Reduce
complexity by passing the single target CPU as an integer parameter instead.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
The function names are misleading: {,p}irq_set_affinity() doesn't adjust the
affinity of the interrupt (desc->affinity), but rather the interrupt target
itself.  Further cleanup might be helpful to properly differentiate between
setting the interrupt affinity and setting the interrupt target.
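
For reference, a minimal sketch (not part of the patch) of how a call site
changes with this rework; the CPU index is passed directly and
irq_set_affinity() builds the single-CPU mask internally via cpumask_of():

    /* Before: the caller wraps the target CPU in a transient cpumask. */
    irq_set_affinity(desc, cpumask_of(v->processor));

    /*
     * After: the caller passes the CPU index; the helper copies
     * cpumask_of(cpu) into desc->arch.pending_mask itself.
     */
    irq_set_affinity(desc, v->processor);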
---
 xen/arch/x86/hvm/hvm.c         | 2 +-
 xen/arch/x86/include/asm/irq.h | 2 +-
 xen/arch/x86/irq.c             | 8 ++++----
 xen/common/event_channel.c     | 6 ++----
 xen/include/xen/irq.h          | 3 +--
 5 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0ff242d4a0d6..33521599a844 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -485,7 +485,7 @@ void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v)
         if ( !desc )
             return;
         ASSERT(MSI_IRQ(desc - irq_desc));
-        irq_set_affinity(desc, cpumask_of(v->processor));
+        irq_set_affinity(desc, v->processor);
         spin_unlock_irq(&desc->lock);
     }
 }
diff --git a/xen/arch/x86/include/asm/irq.h b/xen/arch/x86/include/asm/irq.h
index 355332188932..73abc8323a8d 100644
--- a/xen/arch/x86/include/asm/irq.h
+++ b/xen/arch/x86/include/asm/irq.h
@@ -202,7 +202,7 @@ void move_masked_irq(struct irq_desc *desc);
 int bind_irq_vector(int irq, int vector, unsigned int cpu);
 
 void cf_check end_nonmaskable_irq(struct irq_desc *desc, uint8_t vector);
-void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask);
+void irq_set_affinity(struct irq_desc *desc, unsigned int cpu);
 
 int init_domain_irq_mapping(struct domain *d);
 void cleanup_domain_irq_mapping(struct domain *d);
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index e09559fce856..bfb94852a6dc 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -947,7 +947,7 @@ unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask)
 }
 
 /* For re-setting irq interrupt affinity for specific irq */
-void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+void irq_set_affinity(struct irq_desc *desc, unsigned int cpu)
 {
     if (!desc->handler->set_affinity)
         return;
@@ -955,19 +955,19 @@ void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
     ASSERT(spin_is_locked(&desc->lock));
     desc->status &= ~IRQ_MOVE_PENDING;
     smp_wmb();
-    cpumask_copy(desc->arch.pending_mask, mask);
+    cpumask_copy(desc->arch.pending_mask, cpumask_of(cpu));
     smp_wmb();
     desc->status |= IRQ_MOVE_PENDING;
 }
 
-void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask)
+void pirq_set_affinity(struct domain *d, int pirq, unsigned int cpu)
 {
     unsigned long flags;
     struct irq_desc *desc = domain_spin_lock_irq_desc(d, pirq, &flags);
 
     if ( !desc )
         return;
-    irq_set_affinity(desc, mask);
+    irq_set_affinity(desc, cpu);
     spin_unlock_irqrestore(&desc->lock, flags);
 }
 
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 67700b050ad1..8e155649b171 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1226,8 +1226,7 @@ int evtchn_bind_vcpu(evtchn_port_t port, unsigned int vcpu_id)
             break;
         unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
         chn->notify_vcpu_id = v->vcpu_id;
-        pirq_set_affinity(d, chn->u.pirq.irq,
-                          cpumask_of(v->processor));
+        pirq_set_affinity(d, chn->u.pirq.irq, v->processor);
         link_pirq_port(port, chn, v);
         break;
 #endif
@@ -1712,7 +1711,6 @@ void evtchn_destroy_final(struct domain *d)
 void evtchn_move_pirqs(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    const cpumask_t *mask = cpumask_of(v->processor);
     unsigned int port;
     struct evtchn *chn;
 
@@ -1720,7 +1718,7 @@ void evtchn_move_pirqs(struct vcpu *v)
     for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
     {
         chn = evtchn_from_port(d, port);
-        pirq_set_affinity(d, chn->u.pirq.irq, mask);
+        pirq_set_affinity(d, chn->u.pirq.irq, v->processor);
     }
     read_unlock(&d->event_lock);
 }
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index 95034c0d6bb5..f0b119d23521 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -193,8 +193,7 @@ extern void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq);
 extern int pirq_guest_unmask(struct domain *d);
 extern int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share);
 extern void pirq_guest_unbind(struct domain *d, struct pirq *pirq);
-extern void pirq_set_affinity(struct domain *d, int pirq,
-                              const cpumask_t *mask);
+extern void pirq_set_affinity(struct domain *d, int pirq, unsigned int cpu);
 extern struct irq_desc *domain_spin_lock_irq_desc(
     struct domain *d, int pirq, unsigned long *pflags);
 extern struct irq_desc *pirq_spin_lock_irq_desc(
-- 
2.51.0