[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 11/12] x86/irq: convert irq_desc old_cpu_mask field to integer


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • Date: Thu, 20 Nov 2025 10:58:25 +0100
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=citrix.com; dmarc=pass action=none header.from=citrix.com; dkim=pass header.d=citrix.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=wYLemCz/ejmq9i/3iHjcMi3Ukp02UqKJT5V2Wj0bREE=; b=BYco59qnV3GU9amhluVdFa3cLkJNgMgu+Yj5sFg/T+G6beUrAS0pHgPFcnkH8nhiS7l8TTcBUVjEkRAoccinwK2jTepP2TbLMtt1RdeYHfm5ATbtXgQR1X3Ntec3hgnxpoYQdL1fVfejG1bygdq40ibkUMFqlrCnhgOup/EdmlC7OjeZGXZZYfbkKKdGAZXUWBLhrvNn+EQ04qQQsrYcmEZvQKX/DgX/i1hMmM4a8Z2dk6jYEf5S3Ix7I1fDIKAWmYg2koO/Fokb7IYzaBP4AVkg/5aJAEfZYriRGWRANbDLgJUJICGXkdk1EHPHls65bZZNk9W9aGU4lM3FIMej5Q==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=O+C0Qn5mHIiXm3yBaBkfqza/XW5r98lQRcqT2DPhCYSzchDN08kDNn48auRk0Hv8P3BPkm7itp5frCvk/amnoi5/1hqYcsblRGfd/V9LoGVW+0fgNoTI4BRDK68DUJN/gJcrT87ofPSyluu5rzTOUL191qANt2L00SCulTI223OVEXTyPquZPCRWNr8FBp0WWUA5qAP5AGwZAZOtyaeFPDhIcF1v8rn6v93QPfmlqxH4PGyQ8glTds7h1ttZhOSw3nek5MbF3rcfGg9AyfaelrXRJSKxToEru1zBuSK7AUg5H2a0ft2LHizeTkDK8n4HFECtGleK1PCpsNK/dSdwpA==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=citrix.com;
  • Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • Delivery-date: Thu, 20 Nov 2025 09:59:06 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

As the cpu_mask field has already been converted to an integer, propagate
such change to the field that stores the previous target CPU and convert it
to an integer.

Also convert the move_cleanup_count field into a boolean, since the
previous target will always be a single CPU.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/include/asm/irq.h |  4 +-
 xen/arch/x86/irq.c             | 90 +++++++++++++---------------------
 2 files changed, 35 insertions(+), 59 deletions(-)

diff --git a/xen/arch/x86/include/asm/irq.h b/xen/arch/x86/include/asm/irq.h
index 97c706acebf2..bc59ce7c3ffb 100644
--- a/xen/arch/x86/include/asm/irq.h
+++ b/xen/arch/x86/include/asm/irq.h
@@ -72,10 +72,10 @@ struct arch_irq_desc {
 /* Special target CPU values. */
 #define CPU_INVALID  ~0U
         unsigned int cpu;                /* Target CPU of the interrupt. */
-        cpumask_var_t old_cpu_mask;
+        unsigned int old_cpu;
         cpumask_var_t pending_mask;
         vmask_t *used_vectors;
-        unsigned move_cleanup_count;
+        bool move_cleanup : 1;
         bool move_in_progress : 1;
         int8_t used;
         /*
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index a56d1e8fc267..680f190da065 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -115,7 +115,7 @@ static void release_old_vec(struct irq_desc *desc)
     unsigned int vector = desc->arch.old_vector;
 
     desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpumask_clear(desc->arch.old_cpu_mask);
+    desc->arch.old_cpu = CPU_INVALID;
 
     if ( !valid_irq_vector(vector) )
         ASSERT_UNREACHABLE();
@@ -195,7 +195,6 @@ static void _clear_irq_vector(struct irq_desc *desc)
 {
     unsigned int cpu = desc->arch.cpu, old_vector, irq = desc->irq;
     unsigned int vector = desc->arch.vector;
-    cpumask_t *tmp_mask = this_cpu(scratch_cpumask);
 
     BUG_ON(!valid_irq_vector(vector));
 
@@ -221,10 +220,10 @@ static void _clear_irq_vector(struct irq_desc *desc)
     {
         /* If we were in motion, also clear desc->arch.old_vector */
         old_vector = desc->arch.old_vector;
-        cpumask_and(tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
 
-        for_each_cpu(cpu, tmp_mask)
+        if ( cpu_online(desc->arch.old_cpu) )
         {
+            cpu = desc->arch.old_cpu;
             ASSERT(per_cpu(vector_irq, cpu)[old_vector] == irq);
             TRACE_TIME(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
             per_cpu(vector_irq, cpu)[old_vector] = ~irq;
@@ -388,16 +387,11 @@ int irq_to_vector(int irq)
 
 int arch_init_one_irq_desc(struct irq_desc *desc)
 {
-    if ( !alloc_cpumask_var(&desc->arch.old_cpu_mask) )
-        return -ENOMEM;
-
     if ( !alloc_cpumask_var(&desc->arch.pending_mask) )
-    {
-        free_cpumask_var(desc->arch.old_cpu_mask);
         return -ENOMEM;
-    }
 
     desc->arch.cpu = CPU_INVALID;
+    desc->arch.old_cpu = CPU_INVALID;
     desc->arch.vector = IRQ_VECTOR_UNASSIGNED;
     desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
     desc->arch.creator_domid = DOMID_INVALID;
@@ -554,7 +548,7 @@ static int _assign_irq_vector(struct irq_desc *desc, const cpumask_t *mask)
         }
     }
 
-    if ( desc->arch.move_in_progress || desc->arch.move_cleanup_count )
+    if ( desc->arch.move_in_progress || desc->arch.move_cleanup )
     {
         /*
          * If the current destination is online refuse to shuffle.  Retry after
@@ -570,9 +564,9 @@ static int _assign_irq_vector(struct irq_desc *desc, const cpumask_t *mask)
          * ->arch.old_cpu_mask.
          */
         ASSERT(valid_irq_vector(desc->arch.old_vector));
-        ASSERT(cpumask_intersects(desc->arch.old_cpu_mask, &cpu_online_map));
+        ASSERT(cpu_online(desc->arch.old_cpu));
 
-        if ( cpumask_intersects(desc->arch.old_cpu_mask, mask) )
+        if ( cpumask_test_cpu(desc->arch.old_cpu, mask) )
         {
             /*
              * Fallback to the old destination if moving is in progress and the
@@ -581,16 +575,16 @@ static int _assign_irq_vector(struct irq_desc *desc, const cpumask_t *mask)
              * in the 'mask' parameter.
              */
             desc->arch.vector = desc->arch.old_vector;
-            desc->arch.cpu = cpumask_any(desc->arch.old_cpu_mask);
+            desc->arch.cpu = desc->arch.old_cpu;
 
             /* Undo any possibly done cleanup. */
             per_cpu(vector_irq, desc->arch.cpu)[desc->arch.vector] = irq;
 
             /* Cancel the pending move and release the current vector. */
             desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
-            cpumask_clear(desc->arch.old_cpu_mask);
+            desc->arch.old_cpu = CPU_INVALID;
             desc->arch.move_in_progress = 0;
-            desc->arch.move_cleanup_count = 0;
+            desc->arch.move_cleanup = false;
             if ( desc->arch.used_vectors )
             {
                 ASSERT(test_bit(old_vector, desc->arch.used_vectors));
@@ -656,7 +650,7 @@ next:
         current_vector = vector;
         current_offset = offset;
 
-        if ( desc->arch.move_in_progress || desc->arch.move_cleanup_count )
+        if ( desc->arch.move_in_progress || desc->arch.move_cleanup )
         {
             ASSERT(!cpu_online(desc->arch.cpu));
             /*
@@ -673,12 +667,13 @@ next:
         }
         else if ( valid_irq_vector(old_vector) )
         {
-            cpumask_clear(desc->arch.old_cpu_mask);
-            if ( cpu_online(desc->arch.cpu) )
-                cpumask_set_cpu(desc->arch.cpu, desc->arch.old_cpu_mask);
+            desc->arch.old_cpu = CPU_INVALID;
             desc->arch.old_vector = desc->arch.vector;
-            if ( !cpumask_empty(desc->arch.old_cpu_mask) )
+            if ( cpu_online(desc->arch.cpu) )
+            {
+                desc->arch.old_cpu = desc->arch.cpu;
                 desc->arch.move_in_progress = 1;
+            }
             else
                 /* This can happen while offlining a CPU. */
                 release_old_vec(desc);
@@ -833,7 +828,7 @@ void cf_check irq_move_cleanup_interrupt(void)
         if (desc->handler->enable == enable_8259A_irq)
             goto unlock;
 
-        if (!desc->arch.move_cleanup_count)
+        if ( !desc->arch.move_cleanup )
             goto unlock;
 
         if ( vector == desc->arch.vector && me == desc->arch.cpu )
@@ -862,13 +857,10 @@ void cf_check irq_move_cleanup_interrupt(void)
         TRACE_TIME(TRC_HW_IRQ_MOVE_CLEANUP, irq, vector, me);
 
         per_cpu(vector_irq, me)[vector] = ~irq;
-        desc->arch.move_cleanup_count--;
+        desc->arch.move_cleanup = false;
 
-        if ( desc->arch.move_cleanup_count == 0 )
-        {
-            ASSERT(vector == desc->arch.old_vector);
-            release_old_vec(desc);
-        }
+        ASSERT(vector == desc->arch.old_vector);
+        release_old_vec(desc);
 unlock:
         spin_unlock(&desc->lock);
     }
@@ -876,12 +868,11 @@ unlock:
 
 static void send_cleanup_vector(struct irq_desc *desc)
 {
-    cpumask_and(desc->arch.old_cpu_mask, desc->arch.old_cpu_mask,
-                &cpu_online_map);
-    desc->arch.move_cleanup_count = cpumask_weight(desc->arch.old_cpu_mask);
-
-    if ( desc->arch.move_cleanup_count )
-        send_IPI_mask(desc->arch.old_cpu_mask, IRQ_MOVE_CLEANUP_VECTOR);
+    if ( cpu_online(desc->arch.old_cpu) )
+    {
+        desc->arch.move_cleanup = true;
+        send_IPI_mask(cpumask_of(desc->arch.old_cpu), IRQ_MOVE_CLEANUP_VECTOR);
+    }
     else
         release_old_vec(desc);
 
@@ -2003,7 +1994,7 @@ void do_IRQ(struct cpu_user_regs *regs)
                            ~irq, CPUMASK_PR(desc->affinity),
                            /* TODO: handle hipri vectors nicely. */
                            CPUMASK_PR(get_cpumask(desc->arch.cpu)),
-                           CPUMASK_PR(desc->arch.old_cpu_mask),
+                           CPUMASK_PR(get_cpumask(desc->arch.old_cpu)),
                            desc->arch.vector, desc->arch.old_vector,
                            desc->handler->typename, desc->status);
                     spin_unlock(&desc->lock);
@@ -2636,26 +2627,14 @@ void fixup_irqs(void)
             continue;
         }
 
-        if ( desc->arch.move_cleanup_count )
+        if ( desc->arch.move_cleanup && !cpu_online(desc->arch.old_cpu) )
         {
             /* The cleanup IPI may have got sent while we were still online. */
-            cpumask_andnot(affinity, desc->arch.old_cpu_mask,
-                           &cpu_online_map);
-            desc->arch.move_cleanup_count -= cpumask_weight(affinity);
-            if ( !desc->arch.move_cleanup_count )
-                release_old_vec(desc);
-            else
-                /*
-                 * Adjust old_cpu_mask to account for the offline CPUs,
-                 * otherwise further calls to fixup_irqs() could subtract those
-                 * again and possibly underflow the counter.
-                 */
-                cpumask_andnot(desc->arch.old_cpu_mask, desc->arch.old_cpu_mask,
-                               affinity);
+            desc->arch.move_cleanup = false;
+            release_old_vec(desc);
         }
 
-        if ( desc->arch.move_in_progress &&
-             cpumask_test_cpu(cpu, desc->arch.old_cpu_mask) )
+        if ( desc->arch.move_in_progress && cpu == desc->arch.old_cpu )
         {
             /*
              * This to be offlined CPU was the target of an interrupt that's
@@ -2685,12 +2664,9 @@ void fixup_irqs(void)
              * per-cpu vector table will no longer have ->arch.old_vector
              * setup, and hence ->arch.old_cpu_mask would be stale.
              */
-            cpumask_clear_cpu(cpu, desc->arch.old_cpu_mask);
-            if ( cpumask_empty(desc->arch.old_cpu_mask) )
-            {
-                desc->arch.move_in_progress = 0;
-                release_old_vec(desc);
-            }
+            desc->arch.old_cpu = CPU_INVALID;
+            desc->arch.move_in_progress = 0;
+            release_old_vec(desc);
         }
 
         /*
-- 
2.51.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.