ChangeSet 1.1662, 2005/06/03 14:14:54+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
All cpu bitmasks in Xen now use the cpumask_t type and its operators.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
arch/ia64/domain.c | 21 -----------
arch/ia64/smp.c | 6 +--
arch/ia64/xenmisc.c | 3 +
arch/x86/domain.c | 24 ++++++++----
arch/x86/mm.c | 82 ++++++++++++++++++++------------------------
arch/x86/shadow.c | 4 +-
arch/x86/smp.c | 72 ++++++++++++++++----------------------
common/domain.c | 4 +-
common/grant_table.c | 8 ++--
common/page_alloc.c | 24 +++++++++---
common/schedule.c | 3 +
include/asm-ia64/flushtlb.h | 2 -
include/asm-x86/flushtlb.h | 40 ++++++++-------------
include/asm-x86/mm.h | 3 +
include/public/xen.h | 10 ++---
include/xen/sched.h | 7 ++-
include/xen/smp.h | 9 ++--
17 files changed, 149 insertions(+), 173 deletions(-)
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c 2005-06-03 10:02:00 -04:00
+++ b/xen/arch/ia64/domain.c 2005-06-03 10:02:00 -04:00
@@ -1242,24 +1242,3 @@
{
vcpu_pend_interrupt(dom0->vcpu[0],irq);
}
-
-/////////////////////////////////
-// added 01Apr2005, to accomodate change in xen/sched.h, not clear
-// yet if this functionality is needed on ia64
-#if 0
-static void __synchronise_lazy_execstate(void *unused)
-{
- if ( percpu_ctxt[smp_processor_id()].curr_ed != current )
- {
- __context_switch();
- load_LDT(current);
- clear_segments();
- }
-}
-#endif
-
-void synchronise_lazy_execstate(unsigned long cpuset)
-{
- //smp_subset_call_function(__synchronise_lazy_execstate, NULL, 1, cpuset);
-}
-/////////////////////////////////
diff -Nru a/xen/arch/ia64/smp.c b/xen/arch/ia64/smp.c
--- a/xen/arch/ia64/smp.c 2005-06-03 10:02:00 -04:00
+++ b/xen/arch/ia64/smp.c 2005-06-03 10:02:00 -04:00
@@ -20,14 +20,14 @@
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void flush_tlb_mask(unsigned long mask)
+void flush_tlb_mask(cpumask_t mask)
{
dummy();
}
//#if CONFIG_SMP || IA64
#if CONFIG_SMP
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void smp_send_event_check_mask(unsigned long cpu_mask)
+void smp_send_event_check_mask(cpumask_t mask)
{
dummy();
//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
@@ -35,7 +35,7 @@
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-int try_flush_tlb_mask(unsigned long mask)
+int try_flush_tlb_mask(cpumask_t mask)
{
dummy();
return 1;
diff -Nru a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c 2005-06-03 10:02:00 -04:00
+++ b/xen/arch/ia64/xenmisc.c 2005-06-03 10:02:00 -04:00
@@ -58,7 +58,8 @@
/* calls in xen/common code that are unused on ia64 */
-void sync_lazy_execstate_cpuset(unsigned long cpuset) {}
+void sync_lazy_execstate_cpu(unsigned int cpu) {}
+void sync_lazy_execstate_mask(cpumask_t mask) {}
void sync_lazy_execstate_all(void) {}
int grant_table_create(struct domain *d) { return 0; }
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-06-03 10:02:02 -04:00
+++ b/xen/arch/x86/domain.c 2005-06-03 10:02:02 -04:00
@@ -94,7 +94,7 @@
ASSERT(is_idle_task(v->domain));
percpu_ctxt[smp_processor_id()].curr_vcpu = v;
- set_bit(smp_processor_id(), &v->domain->cpuset);
+ cpu_set(smp_processor_id(), v->domain->cpumask);
v->arch.schedule_tail = continue_idle_task;
idle_loop();
@@ -744,7 +744,7 @@
}
if ( p->domain != n->domain )
- set_bit(cpu, &n->domain->cpuset);
+ cpu_set(cpu, n->domain->cpumask);
write_ptbase(n);
@@ -757,7 +757,7 @@
}
if ( p->domain != n->domain )
- clear_bit(cpu, &p->domain->cpuset);
+ cpu_clear(cpu, p->domain->cpumask);
percpu_ctxt[cpu].curr_vcpu = n;
}
@@ -817,19 +817,27 @@
return 1;
}
-void sync_lazy_execstate_cpuset(unsigned long cpuset)
+void sync_lazy_execstate_cpu(unsigned int cpu)
{
- if ( cpuset & (1 << smp_processor_id()) )
+ if ( cpu == smp_processor_id() )
+ (void)__sync_lazy_execstate();
+ else
+ flush_tlb_mask(cpumask_of_cpu(cpu));
+}
+
+void sync_lazy_execstate_mask(cpumask_t mask)
+{
+ if ( cpu_isset(smp_processor_id(), mask) )
(void)__sync_lazy_execstate();
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(cpuset & ~(1 << smp_processor_id()));
+ flush_tlb_mask(mask);
}
void sync_lazy_execstate_all(void)
{
__sync_lazy_execstate();
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(((1<<num_online_cpus())-1) & ~(1 << smp_processor_id()));
+ flush_tlb_mask(cpu_online_map);
}
unsigned long __hypercall_create_continuation(
@@ -971,7 +979,7 @@
{
struct vcpu *v;
- BUG_ON(d->cpuset != 0);
+ BUG_ON(!cpus_empty(d->cpumask));
physdev_destroy_state(d);
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-06-03 10:02:00 -04:00
+++ b/xen/arch/x86/mm.c 2005-06-03 10:02:00 -04:00
@@ -1348,13 +1348,13 @@
* may be unnecessary (e.g., page was GDT/LDT) but those
* circumstances should be very rare.
*/
- unsigned long cpuset = tlbflush_filter_cpuset(
- page_get_owner(page)->cpuset, page->tlbflush_timestamp);
+ cpumask_t mask = page_get_owner(page)->cpumask;
+ tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(cpuset != 0) )
+ if ( unlikely(!cpus_empty(mask)) )
{
perfc_incrc(need_flush_tlb_flush);
- flush_tlb_mask(cpuset);
+ flush_tlb_mask(mask);
}
/* We lose existing type, back pointer, and validity. */
@@ -1555,23 +1555,23 @@
return okay;
}
-static inline unsigned long vcpuset_to_pcpuset(
- struct domain *d, unsigned long vset)
+static inline cpumask_t vcpumask_to_pcpumask(
+ struct domain *d, unsigned long vmask)
{
- unsigned int vcpu;
- unsigned long pset = 0;
+ unsigned int vcpu_id;
+ cpumask_t pmask;
struct vcpu *v;
- while ( vset != 0 )
+ while ( vmask != 0 )
{
- vcpu = find_first_set_bit(vset);
- vset &= ~(1UL << vcpu);
- if ( (vcpu < MAX_VIRT_CPUS) &&
- ((v = d->vcpu[vcpu]) != NULL) )
- pset |= 1UL << v->processor;
+ vcpu_id = find_first_set_bit(vmask);
+ vmask &= ~(1UL << vcpu_id);
+ if ( (vcpu_id < MAX_VIRT_CPUS) &&
+ ((v = d->vcpu[vcpu_id]) != NULL) )
+ cpu_set(v->processor, pmask);
}
- return pset;
+ return pmask;
}
int do_mmuext_op(
@@ -1731,34 +1731,28 @@
case MMUEXT_TLB_FLUSH_MULTI:
case MMUEXT_INVLPG_MULTI:
{
- unsigned long vset, pset;
- if ( unlikely(get_user(vset, (unsigned long *)op.cpuset)) )
+ unsigned long vmask;
+ cpumask_t pmask;
+ if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
{
okay = 0;
break;
}
- pset = vcpuset_to_pcpuset(d, vset);
+ pmask = vcpumask_to_pcpumask(d, vmask);
+ cpus_and(pmask, pmask, d->cpumask);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
- {
- BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
- flush_tlb_mask(pset & d->cpuset);
- }
+ flush_tlb_mask(pmask);
else
- {
- BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
- flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
- }
+ flush_tlb_one_mask(pmask, op.linear_addr);
break;
}
case MMUEXT_TLB_FLUSH_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_mask(d->cpuset);
+ flush_tlb_mask(d->cpumask);
break;
case MMUEXT_INVLPG_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_one_mask(d->cpuset, op.linear_addr);
+ flush_tlb_one_mask(d->cpumask, op.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
@@ -2256,7 +2250,8 @@
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned int cpu = v->processor;
- unsigned long vset, pset, bmap_ptr;
+ unsigned long vmask, bmap_ptr;
+ cpumask_t pmask;
int rc = 0;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|