# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1173464807 0
# Node ID 215b799fa181a807aed289e39b56c8e0c463f322
# Parent 1a01d8d9dbeca77c934156d42e955ada492b9c7b
xen: New vcpu_op commands for setting periodic and single-shot timers.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
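
For illustration, a guest could drive the new interface as in the minimal
sketch below. This assumes the guest's usual HYPERVISOR_vcpu_op() hypercall
wrapper and the definitions added to xen/include/public/vcpu.h by this patch;
note that periods below 1ms are rejected, and the single-shot commands may
only be applied to the calling VCPU.

    #include <xen/interface/vcpu.h>

    /* Ask for a 100Hz periodic tick on VCPU 'vcpu_id' (sketch). */
    static int request_100hz_tick(int vcpu_id)
    {
        struct vcpu_set_periodic_timer per = {
            .period_ns = 10000000ULL,   /* 10ms; must be >= 1ms */
        };
        return HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, vcpu_id, &per);
    }

    /* Cancel the periodic tick again. */
    static int stop_tick(int vcpu_id)
    {
        return HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, vcpu_id, NULL);
    }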
---
 xen/arch/ia64/xen/xensetup.c |    2
 xen/arch/powerpc/setup.c     |    3
 xen/arch/x86/apic.c          |   14 ++--
 xen/arch/x86/setup.c         |    8 --
 xen/common/compat/domain.c   |    4 +
 xen/common/domain.c          |   61 +++++++++++++++++++
 xen/common/page_alloc.c      |   15 ++++
 xen/common/sched_credit.c    |   40 ++++++++++--
 xen/common/schedule.c        |  135 ++++++++++++++++++++++++-------------------
 xen/include/public/vcpu.h    |   36 +++++++++--
 xen/include/xen/mm.h         |    5 +
 xen/include/xen/sched-if.h   |    2
 xen/include/xen/sched.h      |    8 +-
 xen/include/xen/timer.h      |    4 -
14 files changed, 240 insertions(+), 97 deletions(-)
diff -r 1a01d8d9dbec -r 215b799fa181 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/arch/ia64/xen/xensetup.c Fri Mar 09 18:26:47 2007 +0000
@@ -543,8 +543,6 @@ printk("num_online_cpus=%d, max_cpus=%d\
domain0_ready = 1;
- schedulers_start();
-
domain_unpause_by_systemcontroller(dom0);
startup_cpu_idle_loop();
diff -r 1a01d8d9dbec -r 215b799fa181 xen/arch/powerpc/setup.c
--- a/xen/arch/powerpc/setup.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/arch/powerpc/setup.c Fri Mar 09 18:26:47 2007 +0000
@@ -365,9 +365,6 @@ static void __init __start_xen(multiboot
kick_secondary_cpus(max_cpus);
}
- /* Secondary processors must be online before we call this. */
- schedulers_start();
-
/* This cannot be called before secondary cpus are marked online. */
percpu_free_unused_areas();
diff -r 1a01d8d9dbec -r 215b799fa181 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/arch/x86/apic.c Fri Mar 09 18:26:47 2007 +0000
@@ -1023,6 +1023,13 @@ int reprogram_timer(s_time_t timeout)
u64 apic_tmict;
/*
+ * If we don't have local APIC then we just poll the timer list off the
+ * PIT interrupt.
+ */
+ if ( !cpu_has_apic )
+ return 1;
+
+ /*
* We use this value because we don't trust zero (we think it may just
* cause an immediate interrupt). At least this is guaranteed to hold it
* off for ages (esp. since the clock ticks on bus clock, not cpu clock!).
@@ -1043,13 +1050,6 @@ int reprogram_timer(s_time_t timeout)
(u32)now, (u32)(timeout>>32),(u32)timeout);
return 0;
}
-
- /*
- * If we don't have local APIC then we just poll the timer list off the
- * PIT interrupt. Cheesy but good enough to work on eg. VMware :-)
- */
- if ( !cpu_has_apic )
- return 1;
/* conversion to bus units */
apic_tmict = (((u64)bus_scale) * expire)>>18;
diff -r 1a01d8d9dbec -r 215b799fa181 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/arch/x86/setup.c Fri Mar 09 18:26:47 2007 +0000
@@ -195,13 +195,13 @@ static void __init percpu_free_unused_ar
/* Find first unused CPU number. */
for ( i = 0; i < NR_CPUS; i++ )
- if ( !cpu_online(i) )
+ if ( !cpu_possible(i) )
break;
first_unused = i;
- /* Check that there are no holes in cpu_online_map. */
+ /* Check that there are no holes in cpu_possible_map. */
for ( ; i < NR_CPUS; i++ )
- BUG_ON(cpu_online(i));
+ BUG_ON(cpu_possible(i));
#ifndef MEMORY_GUARD
init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
@@ -717,8 +717,6 @@ void __init __start_xen(multiboot_info_t
do_initcalls();
- schedulers_start();
-
if ( opt_watchdog )
watchdog_enable();
diff -r 1a01d8d9dbec -r 215b799fa181 xen/common/compat/domain.c
--- a/xen/common/compat/domain.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/common/compat/domain.c Fri Mar 09 18:26:47 2007 +0000
@@ -55,6 +55,10 @@ int compat_vcpu_op(int cmd, int vcpuid,
case VCPUOP_up:
case VCPUOP_down:
case VCPUOP_is_up:
+ case VCPUOP_set_periodic_timer:
+ case VCPUOP_stop_periodic_timer:
+ case VCPUOP_set_singleshot_timer:
+ case VCPUOP_stop_singleshot_timer:
rc = do_vcpu_op(cmd, vcpuid, arg);
break;
diff -r 1a01d8d9dbec -r 215b799fa181 xen/common/domain.c
--- a/xen/common/domain.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/common/domain.c Fri Mar 09 18:26:47 2007 +0000
@@ -102,6 +102,9 @@ struct vcpu *alloc_vcpu(
v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
v->runstate.state_entry_time = NOW();
+ /* VCPUs by default have a 100Hz ticker. */
+ v->periodic_period = MILLISECS(10);
+
if ( (vcpu_id != 0) && !is_idle_domain(d) )
set_bit(_VCPUF_down, &v->vcpu_flags);
@@ -267,6 +270,9 @@ void domain_kill(struct domain *d)
domain_relinquish_resources(d);
put_domain(d);
+ /* Kick page scrubbing after domain_relinquish_resources(). */
+ page_scrub_kick();
+
send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
@@ -589,6 +595,61 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
break;
}
+ case VCPUOP_set_periodic_timer:
+ {
+ struct vcpu_set_periodic_timer set;
+
+ rc = -EFAULT;
+ if ( copy_from_guest(&set, arg, 1) )
+ break;
+
+ rc = -EINVAL;
+ if ( set.period_ns < MILLISECS(1) )
+ break;
+
+ v->periodic_period = set.period_ns;
+ vcpu_force_reschedule(v);
+
+ break;
+ }
+
+ case VCPUOP_stop_periodic_timer:
+ {
+ v->periodic_period = 0;
+ vcpu_force_reschedule(v);
+ break;
+ }
+
+ case VCPUOP_set_singleshot_timer:
+ {
+ struct vcpu_set_singleshot_timer set;
+
+ if ( v != current )
+ return -EINVAL;
+
+ if ( copy_from_guest(&set, arg, 1) )
+ return -EFAULT;
+
+ if ( v->singleshot_timer.cpu != smp_processor_id() )
+ {
+ stop_timer(&v->singleshot_timer);
+ v->singleshot_timer.cpu = smp_processor_id();
+ }
+
+ set_timer(&v->singleshot_timer, set.timeout_abs_ns);
+
+ break;
+ }
+
+ case VCPUOP_stop_singleshot_timer:
+ {
+ if ( v != current )
+ return -EINVAL;
+
+ stop_timer(&v->singleshot_timer);
+ break;
+ }
+
default:
rc = arch_do_vcpu_op(cmd, v, arg);
break;
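
The stop-then-retarget sequence in the single-shot handler above is the
general rule for struct timer: the 'cpu' field may only be rewritten while
the timer is inactive. A condensed sketch of the idiom, with a hypothetical
helper name:

    /* Retarget a timer to the current CPU; only legal while stopped. */
    static void retarget_timer_to_current_cpu(struct timer *t)
    {
        if ( t->cpu != smp_processor_id() )
        {
            stop_timer(t);               /* deactivate first */
            t->cpu = smp_processor_id(); /* now safe to rewrite */
        }
    }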
diff -r 1a01d8d9dbec -r 215b799fa181 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/common/page_alloc.c Fri Mar 09 18:26:47 2007 +0000
@@ -970,6 +970,8 @@ __initcall(pagealloc_keyhandler_init);
* PAGE SCRUBBING
*/
+static DEFINE_PER_CPU(struct timer, page_scrub_timer);
+
static void page_scrub_softirq(void)
{
struct list_head *ent;
@@ -978,7 +980,7 @@ static void page_scrub_softirq(void)
int i;
s_time_t start = NOW();
- /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+ /* Aim to do 1ms of work every 10ms. */
do {
spin_lock(&page_scrub_lock);
@@ -1014,6 +1016,13 @@ static void page_scrub_softirq(void)
free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
}
} while ( (NOW() - start) < MILLISECS(1) );
+
+ set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
+}
+
+static void page_scrub_timer_fn(void *unused)
+{
+ page_scrub_schedule_work();
}
unsigned long avail_scrub_pages(void)
@@ -1049,6 +1058,10 @@ __initcall(register_heap_trigger);
static __init int page_scrub_init(void)
{
+ int cpu;
+ for_each_cpu ( cpu )
+ init_timer(&per_cpu(page_scrub_timer, cpu),
+ page_scrub_timer_fn, NULL, cpu);
open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
return 0;
}
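
The scrub path above bounds its work by elapsed time rather than item count.
The skeleton of that pattern, with process_one_page() standing in for the
lock/scrub/free body of the real loop:

    /* Time-budgeted softirq sketch: up to 1ms of work, then resume in 10ms. */
    static void page_scrub_softirq_sketch(void)
    {
        s_time_t start = NOW();

        do {
            if ( !process_one_page() )  /* hypothetical: 0 => list empty */
                return;                 /* drained; no need to re-arm */
        } while ( (NOW() - start) < MILLISECS(1) );

        set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
    }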
diff -r 1a01d8d9dbec -r 215b799fa181 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/common/sched_credit.c Fri Mar 09 18:26:47 2007 +0000
@@ -186,6 +186,8 @@ struct csched_pcpu {
struct csched_pcpu {
struct list_head runq;
uint32_t runq_sort_last;
+ struct timer ticker;
+ unsigned int tick;
};
/*
@@ -245,7 +247,7 @@ struct csched_private {
*/
static struct csched_private csched_priv;
-
+static void csched_tick(void *_cpu);
static inline int
__cycle_cpu(int cpu, const cpumask_t *mask)
@@ -362,12 +364,13 @@ csched_pcpu_init(int cpu)
if ( csched_priv.master >= csched_priv.ncpus )
csched_priv.master = cpu;
+ init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
INIT_LIST_HEAD(&spc->runq);
spc->runq_sort_last = csched_priv.runq_sort;
per_cpu(schedule_data, cpu).sched_priv = spc;
/* Start off idling... */
- BUG_ON( !is_idle_vcpu(per_cpu(schedule_data, cpu).curr) );
+ BUG_ON(!is_idle_vcpu(per_cpu(schedule_data, cpu).curr));
cpu_set(cpu, csched_priv.idlers);
spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -1013,8 +1016,13 @@ csched_acct(void)
}
static void
-csched_tick(unsigned int cpu)
-{
+csched_tick(void *_cpu)
+{
+ unsigned int cpu = (unsigned long)_cpu;
+ struct csched_pcpu *spc = CSCHED_PCPU(cpu);
+
+ spc->tick++;
+
/*
* Accounting for running VCPU
*/
@@ -1028,7 +1036,7 @@ csched_tick(unsigned int cpu)
* we could distribute or at the very least cycle the duty.
*/
if ( (csched_priv.master == cpu) &&
- (per_cpu(schedule_data, cpu).tick % CSCHED_TICKS_PER_ACCT) == 0 )
+ (spc->tick % CSCHED_TICKS_PER_ACCT) == 0 )
{
csched_acct();
}
@@ -1041,6 +1049,8 @@ csched_tick(unsigned int cpu)
* once per accounting period (currently 30 milliseconds).
*/
csched_runq_sort(cpu);
+
+ set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
}
static struct csched_vcpu *
@@ -1248,8 +1258,7 @@ csched_dump_pcpu(int cpu)
spc = CSCHED_PCPU(cpu);
runq = &spc->runq;
- printk(" tick=%lu, sort=%d, sibling=0x%lx, core=0x%lx\n",
- per_cpu(schedule_data, cpu).tick,
+ printk(" sort=%d, sibling=0x%lx, core=0x%lx\n",
spc->runq_sort_last,
cpu_sibling_map[cpu].bits[0],
cpu_core_map[cpu].bits[0]);
@@ -1341,6 +1350,22 @@ csched_init(void)
CSCHED_STATS_RESET();
}
+/* Tickers cannot be kicked until SMP subsystem is alive. */
+static __init int csched_start_tickers(void)
+{
+ struct csched_pcpu *spc;
+ unsigned int cpu;
+
+ for_each_online_cpu ( cpu )
+ {
+ spc = CSCHED_PCPU(cpu);
+ set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
+ }
+
+ return 0;
+}
+__initcall(csched_start_tickers);
+
struct scheduler sched_credit_def = {
.name = "SMP Credit Scheduler",
@@ -1359,7 +1384,6 @@ struct scheduler sched_credit_def = {
.adjust = csched_dom_cntl,
.pick_cpu = csched_cpu_pick,
- .tick = csched_tick,
.do_schedule = csched_schedule,
.dump_cpu_state = csched_dump_pcpu,
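
The credit scheduler's tick is now a self-re-arming per-CPU timer rather
than a hook invoked from common code. In outline, with 'ticker' standing
for a hypothetical DEFINE_PER_CPU(struct timer, ticker):

    /* Self-re-arming per-CPU ticker, in outline. */
    static void tick_fn(void *_cpu)
    {
        unsigned int cpu = (unsigned long)_cpu;

        /* ... per-CPU periodic work ... */

        /* The timer fires again only because the handler re-arms it. */
        set_timer(&per_cpu(ticker, cpu), NOW() + MILLISECS(10));
    }

    /* Started once from an initcall, after the secondary CPUs are online:
     *   init_timer(&per_cpu(ticker, cpu), tick_fn,
     *              (void *)(unsigned long)cpu, cpu);
     *   set_timer(&per_cpu(ticker, cpu), NOW() + MILLISECS(10));
     */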
diff -r 1a01d8d9dbec -r 215b799fa181 xen/common/schedule.c
--- a/xen/common/schedule.c Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/common/schedule.c Fri Mar 09 18:26:47 2007 +0000
@@ -45,8 +45,8 @@ boolean_param("dom0_vcpus_pin", opt_dom0
/* Various timer handlers. */
static void s_timer_fn(void *unused);
-static void t_timer_fn(void *unused);
-static void vcpu_timer_fn(void *data);
+static void vcpu_periodic_timer_fn(void *data);
+static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
@@ -65,9 +65,6 @@ static struct scheduler ops;
#define SCHED_OP(fn, ...) \
(( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ ) \
: (typeof(ops.fn(__VA_ARGS__)))0 )
-
-/* Per-CPU periodic timer sends an event to the currently-executing domain. */
-static DEFINE_PER_CPU(struct timer, t_timer);
static inline void vcpu_runstate_change(
struct vcpu *v, int new_state, s_time_t new_entry_time)
@@ -114,8 +111,12 @@ int sched_init_vcpu(struct vcpu *v, unsi
cpus_setall(v->cpu_affinity);
/* Initialise the per-domain timers. */
- init_timer(&v->timer, vcpu_timer_fn, v, v->processor);
- init_timer(&v->poll_timer, poll_timer_fn, v, v->processor);
+ init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
+ v, v->processor);
+ init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
+ v, v->processor);
+ init_timer(&v->poll_timer, poll_timer_fn,
+ v, v->processor);
/* Idle VCPUs are scheduled immediately. */
if ( is_idle_domain(d) )
@@ -132,7 +133,8 @@ int sched_init_vcpu(struct vcpu *v, unsi
void sched_destroy_vcpu(struct vcpu *v)
{
- kill_timer(&v->timer);
+ kill_timer(&v->periodic_timer);
+ kill_timer(&v->singleshot_timer);
kill_timer(&v->poll_timer);
SCHED_OP(destroy_vcpu, v);
}
@@ -223,10 +225,29 @@ static void vcpu_migrate(struct vcpu *v)
vcpu_wake(v);
}
+/*
+ * Force a VCPU through a deschedule/reschedule path.
+ * For example, using this when setting the periodic timer period means that
+ * most periodic-timer state need only be touched from within the scheduler
+ * which can thus be done without need for synchronisation.
+ */
+void vcpu_force_reschedule(struct vcpu *v)
+{
+ vcpu_schedule_lock_irq(v);
+ if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
+ set_bit(_VCPUF_migrating, &v->vcpu_flags);
+ vcpu_schedule_unlock_irq(v);
+
+ if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
+ {
+ vcpu_sleep_nosync(v);
+ vcpu_migrate(v);
+ }
+}
+
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
{
cpumask_t online_affinity;
- unsigned long flags;
if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
return -EINVAL;
@@ -235,13 +256,13 @@ int vcpu_set_affinity(struct vcpu *v, cp
if ( cpus_empty(online_affinity) )
return -EINVAL;
- vcpu_schedule_lock_irqsave(v, flags);
+ vcpu_schedule_lock_irq(v);
v->cpu_affinity = *affinity;
if ( !cpu_isset(v->processor, v->cpu_affinity) )
set_bit(_VCPUF_migrating, &v->vcpu_flags);
- vcpu_schedule_unlock_irqrestore(v, flags);
+ vcpu_schedule_unlock_irq(v);
if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
{
@@ -458,7 +479,7 @@ long do_set_timer_op(s_time_t timeout)
if ( timeout == 0 )
{
- stop_timer(&v->timer);
+ stop_timer(&v->singleshot_timer);
}
else if ( unlikely(timeout < 0) || /* overflow into 64th bit? */
unlikely((offset > 0) && ((uint32_t)(offset >> 50) != 0)) )
@@ -474,14 +495,20 @@ long do_set_timer_op(s_time_t timeout)
* timeout in this case can burn a lot of CPU. We therefore go for a
* reasonable middleground of triggering a timer event in 100ms.
*/
- gdprintk(XENLOG_INFO, "Warning: huge timeout set by domain %d (vcpu %d):"
- " %"PRIx64"\n",
+ gdprintk(XENLOG_INFO, "Warning: huge timeout set by domain %d "
+ "(vcpu %d): %"PRIx64"\n",
v->domain->domain_id, v->vcpu_id, (uint64_t)timeout);
- set_timer(&v->timer, NOW() + MILLISECS(100));
+ set_timer(&v->singleshot_timer, NOW() + MILLISECS(100));
}
else
{
- set_timer(&v->timer, timeout);
+ if ( v->singleshot_timer.cpu != smp_processor_id() )
+ {
+ stop_timer(&v->singleshot_timer);
+ v->singleshot_timer.cpu = smp_processor_id();
+ }
+
+ set_timer(&v->singleshot_timer, timeout);
}
return 0;
@@ -540,6 +567,28 @@ long sched_adjust(struct domain *d, stru
return 0;
}
+static void vcpu_periodic_timer_work(struct vcpu *v)
+{
+ s_time_t now = NOW();
+ uint64_t periodic_next_event;
+
+ ASSERT(!active_timer(&v->periodic_timer));
+
+ if ( v->periodic_period == 0 )
+ return;
+
+ periodic_next_event = v->periodic_last_event + v->periodic_period;
+ if ( now > periodic_next_event )
+ {
+ send_timer_event(v);
+ v->periodic_last_event = now;
+ periodic_next_event = now + v->periodic_period;
+ }
+
+ v->periodic_timer.cpu = smp_processor_id();
+ set_timer(&v->periodic_timer, periodic_next_event);
+}
+
/*
* The main function
* - deschedule the current domain (scheduler independent).
@@ -606,14 +655,13 @@ static void schedule(void)
perfc_incrc(sched_ctx);
- prev->sleep_tick = sd->tick;
+ stop_timer(&prev->periodic_timer);
/* Ensure that the domain has an up-to-date time base. */
if ( !is_idle_vcpu(next) )
{
update_vcpu_system_time(next);
- if ( next->sleep_tick != sd->tick )
- send_timer_event(next);
+ vcpu_periodic_timer_work(next);
}
TRACE_4D(TRC_SCHED_SWITCH,
@@ -631,13 +679,6 @@ void context_saved(struct vcpu *prev)
vcpu_migrate(prev);
}
-/****************************************************************************
- * Timers: the scheduler utilises a number of timers
- * - s_timer: per CPU timer for preemption and scheduling decisions
- * - t_timer: per CPU periodic timer to send timer interrupt to current dom
- * - dom_timer: per domain timer to specifiy timeout values
- ****************************************************************************/
-
/* The scheduler timer: force a run through the scheduler */
static void s_timer_fn(void *unused)
{
@@ -645,28 +686,15 @@ static void s_timer_fn(void *unused)
perfc_incrc(sched_irq);
}
-/* Periodic tick timer: send timer event to current domain */
-static void t_timer_fn(void *unused)
-{
- struct vcpu *v = current;
-
- this_cpu(schedule_data).tick++;
-
- if ( !is_idle_vcpu(v) )
- {
- update_vcpu_system_time(v);
- send_timer_event(v);
- }
-
- page_scrub_schedule_work();
-
- SCHED_OP(tick, smp_processor_id());
-
- set_timer(&this_cpu(t_timer), NOW() + MILLISECS(10));
-}
-
-/* Per-VCPU timer function: sends a virtual timer interrupt. */
-static void vcpu_timer_fn(void *data)
+/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
+static void vcpu_periodic_timer_fn(void *data)
+{
+ struct vcpu *v = data;
+ vcpu_periodic_timer_work(v);
+}
+
+/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
+static void vcpu_singleshot_timer_fn(void *data)
{
struct vcpu *v = data;
send_timer_event(v);
@@ -691,7 +719,6 @@ void __init scheduler_init(void)
{
spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
- init_timer(&per_cpu(t_timer, i), t_timer_fn, NULL, i);
}
for ( i = 0; schedulers[i] != NULL; i++ )
@@ -706,16 +733,6 @@ void __init scheduler_init(void)
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
SCHED_OP(init);
-}
-
-/*
- * Start a scheduler for each CPU
- * This has to be done *after* the timers, e.g., APICs, have been initialised
- */
-void schedulers_start(void)
-{
- t_timer_fn(0);
- smp_call_function((void *)t_timer_fn, NULL, 1, 1);
}
void dump_runq(unsigned char key)
diff -r 1a01d8d9dbec -r 215b799fa181 xen/include/public/vcpu.h
--- a/xen/include/public/vcpu.h Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/include/public/vcpu.h Fri Mar 09 18:26:47 2007 +0000
@@ -42,13 +42,13 @@
* @extra_arg == pointer to vcpu_guest_context structure containing initial
* state for the VCPU.
*/
-#define VCPUOP_initialise 0
+#define VCPUOP_initialise 0
/*
* Bring up a VCPU. This makes the VCPU runnable. This operation will fail
* if the VCPU has not been initialised (VCPUOP_initialise).
*/
-#define VCPUOP_up 1
+#define VCPUOP_up 1
/*
* Bring down a VCPU (i.e., make it non-runnable).
@@ -64,16 +64,16 @@
* practise to move a VCPU onto an 'idle' or default page table, LDT and
* GDT before bringing it down.
*/
-#define VCPUOP_down 2
+#define VCPUOP_down 2
/* Returns 1 if the given VCPU is up. */
-#define VCPUOP_is_up 3
+#define VCPUOP_is_up 3
/*
* Return information about the state and running time of a VCPU.
* @extra_arg == pointer to vcpu_runstate_info structure.
*/
-#define VCPUOP_get_runstate_info 4
+#define VCPUOP_get_runstate_info 4
struct vcpu_runstate_info {
/* VCPU's current state (RUNSTATE_*). */
int state;
@@ -128,6 +128,32 @@ struct vcpu_register_runstate_memory_are
} addr;
};
typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+ uint64_t period_ns;
+};
+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+ uint64_t timeout_abs_ns;
+};
+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
#endif /* __XEN_PUBLIC_VCPU_H__ */
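
Note the asymmetry in the new ABI: period_ns is a relative interval, while
timeout_abs_ns is an absolute deadline in Xen system time. A guest arming a
one-shot event therefore adds its delay to its current reading of system
time; sketch below, with read_xen_system_time_ns() as a stand-in for however
the guest obtains that value:

    /* Arm a one-shot event delta_ns from now on the calling VCPU (sketch). */
    static int arm_oneshot(int this_vcpu, uint64_t delta_ns)
    {
        struct vcpu_set_singleshot_timer one = {
            .timeout_abs_ns = read_xen_system_time_ns() + delta_ns,
        };
        /* Must be issued by the target VCPU itself, else -EINVAL. */
        return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, this_vcpu, &one);
    }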
diff -r 1a01d8d9dbec -r 215b799fa181 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/include/xen/mm.h Fri Mar 09 18:26:47 2007 +0000
@@ -92,6 +92,11 @@ extern struct list_head page_scrub_list;
if ( !list_empty(&page_scrub_list) ) \
raise_softirq(PAGE_SCRUB_SOFTIRQ); \
} while ( 0 )
+#define page_scrub_kick() \
+ do { \
+ if ( !list_empty(&page_scrub_list) ) \
+ cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \
+ } while ( 0 )
unsigned long avail_scrub_pages(void);
#include <asm/mm.h>
diff -r 1a01d8d9dbec -r 215b799fa181 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/include/xen/sched-if.h Fri Mar 09 18:26:47 2007 +0000
@@ -16,7 +16,6 @@ struct schedule_data {
struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
struct timer s_timer; /* scheduling timer */
- unsigned long tick; /* current periodic 'tick' */
} __cacheline_aligned;
DECLARE_PER_CPU(struct schedule_data, schedule_data);
@@ -61,7 +60,6 @@ struct scheduler {
unsigned int sched_id; /* ID for this scheduler */
void (*init) (void);
- void (*tick) (unsigned int cpu);
int (*init_domain) (struct domain *);
void (*destroy_domain) (struct domain *);
diff -r 1a01d8d9dbec -r 215b799fa181 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/include/xen/sched.h Fri Mar 09 18:26:47 2007 +0000
@@ -79,8 +79,10 @@ struct vcpu
struct vcpu *next_in_list;
- struct timer timer; /* one-shot timer for timeout values */
- unsigned long sleep_tick; /* tick at which this vcpu started sleep */
+ uint64_t periodic_period;
+ uint64_t periodic_last_event;
+ struct timer periodic_timer;
+ struct timer singleshot_timer;
struct timer poll_timer; /* timeout for SCHEDOP_poll */
@@ -332,7 +334,6 @@ void __domain_crash_synchronous(void) __
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
-void schedulers_start(void);
int sched_init_vcpu(struct vcpu *v, unsigned int processor);
void sched_destroy_vcpu(struct vcpu *v);
int sched_init_domain(struct domain *d);
@@ -497,6 +498,7 @@ void domain_unpause_by_systemcontroller(
void domain_unpause_by_systemcontroller(struct domain *d);
void cpu_init(void);
+void vcpu_force_reschedule(struct vcpu *v);
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
diff -r 1a01d8d9dbec -r 215b799fa181 xen/include/xen/timer.h
--- a/xen/include/xen/timer.h Fri Mar 09 18:26:22 2007 +0000
+++ b/xen/include/xen/timer.h Fri Mar 09 18:26:47 2007 +0000
@@ -35,7 +35,7 @@ struct timer {
* The timer must *previously* have been initialised by init_timer(), or its
* structure initialised to all-zeroes.
*/
-static __inline__ int active_timer(struct timer *timer)
+static inline int active_timer(struct timer *timer)
{
return (timer->heap_offset != 0);
}
@@ -46,7 +46,7 @@ static __inline__ int active_timer(struc
* time (and multiple times) on an inactive timer. It must *never* execute
* concurrently with any other operation on the same timer.
*/
-static __inline__ void init_timer(
+static inline void init_timer(
struct timer *timer,
void (*function)(void *),
void *data,