# HG changeset patch
# User Juergen Gross
# Date 1296457800 -3600
# Node ID 76e1f7018b01c1a155c1fa5124fb4d30f2d3edc0
# Parent  52e928af363726b96698ee2ecc9c3d6c0b08678d
Check for memory allocation failure on switching schedulers

When switching schedulers on a physical cpu due to a cpupool operation,
check for a potential memory allocation failure and stop the operation
gracefully.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r 52e928af3637 -r 76e1f7018b01 xen/common/cpupool.c
--- a/xen/common/cpupool.c	Fri Jan 28 19:37:49 2011 +0000
+++ b/xen/common/cpupool.c	Mon Jan 31 08:10:00 2011 +0100
@@ -202,10 +202,20 @@ static int cpupool_destroy(struct cpupoo
  */
 static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 {
+    int ret;
+    struct cpupool *old;
+
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
+    old = per_cpu(cpupool, cpu);
     per_cpu(cpupool, cpu) = c;
-    schedule_cpu_switch(cpu, c);
+    ret = schedule_cpu_switch(cpu, c);
+    if ( ret )
+    {
+        per_cpu(cpupool, cpu) = old;
+        return ret;
+    }
+
     cpu_clear(cpu, cpupool_free_cpus);
     if (cpupool_moving_cpu == cpu)
     {
@@ -230,12 +240,19 @@ static long cpupool_unassign_cpu_helper(
     cpu_set(cpu, cpupool_free_cpus);
     if ( !ret )
     {
-        schedule_cpu_switch(cpu, NULL);
+        ret = schedule_cpu_switch(cpu, NULL);
+        if ( ret )
+        {
+            cpu_clear(cpu, cpupool_free_cpus);
+            goto out;
+        }
         per_cpu(cpupool, cpu) = NULL;
         cpupool_moving_cpu = -1;
         cpupool_put(cpupool_cpu_moving);
         cpupool_cpu_moving = NULL;
     }
+
+out:
     spin_unlock(&cpupool_lock);
     return ret;
 }
diff -r 52e928af3637 -r 76e1f7018b01 xen/common/schedule.c
--- a/xen/common/schedule.c	Fri Jan 28 19:37:49 2011 +0000
+++ b/xen/common/schedule.c	Mon Jan 31 08:10:00 2011 +0100
@@ -1288,7 +1288,7 @@ void __init scheduler_init(void)
         BUG();
 }
 
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *idle;
@@ -1297,11 +1297,18 @@ void schedule_cpu_switch(unsigned int cp
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
 
     if ( old_ops == new_ops )
-        return;
+        return 0;
 
     idle = idle_vcpu[cpu];
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
+    if ( ppriv == NULL )
+        return -ENOMEM;
     vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
+    if ( vpriv == NULL )
+    {
+        SCHED_OP(new_ops, free_pdata, ppriv, cpu);
+        return -ENOMEM;
+    }
 
     pcpu_schedule_lock_irqsave(cpu, flags);
 
@@ -1318,6 +1325,8 @@ void schedule_cpu_switch(unsigned int cp
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
+
+    return 0;
 }
 
 struct scheduler *scheduler_get_default(void)
diff -r 52e928af3637 -r 76e1f7018b01 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	Fri Jan 28 19:37:49 2011 +0000
+++ b/xen/include/xen/sched.h	Mon Jan 31 08:10:00 2011 +0100
@@ -607,7 +607,7 @@ struct scheduler *scheduler_get_default(
 struct scheduler *scheduler_get_default(void);
 struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
 void scheduler_free(struct scheduler *sched);
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
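
Note: the pattern the patch follows is allocate-then-commit. Everything the
new scheduler needs is allocated before any per-cpu state changes, and a
failure in a later allocation unwinds the earlier one, so on error the cpu
simply keeps its old scheduler. Below is a minimal standalone C sketch of
that pattern; every name in it (sched_ops, switch_sched, demo_ops, the
fixed-size per-cpu arrays) is a hypothetical stand-in for illustration, not
Xen's real interface.

/*
 * Standalone sketch of the allocate-then-commit error handling used by
 * the patch.  All names here are hypothetical illustrations, not Xen code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct sched_ops {
    const char *name;
    void *(*alloc_pdata)(unsigned int cpu);   /* per-cpu scheduler data */
    void *(*alloc_vdata)(unsigned int cpu);   /* idle-vcpu scheduler data */
    void (*free_pdata)(void *p);
    void (*free_vdata)(void *v);
};

#define NR_CPUS 4

/* Per-cpu state that must only change once all allocations succeeded. */
static struct sched_ops *cpu_ops[NR_CPUS];
static void *cpu_pdata[NR_CPUS];
static void *cpu_vdata[NR_CPUS];

static void *demo_alloc(unsigned int cpu)
{
    (void)cpu;
    return malloc(32);                 /* may return NULL under pressure */
}

static void demo_free(void *p)
{
    free(p);
}

static struct sched_ops demo_ops = {
    .name = "demo",
    .alloc_pdata = demo_alloc,
    .alloc_vdata = demo_alloc,
    .free_pdata = demo_free,
    .free_vdata = demo_free,
};

static int switch_sched(unsigned int cpu, struct sched_ops *new_ops)
{
    struct sched_ops *old_ops = cpu_ops[cpu];
    void *ppriv, *vpriv;

    if ( old_ops == new_ops )
        return 0;

    /* Allocate everything up front; no state has been touched yet. */
    ppriv = new_ops->alloc_pdata(cpu);
    if ( ppriv == NULL )
        return -ENOMEM;
    vpriv = new_ops->alloc_vdata(cpu);
    if ( vpriv == NULL )
    {
        new_ops->free_pdata(ppriv);    /* unwind the partial allocation */
        return -ENOMEM;
    }

    /* All allocations succeeded: commit by swapping old state for new. */
    if ( old_ops != NULL )
    {
        old_ops->free_vdata(cpu_vdata[cpu]);
        old_ops->free_pdata(cpu_pdata[cpu]);
    }
    cpu_ops[cpu] = new_ops;
    cpu_pdata[cpu] = ppriv;
    cpu_vdata[cpu] = vpriv;
    return 0;
}

int main(void)
{
    int ret = switch_sched(0, &demo_ops);

    printf("switch_sched: %s\n",
           ret ? "failed, cpu kept its old scheduler" : "succeeded");
    return 0;
}

The caller-side rollback in cpupool_assign_cpu_locked() applies the same
rule one level up: per_cpu(cpupool, cpu) is restored from the saved old
value when schedule_cpu_switch() reports failure, so the pool assignment
is left exactly as it was before the attempt.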