Re: [Xen-devel] [PATCH] 2/4 "nemesis" scheduling domains for Xen

To: <ncmike@xxxxxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH] 2/4 "nemesis" scheduling domains for Xen
From: Keir Fraser <Keir.Fraser@xxxxxxxxxxxx>
Date: Sat, 05 May 2007 09:18:44 +0100
Delivery-date: Sat, 05 May 2007 01:15:33 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <20070504222432.GC26054@xxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AceO7f8BPauxI/rhEdu0JgAWy6hiGQ==
Thread-topic: [Xen-devel] [PATCH] 2/4 "nemesis" scheduling domains for Xen
User-agent: Microsoft-Entourage/11.3.3.061214

Some comments:

The nemesis/sdom/ndom/adom nomenclature is not very helpful. I think it
could be clearer and simpler. For example, perhaps sched_group (== nemesis,
in your terminology I think?) and group_master (== adom)? sdom doesn't seem
so useful since every domain is schedulable and hence an sdom. And I don't
know what an ndom is: it looks a bit like an adom but I guess it must be
different.

Don't add extra bit flags. Define some bool_t members and use those instead;
that results in clearer and faster code.
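E.g. the extra per-domain state might then look something like this (rough
sketch only, member names purely illustrative):

    struct csched_dom {
        struct list_head active_vcpu;
        struct list_head active_sdom_elem;
        struct list_head nemesis_doms;
        struct list_head nemesis_elem;
        struct csched_dom *ndom;
        struct domain *dom;
        uint16_t active_vcpu_count;
        uint16_t weight;
        uint16_t cap;
        bool_t is_adom;      /* replaces flags & SDOM_IS_ADOM */
        bool_t is_active;    /* replaces flags & SDOM_ACTIVE */
    };

and tests such as (d->flags & SDOM_IS_ADOM) just become d->is_adom.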

Replace BUG_ON(1) with BUG().
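E.g. the tail of get_nemesis_dom() would then read:

    if (get_domain(d->ndom->dom))
        return d->ndom;
    BUG();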

Stick to the coding style of the file you're editing (spaces inside (),
etc).
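For example, the existing code in this file writes

    if ( list_empty(&svc->active_vcpu_elem) )

rather than

    if (list_empty(&svc->active_vcpu_elem))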

Some kind of brief description of how the sched_credit changes work would be
very useful. Not much text required, but something at a lower level of
detail than in your summit presentation, just to make it clearer how the
various changes in this file fit together into a whole.

 Thanks!
 Keir


On 4/5/07 23:24, "Mike D. Day" <ncmike@xxxxxxxxxx> wrote:

> Credit scheduler implementation of scheduling domains.
> 
> signed-off-by: Mike D. Day <ncmike@xxxxxxxxxx>
> 
> --
>  sched_credit.c |  262 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 253 insertions(+), 9 deletions(-)
> 
> --
> diff -r 8b71c838aff8 xen/common/sched_credit.c
> --- a/xen/common/sched_credit.c Fri May 04 11:21:36 2007 -0400
> +++ b/xen/common/sched_credit.c Fri May 04 17:48:54 2007 -0400
> @@ -219,10 +219,14 @@ struct csched_dom {
>  struct csched_dom {
>      struct list_head active_vcpu;
>      struct list_head active_sdom_elem;
> +    struct list_head nemesis_doms;
> +    struct list_head nemesis_elem;
> +    struct csched_dom *ndom;
>      struct domain *dom;
>      uint16_t active_vcpu_count;
>      uint16_t weight;
>      uint16_t cap;
> +    uint16_t flags;
>  };
>  
>  /*
> @@ -344,6 +348,118 @@ __runq_tickle(unsigned int cpu, struct c
>          cpumask_raise_softirq(mask, SCHEDULE_SOFTIRQ);
>  }
>  
> +static inline struct csched_dom *csched_dom(struct domain *d)
> +{
> +    return (struct csched_dom *)d->sched_priv;
> +}
> +
> +static inline struct csched_dom *get_nemesis_dom(struct csched_dom *d)
> +{
> +    if (d->flags & SDOM_IS_ADOM)
> +    {
> +        if (get_domain(d->ndom->dom))
> +            return d->ndom;
> +        BUG_ON(1);
> +    }
> +
> +    return NULL;
> +}
> +
> +static inline struct csched_dom *accounting_dom(struct csched_dom *d)
> +{
> +    if (d->flags & SDOM_IS_ADOM)
> +        return d->ndom;
> +    return d;
> +}
> +
> +static inline void delegate_active_vcpus(struct csched_dom *adom,
> +                                         struct csched_dom *ndom)
> +{
> +    BUG_ON(! (adom->flags & SDOM_IS_ADOM));
> +    BUG_ON(adom->ndom != ndom);
> +    if (adom->flags & SDOM_IS_ADOM && adom->ndom == ndom)
> +    {
> +        struct list_head *elem;
> +
> +        while (!list_empty(&adom->active_vcpu))
> +        {
> +            elem = adom->active_vcpu.next;
> +            list_del(elem);
> +            list_add(elem, &ndom->active_vcpu);
> +            adom->active_vcpu_count--;
> +            ndom->active_vcpu_count++;
> +        }
> +
> +        if (!list_empty(&adom->active_sdom_elem))
> +        {
> +            list_del_init(&adom->active_sdom_elem);
> +            csched_priv.weight -= adom->weight;
> +        }
> +
> +        if (list_empty(&ndom->active_sdom_elem))
> +        {
> +            list_add(&ndom->active_sdom_elem, &csched_priv.active_sdom);
> +            csched_priv.weight += ndom->weight;
> +        }
> +    }
> +}
> +
> +static inline void reclaim_active_vcpus(struct csched_dom *ndom,
> +                                        struct csched_dom *adom)
> +{
> +    BUG_ON(!(ndom->flags & SDOM_ACTIVE));
> +    BUG_ON(adom->ndom != ndom);
> +    if (ndom->flags & SDOM_ACTIVE && adom->ndom == ndom)
> +    {
> +        struct csched_vcpu *p, *n;
> +
> +        list_for_each_entry_safe(p, n, &ndom->active_vcpu, active_vcpu_elem)
> +        {
> +            if (p->sdom == adom)
> +            {
> +                list_del(&p->active_vcpu_elem);
> +                list_add(&p->active_vcpu_elem, &adom->active_vcpu);
> +                adom->active_vcpu_count++;
> +                ndom->active_vcpu_count--;
> +            }
> +        }
> +
> +        if (list_empty(&ndom->active_vcpu) &&
> +            !list_empty(&ndom->active_sdom_elem))
> +        {
> +            list_del_init(&ndom->active_sdom_elem);
> +            csched_priv.weight -= ndom->weight;
> +        }
> +        if (!list_empty(&adom->active_vcpu) &&
> +            list_empty(&adom->active_sdom_elem))
> +        {
> +            list_add(&adom->active_sdom_elem, &csched_priv.active_sdom);
> +            csched_priv.weight += adom->weight;
> +        }
> +    }
> +}
> +
> +static inline void add_adom_to_ndom(struct csched_dom *adom,
> +                                    struct csched_dom *ndom)
> +{
> +    list_add(&adom->nemesis_elem, &ndom->nemesis_doms);
> +    adom->ndom = ndom;
> +    adom->flags |= SDOM_IS_ADOM;
> +    ndom->flags |= SDOM_ACTIVE;
> +    delegate_active_vcpus(adom, ndom);
> +}
> +
> +static inline void rem_adom_from_ndom(struct csched_dom *adom,
> +                                      struct csched_dom *ndom)
> +{
> +    reclaim_active_vcpus(ndom, adom);
> +    adom->flags &= ~SDOM_IS_ADOM;
> +    adom->ndom = 0;
> +    list_del(&adom->nemesis_elem);
> +    if (list_empty(&ndom->nemesis_doms))
> +        ndom->flags &= ~SDOM_ACTIVE;
> +}
> +
>  static int
>  csched_pcpu_init(int cpu)
>  {
> @@ -395,6 +511,17 @@ __csched_vcpu_check(struct vcpu *vc)
>      else
>      {
>          BUG_ON( !is_idle_vcpu(vc) );
> +    }
> +
> +    if (sdom->flags & SDOM_ACTIVE)
> +    {
> +        BUG_ON(list_empty(&sdom->nemesis_doms));
> +        BUG_ON(sdom->flags & SDOM_IS_ADOM);
> +    }
> +    if (sdom->flags & SDOM_IS_ADOM)
> +    {
> +        BUG_ON(list_empty(&sdom->nemesis_elem));
> +        BUG_ON(sdom->flags & SDOM_ACTIVE);
>      }
>  
>      CSCHED_STAT_CRANK(vcpu_check);
> @@ -486,11 +613,11 @@ static inline void
>  static inline void
>  __csched_vcpu_acct_start(struct csched_vcpu *svc)
>  {
> -    struct csched_dom * const sdom = svc->sdom;
>      unsigned long flags;
> -
> +    struct csched_dom * sdom;
>      spin_lock_irqsave(&csched_priv.lock, flags);
>  
> +    sdom = accounting_dom(svc->sdom);
>      if ( list_empty(&svc->active_vcpu_elem) )
>      {
>          CSCHED_VCPU_STAT_CRANK(svc, state_active);
> @@ -504,14 +631,13 @@ __csched_vcpu_acct_start(struct csched_v
>              csched_priv.weight += sdom->weight;
>          }
>      }
> -
>      spin_unlock_irqrestore(&csched_priv.lock, flags);
>  }
>  
>  static inline void
>  __csched_vcpu_acct_stop_locked(struct csched_vcpu *svc)
>  {
> -    struct csched_dom * const sdom = svc->sdom;
> +    struct csched_dom * const sdom = accounting_dom(svc->sdom);
>  
>      BUG_ON( list_empty(&svc->active_vcpu_elem) );
>  
> @@ -605,20 +731,33 @@ csched_vcpu_init(struct vcpu *vc)
>      return 0;
>  }
>  
> +static void nemesis_cleanup(struct csched_vcpu *svc)
> +{
> +    if (svc->sdom->flags & SDOM_IS_ADOM)
> +        rem_adom_from_ndom(svc->sdom, accounting_dom(svc->sdom));
> +    if (svc->sdom->flags & SDOM_ACTIVE)
> +    {
> +        struct csched_dom *p, *n;
> +        list_for_each_entry_safe(p, n, &svc->sdom->nemesis_doms, nemesis_elem)
> +        {
> +            rem_adom_from_ndom(p, svc->sdom);
> +        }
> +    }
> +}
> +
> +
>  static void
>  csched_vcpu_destroy(struct vcpu *vc)
>  {
>      struct csched_vcpu * const svc = CSCHED_VCPU(vc);
> -    struct csched_dom * const sdom = svc->sdom;
>      unsigned long flags;
>  
>      CSCHED_STAT_CRANK(vcpu_destroy);
>  
> -    BUG_ON( sdom == NULL );
>      BUG_ON( !list_empty(&svc->runq_elem) );
>  
>      spin_lock_irqsave(&csched_priv.lock, flags);
> -
> +    nemesis_cleanup(svc);
>      if ( !list_empty(&svc->active_vcpu_elem) )
>          __csched_vcpu_acct_stop_locked(svc);
>  
> @@ -697,6 +836,108 @@ csched_vcpu_wake(struct vcpu *vc)
>      __runq_tickle(cpu, svc);
>  }
>  
> +static inline int
> +_sanity_check(struct csched_dom *adom, struct csched_dom *ndom)
> +{
> +    if (adom->dom->domain_id == ndom->dom->domain_id)
> +        return SDOM_err_same_id;
> +    if (adom->flags & SDOM_ACTIVE)
> +        return SDOM_err_already_sdom;
> +    if (ndom->flags & SDOM_IS_ADOM)
> +        return SDOM_err_already_adom;
> +    return 0;
> +}
> +
> +static inline int
> +add_sanity_check(struct csched_dom *adom, struct csched_dom *ndom)
> +{
> +    if (adom->ndom)
> +        return SDOM_err_inval;
> +    return _sanity_check(adom, ndom);
> +}
> +
> +static inline int
> +rem_sanity_check(struct csched_dom *adom, struct csched_dom *ndom)
> +{
> +    if (adom->ndom && adom->ndom == ndom)
> +        return _sanity_check(adom, ndom);
> +    return SDOM_err_inval;
> +}
> +
> +static int csched_sdom_op(struct xen_domctl_sdom * op)
> +{
> +    int ret = -EINVAL;
> +
> +    switch(op->op)
> +    {
> +    case SDOM_get_flags:
> +    case SDOM_get_sdom:
> +    {
> +        struct domain *ndom = get_domain_by_id(op->sdom);
> +        if (ndom)
> +        {
> +            struct csched_dom *p_ndom = csched_dom(ndom);
> +            if (op->op == SDOM_get_flags)
> +                op->flags = p_ndom->flags;
> +            else
> +            {
> +                struct csched_dom *nemesis_dom = get_nemesis_dom(p_ndom);
> +                if (nemesis_dom)
> +                {
> +                    op->sdom = nemesis_dom->dom->domain_id;
> +                    put_domain(nemesis_dom->dom);
> +                }
> +                else
> +                    op->reason = SDOM_err_not_adom;
> +            }
> +            put_domain(ndom);
> +            ret = 0;
> +        }
> +        break;
> +    }
> +
> +    case SDOM_del_adom:
> +    case SDOM_add_adom:
> +    {
> +
> +        struct domain *adom, *ndom;
> +        unsigned long flags;
> +
> +        ndom  = get_domain_by_id(op->sdom);
> +        if (!ndom)
> +            break;
> +        adom = get_domain_by_id(op->adom);
> +        if (!adom)
> +            goto release_ndom;
> +        ret = 0;
> +        if (op->op == SDOM_add_adom)
> +            op->reason = add_sanity_check(csched_dom(adom), csched_dom(ndom));
> +        else
> +            op->reason = rem_sanity_check(csched_dom(adom), csched_dom(ndom));
> +        if (op->reason)
> +            goto release_adom;
> +
> +        spin_lock_irqsave(&csched_priv.lock, flags);
> +        if (op->op == SDOM_add_adom)
> +            add_adom_to_ndom(csched_dom(adom), csched_dom(ndom));
> +        else
> +            rem_adom_from_ndom(csched_dom(adom), csched_dom(ndom));
> +        spin_unlock_irqrestore(&csched_priv.lock, flags);
> +
> +release_adom:
> +        put_domain(adom);
> +release_ndom:
> +        put_domain(ndom);
> +
> +        break;
> +    }
> +    default:
> +        break;
> +    }
> +
> +    return ret;
> +}
> +
>  static int
>  csched_dom_cntl(
>      struct domain *d,
> @@ -754,10 +995,13 @@ csched_dom_init(struct domain *dom)
>      sdom->active_vcpu_count = 0;
>      INIT_LIST_HEAD(&sdom->active_sdom_elem);
>      sdom->dom = dom;
> +    sdom->ndom = 0;
>      sdom->weight = CSCHED_DEFAULT_WEIGHT;
>      sdom->cap = 0U;
>      dom->sched_priv = sdom;
> -
> +    INIT_LIST_HEAD(&sdom->nemesis_doms);
> +    INIT_LIST_HEAD(&sdom->nemesis_elem);
> +    sdom->flags = 0U;
>      return 0;
>  }
>  
> @@ -942,7 +1186,6 @@ csched_acct(void)
>          list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
>          {
>              svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
> -            BUG_ON( sdom != svc->sdom );
>  
>              /* Increment credit */
>              atomic_add(credit_fair, &svc->credit);
> @@ -1384,6 +1627,7 @@ struct scheduler sched_credit_def = {
>      .sleep          = csched_vcpu_sleep,
>      .wake           = csched_vcpu_wake,
>  
> +    .sdom_op        = csched_sdom_op,
>      .adjust         = csched_dom_cntl,
>  
>      .pick_cpu       = csched_cpu_pick,
> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
