To: xen-api@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-API] [PATCH 2 of 9] XCP.PQ: Collect domain runstate information in the hypervisor, and export
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Tue, 11 Jan 2011 10:55:50 +0000
Cc: gianni.tedesco@xxxxxxxxxx, zheng.li@xxxxxxxxxxxxx
Delivery-date: Tue, 11 Jan 2011 03:44:42 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1294743348@xxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-api-request@lists.xensource.com?subject=help>
List-id: Discussion of API issues surrounding Xen <xen-api.lists.xensource.com>
List-post: <mailto:xen-api@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-api>, <mailto:xen-api-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-api>, <mailto:xen-api-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1294743348@xxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-api-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.5.2
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1294742831 0
# Node ID 439cf43e55018acb0b4b287b32e647ef2b893950
# Parent  1d1ae9c19591075dd50f7fb566247adf7945b3b9
XCP.PQ: Collect domain runstate information in the hypervisor, and export
through a domctl operation.
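
For context (not part of the patch itself), a toolstack caller would drive the
new XEN_DOMCTL_get_runstate_info operation roughly as in the sketch below. It
assumes a libxc-style do_domctl() helper; the wrapper name
get_domain_runstate() is hypothetical.

    /*
     * Illustrative sketch only -- not part of this patch.  Assumes a
     * libxc-style do_domctl() helper; the wrapper name is hypothetical.
     */
    #include <string.h>
    #include "xc_private.h"

    static int get_domain_runstate(xc_interface *xch, domid_t domid,
                                   xen_domctl_runstate_info_t *info)
    {
        struct xen_domctl domctl;
        int rc;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_get_runstate_info;  /* defined below as 98 */
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domid;

        rc = do_domctl(xch, &domctl);          /* issues the domctl hypercall */
        if ( rc == 0 )
            *info = domctl.u.domain_runstate;  /* copied back by the hypervisor */

        return rc;
    }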

diff -r 1d1ae9c19591 -r 439cf43e5501 xen/common/domain.c
--- a/xen/common/domain.c       Tue Jan 11 10:47:11 2011 +0000
+++ b/xen/common/domain.c       Tue Jan 11 10:47:11 2011 +0000
@@ -238,6 +238,7 @@ struct domain *domain_create(
     spin_lock_init_prof(d, domain_lock);
     spin_lock_init_prof(d, page_alloc_lock);
     spin_lock_init(&d->hypercall_deadlock_mutex);
+    spin_lock_init(&d->runstate_lock);
     INIT_PAGE_LIST_HEAD(&d->page_list);
     INIT_PAGE_LIST_HEAD(&d->xenpage_list);
 
diff -r 1d1ae9c19591 -r 439cf43e5501 xen/common/domctl.c
--- a/xen/common/domctl.c       Tue Jan 11 10:47:11 2011 +0000
+++ b/xen/common/domctl.c       Tue Jan 11 10:47:11 2011 +0000
@@ -970,6 +970,25 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
     }
     break;
 
+    case XEN_DOMCTL_get_runstate_info:
+    {
+        struct domain *d;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(op->domain);
+        if ( d != NULL )
+        {
+            domain_runstate_get(d, &op->u.domain_runstate);
+            ret = 0;
+
+            rcu_unlock_domain(d);
+
+            if ( copy_to_guest(u_domctl, op, 1) )
+                ret = -EFAULT;
+        }
+        break;
+    }
+
     default:
         ret = arch_do_domctl(op, u_domctl);
         break;
diff -r 1d1ae9c19591 -r 439cf43e5501 xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Jan 11 10:47:11 2011 +0000
+++ b/xen/common/schedule.c     Tue Jan 11 10:47:11 2011 +0000
@@ -137,9 +137,30 @@ static inline void vcpu_urgent_count_upd
     }
 }
 
+/* Used to quickly map the vcpu runstate mask to a domain runstate */
+static int mask_to_state[] = {
+    /* 000: Nothing in any runstate.  Should never happen. */
+    -1,
+    /* 001: All running */
+    DOMAIN_RUNSTATE_full_run,
+    /* 010: All runnable */
+    DOMAIN_RUNSTATE_full_contention,
+    /* 011: Some running, some runnable */
+    DOMAIN_RUNSTATE_concurrency_hazard,
+    /* 100: All blocked / offline */
+    DOMAIN_RUNSTATE_blocked,
+    /* 101: Some running, some blocked / offline */
+    DOMAIN_RUNSTATE_partial_run,
+    /* 110: Some blocked / offline, some runnable */
+    DOMAIN_RUNSTATE_partial_contention,
+    /* 111: Some in every state.  Mixed running + runnable is most important. */
+    DOMAIN_RUNSTATE_concurrency_hazard
+};
+
 static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
+    struct domain *d = v->domain;
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
@@ -157,6 +178,45 @@ static inline void vcpu_runstate_change(
     }
 
     v->runstate.state = new_state;
+
+    /* Update domain runstate */
+    if ( spin_trylock(&d->runstate_lock) )
+    {
+        unsigned mask=0;
+        struct vcpu *ov;
+
+        BUG_ON(d->runstate.state > DOMAIN_RUNSTATE_partial_contention);
+
+        d->runstate.time[d->runstate.state] +=
+            (new_entry_time - d->runstate.state_entry_time);
+        d->runstate.state_entry_time = new_entry_time;
+
+        /* Determine new runstate.  First, see what states we have */
+        for_each_vcpu(d, ov) {
+            /* Don't count vcpus that have been taken offline by the guest */
+            if ( !(ov->runstate.state == RUNSTATE_offline
+                   && test_bit(_VPF_down, &ov->pause_flags)) )
+               mask |= (1 << ov->runstate.state);
+        }
+
+        if ( mask == 0 )
+        {
+            printk("%s: d%d has no online vcpus!\n",
+                   __func__, d->domain_id);
+            mask = 1 << RUNSTATE_offline;
+        }
+
+        /* Offline & blocked are the same */
+        mask |= ((1 << RUNSTATE_offline) & mask) >> 1;
+
+        d->runstate.state = mask_to_state[mask&0x7];
+
+        spin_unlock(&d->runstate_lock);
+    } 
+    else 
+    {
+        atomic_inc(&d->runstate_missed_changes);
+    }
 }
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
@@ -175,6 +235,20 @@ void vcpu_runstate_get(struct vcpu *v, s
         vcpu_schedule_unlock_irq(v);
 }
 
+void domain_runstate_get(struct domain *d, domain_runstate_info_t *runstate)
+{
+    unsigned long flags;
+    /* Have to disable interrupts because the other user of the lock runs 
+     * in interrupt context. */
+    spin_lock_irqsave(&d->runstate_lock, flags);
+
+    memcpy(runstate, &d->runstate, sizeof(*runstate));
+    runstate->time[d->runstate.state] += NOW() - runstate->state_entry_time;
+    runstate->missed_changes = atomic_read(&d->runstate_missed_changes);
+
+    spin_unlock_irqrestore(&d->runstate_lock, flags);
+}
+
 uint64_t get_cpu_idle_time(unsigned int cpu)
 {
     struct vcpu_runstate_info state;
diff -r 1d1ae9c19591 -r 439cf43e5501 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Tue Jan 11 10:47:11 2011 +0000
+++ b/xen/include/public/domctl.h       Tue Jan 11 10:47:11 2011 +0000
@@ -830,6 +830,47 @@ struct xen_domctl_set_access_required {
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
 
+/*
+ * Return information about the state and running time of a domain.
+ * The "domain runstate" is based on the runstates of all the vcpus of the
+ * domain (see below).
+ * @extra_arg == pointer to domain_runstate_info structure.
+ */
+struct xen_domctl_runstate_info {
+    /* Domain's current state (DOMAIN_RUNSTATE_*). */
+    uint32_t      state;
+    /* Number of times an update was missed due to contention */
+    uint32_t missed_changes;
+    /* When was current state entered (system time, ns)? */
+    uint64_t state_entry_time;
+    /*
+     * Time spent in each DOMAIN_RUNSTATE_* (ns). The sum of these times is
+     * NOT guaranteed not to drift from system time.
+     */
+    uint64_t time[6];
+};
+typedef struct xen_domctl_runstate_info xen_domctl_runstate_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_runstate_info_t);
+
+/* All vcpus are running */
+#define DOMAIN_RUNSTATE_full_run           0
+
+/* All vcpus are runnable (i.e., waiting for cpu) */
+#define DOMAIN_RUNSTATE_full_contention    1
+
+/* Some vcpus are running, some are runnable */
+#define DOMAIN_RUNSTATE_concurrency_hazard 2
+
+/* All vcpus are blocked / offline */
+#define DOMAIN_RUNSTATE_blocked            3
+
+/* Some vcpus are running, some are blocked */
+#define DOMAIN_RUNSTATE_partial_run        4
+
+/* Some vcpus are runnable, some are blocked */
+#define DOMAIN_RUNSTATE_partial_contention 5
+
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -893,6 +934,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_setvcpuextstate               62
 #define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_set_access_required           64
+#define XEN_DOMCTL_get_runstate_info             98
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -946,6 +988,7 @@ struct xen_domctl {
         struct xen_domctl_vcpuextstate      vcpuextstate;
 #endif
         struct xen_domctl_set_access_required access_required;
+        struct xen_domctl_runstate_info     domain_runstate;
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
diff -r 1d1ae9c19591 -r 439cf43e5501 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Tue Jan 11 10:47:11 2011 +0000
+++ b/xen/include/xen/sched.h   Tue Jan 11 10:47:11 2011 +0000
@@ -200,6 +200,8 @@ struct mem_event_domain
     int xen_port;
 };
 
+typedef struct xen_domctl_runstate_info domain_runstate_info_t;
+
 struct domain
 {
     domid_t          domain_id;
@@ -332,6 +334,11 @@ struct domain
     nodemask_t node_affinity;
     unsigned int last_alloc_node;
     spinlock_t node_affinity_lock;
+
+    /* Domain runstates */
+    spinlock_t runstate_lock;
+    atomic_t runstate_missed_changes;
+    domain_runstate_info_t runstate;
 };
 
 struct domain_setup_info
@@ -613,6 +620,8 @@ int cpu_disable_scheduler(unsigned int c
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
+void domain_runstate_get(struct domain *d, domain_runstate_info_t *runstate);
+
 uint64_t get_cpu_idle_time(unsigned int cpu);
 
 /*
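
As a usage note (again not part of the patch), two successive samples of the
returned structure can be turned into per-state fractions of the sampling
interval, for example the share of time the domain spent with every vcpu
runnable but none running:

    /*
     * Illustrative only: fraction of the sampling interval spent in
     * DOMAIN_RUNSTATE_full_contention.  The interval is taken as the sum
     * of the per-state deltas, since the header notes the sum of time[]
     * may drift from system time.
     */
    static double full_contention_fraction(const xen_domctl_runstate_info_t *prev,
                                           const xen_domctl_runstate_info_t *cur)
    {
        uint64_t total = 0;
        unsigned int i;

        for ( i = 0; i < 6; i++ )
            total += cur->time[i] - prev->time[i];

        if ( total == 0 )
            return 0.0;

        return (double)(cur->time[DOMAIN_RUNSTATE_full_contention] -
                        prev->time[DOMAIN_RUNSTATE_full_contention]) / total;
    }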

_______________________________________________
xen-api mailing list
xen-api@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/mailman/listinfo/xen-api
