List:       xen-cvs
Subject:    [Xen-changelog] [xen-unstable] credit2: Track expected load
From:       Xen patchbot-unstable <patchbot-unstable@lists.xensource.com>
Date:       2010-12-24 23:47:07
Message-ID: 201012242347.oBONl896028116@xenbits.xensource.com

# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1293179442 0
# Node ID 6a970abb346f1767523649240a6307e55dfcd76c
# Parent  cf1ea603b340f6b56afd0b4036d5dd99e947fcf5
credit2: Track expected load

As vcpus are migrated, track how we expect the load to change.  This
helps smooth migrations when the balancing doesn't take immediate
effect on the load average.  In theory, if vcpu activity remains
constant, then the measured avgload should converge to the balanced
avgload.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/common/sched_credit2.c |   15 ++++++++++++++-
 1 files changed, 14 insertions(+), 1 deletion(-)
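
The update duplicated below for b_avgload is credit2's fixed-point decaying
(exponentially weighted) average: new = (delta * load + (W - delta) * old) / W,
with W = 1 << load_window_shift and the instantaneous load shifted up into the
same fixed-point scale.  A minimal standalone sketch of that arithmetic, with
simplified names and a hypothetical window shift rather than the actual Xen
code:

#include <stdint.h>
#include <stdio.h>

#define LOAD_WINDOW_SHIFT 18   /* hypothetical value; Xen takes this from prv */

/* Fixed-point EWMA: mixes the instantaneous load (shifted up into
 * fixed point) with the old average, weighted by how much of the
 * window has elapsed since the last update. */
static uint64_t decay_load(uint64_t old_avg, int load, uint64_t delta)
{
    uint64_t w = 1ULL << LOAD_WINDOW_SHIFT;

    if (delta >= w)   /* window fully elapsed: average resets to load */
        return (uint64_t)load << LOAD_WINDOW_SHIFT;

    return (delta * ((uint64_t)load << LOAD_WINDOW_SHIFT)
            + (w - delta) * old_avg) >> LOAD_WINDOW_SHIFT;
}

int main(void)
{
    uint64_t avg = 0;

    /* Constant instantaneous load of 2 vcpus, sampled every quarter
     * window: the average converges toward 2.0 from below. */
    for (int i = 0; i < 16; i++)
    {
        avg = decay_load(avg, 2, (1ULL << LOAD_WINDOW_SHIFT) / 4);
        printf("step %2d: avgload = %.3f\n", i,
               (double)avg / (1ULL << LOAD_WINDOW_SHIFT));
    }
    return 0;
}

With a constant load the iteration's only fixed point is the load itself,
which is the convergence property the commit message relies on.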

diff -r cf1ea603b340 -r 6a970abb346f xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c	Fri Dec 24 08:30:15 2010 +0000
+++ b/xen/common/sched_credit2.c	Fri Dec 24 08:30:42 2010 +0000
@@ -206,6 +206,7 @@ struct csched_runqueue_data {
     int load;              /* Instantaneous load: Length of queue  + num non-idle threads */
     s_time_t load_last_update;  /* Last time average was updated */
     s_time_t avgload;           /* Decaying queue load */
+    s_time_t b_avgload;         /* Decaying queue load modified by balancing */
 };
 
 /*
@@ -302,6 +303,7 @@ __update_runq_load(const struct schedule
     if ( rqd->load_last_update + (1ULL<<prv->load_window_shift) < now )
     {
         rqd->avgload = (unsigned long long)rqd->load << prv->load_window_shift;
+        rqd->b_avgload = (unsigned long long)rqd->load << prv->load_window_shift;
     }
     else
     {
@@ -310,6 +312,10 @@ __update_runq_load(const struct schedule
         rqd->avgload =
             ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) )
               + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->avgload ) ) >> prv->load_window_shift;
+
+        rqd->b_avgload =
+            ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) )
+              + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->b_avgload ) ) >> prv->load_window_shift;
     }
     rqd->load += change;
     rqd->load_last_update = now;
@@ -317,11 +323,12 @@ __update_runq_load(const struct schedule
     {
         struct {
             unsigned rq_load:4, rq_avgload:28;
-            unsigned rq_id:4;
+            unsigned rq_id:4, b_avgload:28;
         } d;
         d.rq_id=rqd->id;
         d.rq_load = rqd->load;
         d.rq_avgload = rqd->avgload;
+        d.b_avgload = rqd->b_avgload;
         trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
                   sizeof(d),
                   (unsigned char *)&d);
@@ -756,6 +763,9 @@ __runq_assign(struct csched_vcpu *svc, s
 
     update_max_weight(svc->rqd, svc->weight, 0);
 
+    /* Expected new load based on adding this vcpu */
+    rqd->b_avgload += svc->avgload;
+
     /* TRACE */
     {
         struct {
@@ -789,6 +799,9 @@ __runq_deassign(struct csched_vcpu *svc)
 
     list_del_init(&svc->rqd_elem);
     update_max_weight(svc->rqd, 0, svc->weight);
+
+    /* Expected new load based on removing this vcpu */
+    svc->rqd->b_avgload -= svc->avgload;
 
     svc->rqd = NULL;
 }
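
The __runq_assign()/__runq_deassign() hunks above are where the "expected"
part comes in: b_avgload moves by the migrating vcpu's own average load at the
moment the balancer makes its decision, while the measured avgload only
catches up as the decaying average is recomputed.  A hypothetical miniature of
that bookkeeping, using simplified structures rather than Xen's csched_vcpu
and csched_runqueue_data:

#include <stdint.h>

struct runq {
    int load;            /* instantaneous: runnable + running vcpus */
    uint64_t avgload;    /* measured decaying load (fixed point) */
    uint64_t b_avgload;  /* expected load including pending migrations */
};

struct vcpu_load {
    uint64_t avgload;    /* this vcpu's own decaying load */
    struct runq *rqd;    /* runqueue the vcpu is assigned to */
};

/* Analogue of __runq_assign(): expected load rises immediately. */
static void runq_assign(struct vcpu_load *v, struct runq *rqd)
{
    v->rqd = rqd;
    rqd->b_avgload += v->avgload;
}

/* Analogue of __runq_deassign(): expected load drops immediately. */
static void runq_deassign(struct vcpu_load *v)
{
    v->rqd->b_avgload -= v->avgload;
    v->rqd = NULL;
}

A balancer that compares b_avgload rather than avgload thus sees the effect of
a migration as soon as it is made, and will not keep bouncing the same vcpu
between runqueues while the measured averages lag behind.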

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog

