WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] Re: linux: try harder to balloon up under memory pressure.

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] Re: linux: try harder to balloon up under memory pressure.
From: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
Date: Fri, 5 Jun 2009 11:59:31 +0100
Cc: Jeremy Fitzhardinge <jeremy@xxxxxxxx>, Keir Fraser <Keir.Fraser@xxxxxxxxxxxxx>
Delivery-date: Fri, 05 Jun 2009 04:00:14 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1244199426.27370.158.camel@xxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Organization: Citrix Systems, Inc.
References: <1244199426.27370.158.camel@xxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
On Fri, 2009-06-05 at 06:57 -0400, Ian Campbell wrote:
> 
> This patch is for the 2.6.18-xen tree.

Same patch for pvops kernel

Subject: xen: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 drivers/xen/balloon.c |   31 +++++--------------------------
 1 files changed, 5 insertions(+), 26 deletions(-)

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index c83da03..5287d09 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -66,8 +66,6 @@ struct balloon_stats {
        /* We aim for 'current allocation' == 'target allocation'. */
        unsigned long current_pages;
        unsigned long target_pages;
-       /* We may hit the hard limit in Xen. If we do then we remember it. */
-       unsigned long hard_limit;
        /*
         * Drivers may alter the memory reservation independently, but they
         * must inform the balloon driver so we avoid hitting the hard limit.
@@ -178,7 +176,7 @@ static void balloon_alarm(unsigned long unused)
 
 static unsigned long current_target(void)
 {
-       unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit);
+       unsigned long target = balloon_stats.target_pages;
 
        target = min(target,
                     balloon_stats.current_pages +
@@ -214,23 +212,10 @@ static int increase_reservation(unsigned long nr_pages)
        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents = nr_pages;
        rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
-       if (rc < nr_pages) {
-               if (rc > 0) {
-                       int ret;
-
-                       /* We hit the Xen hard limit: reprobe. */
-                       reservation.nr_extents = rc;
-                       ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-                                                  &reservation);
-                       BUG_ON(ret != rc);
-               }
-               if (rc >= 0)
-               balloon_stats.hard_limit = (balloon_stats.current_pages + rc -
-                                                   balloon_stats.driver_pages);
+       if (rc < 0)
                goto out;
-       }
 
-       for (i = 0; i < nr_pages; i++) {
+       for (i = 0; i < rc; i++) {
                page = balloon_retrieve();
                BUG_ON(page == NULL);
 
@@ -256,13 +241,13 @@ static int increase_reservation(unsigned long nr_pages)
                __free_page(page);
        }
 
-       balloon_stats.current_pages += nr_pages;
+       balloon_stats.current_pages += rc;
        totalram_pages = balloon_stats.current_pages;
 
  out:
        spin_unlock_irqrestore(&xen_reservation_lock, flags);
 
-       return 0;
+       return rc < 0 ? rc : rc != nr_pages;
 }
 
 static int decrease_reservation(unsigned long nr_pages)
@@ -364,7 +349,6 @@ static void balloon_process(struct work_struct *work)
 static void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
-       balloon_stats.hard_limit   = ~0UL;
        balloon_stats.target_pages = target;
        schedule_work(&balloon_worker);
 }
@@ -424,7 +408,6 @@ static int __init balloon_init(void)
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
        balloon_stats.driver_pages  = 0UL;
-       balloon_stats.hard_limit    = ~0UL;
 
        init_timer(&balloon_timer);
        balloon_timer.data = 0;
@@ -564,9 +547,6 @@ EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
 BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
 BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-BALLOON_SHOW(hard_limit_kb,
-            (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n",
-            (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
 BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
 
static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
@@ -635,7 +615,6 @@ static struct attribute *balloon_info_attrs[] = {
        &attr_current_kb.attr,
        &attr_low_kb.attr,
        &attr_high_kb.attr,
-       &attr_hard_limit_kb.attr,
        &attr_driver_kb.attr,
        NULL
 };
-- 
1.5.6.5




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel