
To: David Vrabel <david.vrabel@xxxxxxxxxx>
Subject: [Xen-devel] Re: [PATCH 5/5] xen: release all pages within 1-1 p2m mappings
From: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Date: Tue, 6 Sep 2011 17:20:46 -0400
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx, David Vrabel <david.vrabel@xxxxxxx>
Delivery-date: Tue, 06 Sep 2011 14:21:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1313765840-22084-6-git-send-email-david.vrabel@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1313765840-22084-1-git-send-email-david.vrabel@xxxxxxxxxx> <1313765840-22084-6-git-send-email-david.vrabel@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.21 (2010-09-15)
On Fri, Aug 19, 2011 at 03:57:20PM +0100, David Vrabel wrote:
> In xen_memory_setup() all reserved regions and gaps are set to an
> identity (1-1) p2m mapping.  If an available page has a PFN within one
> of these 1-1 mappings it will become inaccessible (as its MFN is lost) so
> release them before setting up the mapping.
> 
> This can make an additional 256 MiB or more of RAM available
> (depending on the size of the reserved regions in the memory map).

.. if the xen_start_info->nr_pages overlaps the reserved region.
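To make the gain concrete, here is a toy userspace model (not kernel code; the
map layout and nr_pages below are made-up, illustrative values): if the domain
is handed 4 GiB of pages but the host E820 reserves the 3.75-4 GiB range, the
pages whose PFNs fall in that range have to be released before the range is
made 1-1, and they can then be re-added elsewhere as the extra ~256 MiB
mentioned above.

#include <stdio.h>

/*
 * Toy model only: count the pages below nr_pages whose PFNs fall inside
 * a non-RAM region.  Those pages would lose their MFNs once the region
 * is made 1-1, so they must be released first.
 */
struct region { unsigned long start_pfn, end_pfn; int is_ram; };

int main(void)
{
	struct region map[] = {
		{ 0x00000,  0xf0000,  1 },	/* RAM:      0 - 3.75 GiB */
		{ 0xf0000,  0x100000, 0 },	/* reserved: 3.75 - 4 GiB */
		{ 0x100000, 0x140000, 1 },	/* RAM:      4 - 5 GiB */
	};
	unsigned long nr_pages = 0x100000;	/* domain handed 4 GiB of pages */
	unsigned long to_release = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		unsigned long end = map[i].end_pfn < nr_pages ?
				    map[i].end_pfn : nr_pages;

		if (map[i].is_ram || map[i].start_pfn >= end)
			continue;
		to_release += end - map[i].start_pfn;
	}

	/* 65536 pages == 256 MiB with 4 KiB pages */
	printf("%lu pages (%lu MiB) need releasing before the 1-1 mapping\n",
	       to_release, to_release >> 8);
	return 0;
}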

> 
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxx>
> ---
>  arch/x86/xen/setup.c |   88 ++++++++++++--------------------------------------
>  1 files changed, 21 insertions(+), 67 deletions(-)
> 
> diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
> index 93e4542..0f1cd69 100644
> --- a/arch/x86/xen/setup.c
> +++ b/arch/x86/xen/setup.c
> @@ -123,73 +123,33 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
>       return len;
>  }
>  
> -static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
> -                                                  const struct e820entry *map,
> -                                                  int nr_map)
> +static unsigned long __init xen_set_identity_and_release(const struct e820entry *list,
> +                                                      ssize_t map_size,
> +                                                      unsigned long nr_pages)
>  {
> -     phys_addr_t max_addr = PFN_PHYS(max_pfn);
> -     phys_addr_t last_end = ISA_END_ADDRESS;
> +     phys_addr_t avail_end = PFN_PHYS(nr_pages);
> +     phys_addr_t last_end = 0;
>       unsigned long released = 0;
> -     int i;
> -
> -     /* Free any unused memory above the low 1Mbyte. */
> -     for (i = 0; i < nr_map && last_end < max_addr; i++) {
> -             phys_addr_t end = map[i].addr;
> -             end = min(max_addr, end);
> -
> -             if (last_end < end)
> -                     released += xen_release_chunk(last_end, end);
> -             last_end = max(last_end, map[i].addr + map[i].size);
> -     }
> -
> -     if (last_end < max_addr)
> -             released += xen_release_chunk(last_end, max_addr);
> -
> -     printk(KERN_INFO "released %lu pages of unused memory\n", released);
> -     return released;
> -}
> -
> -static unsigned long __init xen_set_identity(const struct e820entry *list,
> -                                          ssize_t map_size)
> -{
> -     phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
> -     phys_addr_t start_pci = last;
>       const struct e820entry *entry;
> -     unsigned long identity = 0;
>       int i;
>  
>       for (i = 0, entry = list; i < map_size; i++, entry++) {
> -             phys_addr_t start = entry->addr;
> -             phys_addr_t end = start + entry->size;
> -
> -             if (start < last)
> -                     start = last;
> +             phys_addr_t begin = last_end;

The "begin" is a bit confusing. You are using the previous E820 entry's end - 
not
the beginning of this E820 entry. Doing a s/begin/last_end/ makes
the code a bit easier to understand.

> +             phys_addr_t end = entry->addr + entry->size;
>  
> -             if (end <= start)
> -                     continue;
> +             last_end = end;

Please include the comment:
/* This entry's end. */

>  
> -             /* Skip over the 1MB region. */
> -             if (last > end)
> -                     continue;
> +             if (entry->type == E820_RAM || entry->type == E820_UNUSABLE)
> +                     end = entry->addr;

And:
/* Should encapsulate the gap between prev_end and this E820
entry's starting address. */
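I.e. the top of the loop could then read something like this (only a sketch,
folding in the rename suggested above; 'prev_end' is just an illustrative
name):

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		/* This range starts where the previous E820 entry ended. */
		phys_addr_t prev_end = last_end;
		phys_addr_t end = entry->addr + entry->size;

		/* This entry's end. */
		last_end = end;

		/*
		 * For RAM/unusable entries only cover the gap between
		 * prev_end and this E820 entry's starting address; the
		 * entry itself is left alone.
		 */
		if (entry->type == E820_RAM || entry->type == E820_UNUSABLE)
			end = entry->addr;
		...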
>  
> -             if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
> -                     if (start > start_pci)
> -                             identity += set_phys_range_identity(
> -                                             PFN_UP(start_pci), PFN_DOWN(start));
> +             if (begin < end) {
> +                     if (begin < avail_end)
> +                             released += xen_release_chunk(begin, min(end, avail_end));
>  
> -                     /* Without saving 'last' we would gooble RAM too
> -                      * at the end of the loop. */
> -                     last = end;
> -                     start_pci = end;
> -                     continue;
> +                     set_phys_range_identity(PFN_UP(begin), PFN_DOWN(end));

identity += set_phys_range ..
>               }
> -             start_pci = min(start, start_pci);
> -             last = end;
>       }
> -     if (last > start_pci)
> -             identity += set_phys_range_identity(
> -                                     PFN_UP(start_pci), PFN_DOWN(last));
> -     return identity;

OK, but you have ripped out the nice printk's that existed before. So add them
back in:


        printk(KERN_INFO "released %lu pages of unused memory\n", released);
        printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity);

as they are quite useful in the field.
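Continuing the sketch from above, the tail of the function would then be
roughly (again only a sketch; 'identity' is the counter declared back at the
top of the function as before, 'unsigned long identity = 0;'):

		if (prev_end < end) {
			if (prev_end < avail_end)
				released += xen_release_chunk(prev_end,
							      min(end, avail_end));

			identity += set_phys_range_identity(PFN_UP(prev_end),
							    PFN_DOWN(end));
		}
	}

	printk(KERN_INFO "released %lu pages of unused memory\n", released);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity);

	return released;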

> +     return released;
>  }
>  
>  static unsigned long __init xen_get_max_pages(void)
> @@ -217,7 +177,6 @@ char * __init xen_memory_setup(void)
>       struct xen_memory_map memmap;
>       unsigned long max_pages;
>       unsigned long extra_pages = 0;
> -     unsigned long identity_pages = 0;
>       int i;
>       int op;
>  
> @@ -250,7 +209,12 @@ char * __init xen_memory_setup(void)
>       if (max_pages > max_pfn)
>               extra_pages += max_pages - max_pfn;
>  
> -     extra_pages += xen_return_unused_memory(max_pfn, map, memmap.nr_entries);
> +     /*
> +      * Set P2M for all non-RAM pages and E820 gaps to be identity
> +      * type PFNs.  Any RAM pages that would be made inaccesible by
> +      * this are first released.
> +      */
> +     extra_pages += xen_set_identity_and_release(map, memmap.nr_entries, max_pfn);
>  
>       /*
>        * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
> @@ -294,10 +258,6 @@ char * __init xen_memory_setup(void)
>        * In domU, the ISA region is normal, usable memory, but we
>        * reserve ISA memory anyway because too many things poke
>        * about in there.
> -      *
> -      * In Dom0, the host E820 information can leave gaps in the
> -      * ISA range, which would cause us to release those pages.  To
> -      * avoid this, we unconditionally reserve them here.
>        */
>       e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
>                       E820_RESERVED);
> @@ -314,12 +274,6 @@ char * __init xen_memory_setup(void)
>  
>       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
>  
> -     /*
> -      * Set P2M for all non-RAM pages and E820 gaps to be identity
> -      * type PFNs.
> -      */
> -     identity_pages = xen_set_identity(e820.map, e820.nr_map);
> -     printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
>       return "Xen";
>  }
>  
> -- 
> 1.7.2.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
