|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH] xen/arm: skip holes in physical address space when setting up frametable
Hi Michal,
>>> +void __init init_frametable(paddr_t ram_start)
>>> +{
>>> + unsigned int sidx, nidx, max_idx;
>>>
>>> /*
>>> * The size of paddr_t should be sufficient for the complete range of
>>> @@ -26,24 +47,34 @@ void __init setup_frametable_mappings(paddr_t ps,
>>> paddr_t pe)
>>> BUILD_BUG_ON((sizeof(paddr_t) * BITS_PER_BYTE) < PADDR_BITS);
>>> BUILD_BUG_ON(sizeof(struct page_info) != PAGE_INFO_SIZE);
>>>
>>> - if ( frametable_size > FRAMETABLE_SIZE )
>>> - panic("The frametable cannot cover the physical region
>>> %#"PRIpaddr" - %#"PRIpaddr"\n",
>>> - ps, pe);
>>> + max_idx = DIV_ROUND_UP(max_pdx, PDX_GROUP_COUNT);
>>> + frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ram_start));
>>>
>>> - frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
>>> - /* Round up to 2M or 32M boundary, as appropriate. */
>>> - frametable_size = ROUNDUP(frametable_size, mapping_size);
>>> - base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT,
>>> 32<<(20-12));
>>> + /*
>>> + * pdx_to_page(pdx_s) in init_frametable_chunk must be page-aligned
>>> + * for map_pages_to_xen(). Aligning to PDX_GROUP_COUNT guarantees this
>>> + * because PDX_GROUP_COUNT * sizeof(page_info) is always a multiple of
>>> + * PAGE_SIZE by construction.
>>> + */
>>> + frametable_base_pdx = ROUNDDOWN(frametable_base_pdx, PDX_GROUP_COUNT);
>>
>> We are now rounding down frametable_base_pdx which before this patch it was
>> the start of the ram,
>> but in xen/xen/arch/arm/include/asm/mm.h, mfn_valid(mfn) is using
>> frametable_base_pdx to check for
>> mfn validity, this means that we could pass an mfn before the start of the
>> ram and if __mfn_valid is happy,
>> we are getting a regression.
>>
>> Can this happen or am I missing something?
> mfn_valid() can indeed return true for an MFN below ram_start that falls
> in the same PDX group, but this is safe. init_frametable_chunk() maps
> and zeroes the frametable for that range, so mfn_to_page() won't fault.
> The zeroed page_info has count_info == 0 and no owner, so any get_page()
> call on it will fail — the page is effectively inert.
Yes, I’ve checked, and many paths that rely on mfn_valid() also go through
mfn_to_page() and/or get_page(). There is only one call site, in process_shm(),
that could potentially add a shared memory page
now that we are relaxing mfn_valid().
I’m also trying to follow is_iomem_page() to check whether a subsequent mfn_to_page()
fails safely, but I think that, depending
on mfn_valid(), the page will only be treated differently; I’m not sure whether
leaving mfn_valid() as it is would be a latent bug.
Would it be valid to make something like mfn_to_page() != 0 part of
mfn_valid(), to ensure it refers to a real host RAM page?
I’m genuinely asking here, because I haven’t checked whether that is doable.
Otherwise, maybe we could split the rounded-down group base from
frametable_base_pdx, along these lines:
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 605ec1bcd108..a6802d331178 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -183,6 +183,7 @@ struct page_info
/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;
+extern unsigned long frametable_base_grp;
#define PDX_GROUP_SHIFT SECOND_SHIFT
@@ -228,9 +229,9 @@ static inline void __iomem *ioremap_wc(paddr_t start,
size_t len)
/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn) \
- (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
+ (frame_table + (mfn_to_pdx(mfn) - frametable_base_grp))
#define page_to_mfn(pg) \
- pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
+ pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_grp)
/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
diff --git a/xen/arch/arm/include/asm/mmu/mm.h
b/xen/arch/arm/include/asm/mmu/mm.h
index 7f4d59137d0d..48a0e342307b 100644
--- a/xen/arch/arm/include/asm/mmu/mm.h
+++ b/xen/arch/arm/include/asm/mmu/mm.h
@@ -88,7 +88,7 @@ static inline struct page_info *virt_to_page(const void *v)
pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
pdx += mfn_to_pdx(directmap_mfn_start);
- return frame_table + pdx - frametable_base_pdx;
+ return frame_table + pdx - frametable_base_grp;
}
/*
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index e6b651956927..765896ec2c32 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -28,6 +28,8 @@
unsigned long frametable_base_pdx __read_mostly;
+unsigned long frametable_base_grp __read_mostly;
+
#if defined(CONFIG_ARM_64) || defined(CONFIG_MPU)
void __init setup_mm(void)
{
diff --git a/xen/arch/arm/mmu/mm.c b/xen/arch/arm/mmu/mm.c
index 4b4da349c16c..11c47c2d0e25 100644
--- a/xen/arch/arm/mmu/mm.c
+++ b/xen/arch/arm/mmu/mm.c
@@ -9,7 +9,7 @@
#include <xen/string.h>
#undef pdx_to_page
-#define pdx_to_page(pdx) gcc11_wrap(frame_table + ((pdx) -
frametable_base_pdx))
+#define pdx_to_page(pdx) gcc11_wrap(frame_table + ((pdx) -
frametable_base_grp))
static void __init
init_frametable_chunk(unsigned long pdx_s, unsigned long pdx_e)
@@ -56,12 +56,12 @@ void __init init_frametable(paddr_t ram_start)
* because PDX_GROUP_COUNT * sizeof(page_info) is always a multiple of
* PAGE_SIZE by construction.
*/
- frametable_base_pdx = ROUNDDOWN(frametable_base_pdx, PDX_GROUP_COUNT);
+ frametable_base_grp = ROUNDDOWN(frametable_base_pdx, PDX_GROUP_COUNT);
- if ( (max_pdx - frametable_base_pdx) > FRAMETABLE_NR )
+ if ( (max_pdx - frametable_base_grp) > FRAMETABLE_NR )
panic("Frametable too small\n");
- for ( sidx = (frametable_base_pdx / PDX_GROUP_COUNT); ; sidx = nidx )
+ for ( sidx = (frametable_base_grp / PDX_GROUP_COUNT); ; sidx = nidx )
{
unsigned int eidx;
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index bd7b02fd33b5..870bfd2be5e7 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -207,6 +207,7 @@ void __init init_frametable(paddr_t ram_start)
BUILD_BUG_ON(sizeof(struct page_info) != PAGE_INFO_SIZE);
frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ram_start));
+ frametable_base_grp = frametable_base_pdx;
nr_pdxs = max_pdx - frametable_base_pdx;
frametable_size = nr_pdxs * sizeof(struct page_info);
I’m not sure! Maybe another maintainer can give a second opinion, in case I’m
overthinking mfn_valid() too much.
Cheers,
Luca
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |