On Sun, 2005-11-06 at 15:32 +0000, Keir Fraser wrote:
> On 6 Nov 2005, at 12:24, Harry Butterworth wrote:
>
> > All in all, this looks like a bug to me where failure to map a grant
> > reference in the middle of a set would result in pages kept mapped in
> > the backend when fast_flush_area fails to clean them up.
> >
> > Am I right?
>
> Yes, it's a bug. We'll sort out a patch.
>
OK. I ended up doing the following in my code. I'm not particularly
confident about it, but hopefully it's correct. As in blkback, the
phys_to_machine_mapping entries are left pointing at the foreign frames
after unmap; I assume this is not a problem.

typedef struct xenidc_grant_table_mapping_context_struct
    xenidc_grant_table_mapping_context;

/* Per-mapping state.  handle[] records the grant handle returned for
   each page (or a negative value if that page failed to map) so the
   unmap path can skip entries that never mapped. */
struct xenidc_grant_table_mapping_context_struct
{
    xenidc_buffer_mappable_class * class;
    xenidc_buffer_resource_provider * provider;
    unsigned long page_count;
    unsigned long mmap_vaddress;
    int16_t handle
        [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];
};
static void xenidc_grant_table_unmap_rbr
( xenidc_buffer_mappable_class ** context_in );

static xenidc_buffer_mappable_class ** xenidc_grant_table_map_rbr
(
    xenidc_buffer_mappable_class * class,
    xenidc_remote_buffer_reference * rbr,
    xenidc_address * address,
    xenidc_buffer_resource_provider * provider,
    void ** mapping
)
{
    trace();
    {
        xenidc_grant_table_mapping_context * context =
            xenidc_buffer_resource_provider_allocate_page( provider );

        struct gnttab_map_grant_ref map
            [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];

        unsigned long first_page = rbr->byte_offset / PAGE_SIZE;
        unsigned long final_page =
            ( rbr->byte_offset + rbr->byte_count - 1 ) / PAGE_SIZE;

        context->class      = class;
        context->provider   = provider;
        context->page_count = final_page - first_page + 1;

        context->mmap_vaddress =
            xenidc_buffer_resource_provider_allocate_empty_page_range
            ( provider, context->page_count );

        /* Build one map operation per page covered by the rbr. */
        {
            int i;
            for( i = 0; i < context->page_count; i++ )
            {
                map[ i ].host_addr =
                    context->mmap_vaddress + ( i * PAGE_SIZE );
                map[ i ].dom =
                    xenidc_address_query_remote_domain_id( address );
                map[ i ].ref =
                    rbr->base.grant_table.reference[ first_page + i ];
                map[ i ].flags = GNTMAP_host_map;
            }
        }

        {
            int error = HYPERVISOR_grant_table_op
                ( GNTTABOP_map_grant_ref, map, context->page_count );
            BUG_ON( error );
        }

        {
            int error = 0;

            /* Record every handle, including the failed (negative) ones,
               so the unmap path knows which entries actually mapped. */
            {
                int i;
                for( i = 0; i < context->page_count; i++ )
                {
                    context->handle[ i ] = map[ i ].handle;
                    if( unlikely( map[ i ].handle < 0 ) )
                    {
                        error = 1;
                    }
                }
            }

            if( !error )
            {
                int i;
                for( i = 0; i < context->page_count; i++ )
                {
                    phys_to_machine_mapping
                        [ __pa( context->mmap_vaddress + ( i * PAGE_SIZE ) )
                          >> PAGE_SHIFT ] =
                        FOREIGN_FRAME( map[ i ].dev_bus_addr >> PAGE_SHIFT );
                }
            }
            else
            {
                /* Partial failure: unmap whatever did get mapped and
                   release the resources allocated above. */
                xenidc_grant_table_unmap_rbr( &context->class );
                context = NULL;
            }
        }

        if( context != NULL )
        {
            *mapping =
                (void *)( context->mmap_vaddress +
                          ( rbr->byte_offset % PAGE_SIZE ) );
            return &context->class;
        }
        else
        {
            return NULL;
        }
    }
}
static void xenidc_grant_table_unmap_rbr
( xenidc_buffer_mappable_class ** context_in )
{
    trace();
    {
        xenidc_grant_table_mapping_context * context = container_of
            ( context_in, xenidc_grant_table_mapping_context, class );

        {
            struct gnttab_unmap_grant_ref unmap
                [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];
            int i;
            int j;

            /* Only unmap the entries whose map operation succeeded;
               failed entries were recorded with a negative handle. */
            for( i = 0, j = 0; i < context->page_count; i++ )
            {
                if( context->handle[ i ] >= 0 )
                {
                    unmap[ j ].host_addr =
                        context->mmap_vaddress + ( i * PAGE_SIZE );
                    unmap[ j ].dev_bus_addr = 0;
                    unmap[ j ].handle = context->handle[ i ];
                    j++;
                }
            }

            if( j != 0 )
            {
                int error = HYPERVISOR_grant_table_op
                    ( GNTTABOP_unmap_grant_ref, unmap, j );
                BUG_ON( error );
            }
        }

        xenidc_buffer_resource_provider_free_empty_page_range
            ( context->provider, context->mmap_vaddress );

        xenidc_buffer_resource_provider_free_page
            ( context->provider, context );
    }
}
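
If leaving the phys_to_machine_mapping entries around ever did turn out
to matter, I think they could be reset in the unmap path before the
page range is freed. Something like this (just a sketch, and it assumes
the INVALID_P2M_ENTRY marker used by the balloon driver is the right
thing to store here):

        /* Sketch only: clear the p2m entries for the unmapped pages so
           no stale FOREIGN_FRAME() values are left behind.  Assumes
           INVALID_P2M_ENTRY means "no machine frame" in this tree. */
        {
            int i;
            for( i = 0; i < context->page_count; i++ )
            {
                phys_to_machine_mapping
                    [ __pa( context->mmap_vaddress + ( i * PAGE_SIZE ) )
                      >> PAGE_SHIFT ] = INVALID_P2M_ENTRY;
            }
        }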
> -- Keir
>
>
--
Harry Butterworth <harry@xxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel