xen-devel

Re: [Xen-devel] [PATCH V3 2/3] xen-blkfront: teach blkfront driver handl

On Wed, Aug 24, 2011 at 6:42 PM, Jan Beulich <JBeulich@xxxxxxxxxx> wrote:
>>>> On 24.08.11 at 11:23, Li Dongyang <lidongyang@xxxxxxxxxx> wrote:
>> The blkfront driver now reads feature-trim from xenstore
>> and sets up the request queue with trim parameters, so we can forward
>> discard requests to the backend driver.
>>
>> Signed-off-by: Li Dongyang <lidongyang@xxxxxxxxxx>
>> ---
>>  drivers/block/xen-blkfront.c |  111 +++++++++++++++++++++++++++++++++---------
>>  1 files changed, 88 insertions(+), 23 deletions(-)
>>
>> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
>> index 9ea8c25..aa3cede 100644
>> --- a/drivers/block/xen-blkfront.c
>> +++ b/drivers/block/xen-blkfront.c
>> @@ -98,6 +98,9 @@ struct blkfront_info
>>       unsigned long shadow_free;
>>       unsigned int feature_flush;
>>       unsigned int flush_op;
>> +     unsigned int feature_trim;
>> +     unsigned int discard_granularity;
>> +     unsigned int discard_alignment;
>>       int is_ready;
>>  };
>>
>> @@ -302,29 +305,36 @@ static int blkif_queue_request(struct request *req)
>>               ring_req->operation = info->flush_op;
>>       }
>>
>> -     ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
>> -     BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
>> +     if (unlikely(req->cmd_flags & REQ_DISCARD)) {
>> +             /* id, sector_number and handle are set above. */
>> +             ring_req->operation = BLKIF_OP_TRIM;
>> +             ring_req->nr_segments = 0;
>> +             ring_req->u.trim.nr_sectors = blk_rq_sectors(req);
>> +     } else {
>> +             ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
>> +             BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
>>
>> -     for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
>> -             buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
>> -             fsect = sg->offset >> 9;
>> -             lsect = fsect + (sg->length >> 9) - 1;
>> -             /* install a grant reference. */
>> -             ref = gnttab_claim_grant_reference(&gref_head);
>> -             BUG_ON(ref == -ENOSPC);
>> +             for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
>> +                     buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
>> +                     fsect = sg->offset >> 9;
>> +                     lsect = fsect + (sg->length >> 9) - 1;
>> +                     /* install a grant reference. */
>> +                     ref = gnttab_claim_grant_reference(&gref_head);
>> +                     BUG_ON(ref == -ENOSPC);
>>
>> -             gnttab_grant_foreign_access_ref(
>> -                             ref,
>> -                             info->xbdev->otherend_id,
>> -                             buffer_mfn,
>> -                             rq_data_dir(req) );
>> -
>> -             info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
>> -             ring_req->u.rw.seg[i] =
>> -                             (struct blkif_request_segment) {
>> -                                     .gref       = ref,
>> -                                     .first_sect = fsect,
>> -                                     .last_sect  = lsect };
>> +                     gnttab_grant_foreign_access_ref(
>> +                                     ref,
>> +                                     info->xbdev->otherend_id,
>> +                                     buffer_mfn,
>> +                                     rq_data_dir(req));
>> +
>> +                     info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
>> +                     ring_req->u.rw.seg[i] =
>> +                                     (struct blkif_request_segment) {
>> +                                             .gref       = ref,
>> +                                             .first_sect = fsect,
>> +                                             .last_sect  = lsect };
>> +             }
>>       }
>>
>>       info->ring.req_prod_pvt++;
>> @@ -399,6 +409,7 @@ wait:
>>  static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
>>  {
>>       struct request_queue *rq;
>> +     struct blkfront_info *info = gd->private_data;
>>
>>       rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
>>       if (rq == NULL)
>> @@ -406,6 +417,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
>>
>>       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
>>
>> +     if (info->feature_trim) {
>> +             queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
>> +             blk_queue_max_discard_sectors(rq, get_capacity(gd));
>> +             rq->limits.discard_granularity = info->discard_granularity;
>> +             rq->limits.discard_alignment = info->discard_alignment;
>
> Don't you also need to set rq->limits.max_discard_sectors here (since,
> when it is zero, blkdev_issue_discard() doesn't do anything)? And wouldn't
> that need to be propagated from the backend, too?
the max_discard_sectors is set by blk_queue_max_discard_sectors() above ;-)
rq->limits.max_discard_sectors would otherwise be the size of the full
physical device, and if we only assign a partition to the guest that
number is incorrect for the guest, so max_discard_sectors should be
the capacity the guest will see. Thanks
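
For reference, blk_queue_max_discard_sectors() in kernels of this vintage
is just a thin setter on the queue limits, so the call above really does
populate rq->limits.max_discard_sectors. A sketch of the block-layer helper
from memory (check your tree for the exact body):

void blk_queue_max_discard_sectors(struct request_queue *q,
				   unsigned int max_discard_sectors)
{
	/* Records the largest discard the device accepts;
	 * blkdev_issue_discard() splits requests against this limit
	 * and does nothing when it is zero. */
	q->limits.max_discard_sectors = max_discard_sectors;
}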
>
>> +     }
>> +
>>       /* Hard sector size and max sectors impersonate the equiv. hardware. */
>>       blk_queue_logical_block_size(rq, sector_size);
>>       blk_queue_max_hw_sectors(rq, 512);
>> @@ -722,6 +740,19 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>>
>>               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
>>               switch (bret->operation) {
>> +             case BLKIF_OP_TRIM:
>> +                     if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
>> +                             struct request_queue *rq = info->rq;
>> +                     printk(KERN_WARNING "blkfront: %s: trim op failed\n",
>> +                                        info->gd->disk_name);
>> +                             error = -EOPNOTSUPP;
>> +                             info->feature_trim = 0;
>> +                             spin_lock(rq->queue_lock);
>> +                             queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
>> +                             spin_unlock(rq->queue_lock);
>> +                     }
>> +                     __blk_end_request_all(req, error);
>> +                     break;
>>               case BLKIF_OP_FLUSH_DISKCACHE:
>>               case BLKIF_OP_WRITE_BARRIER:
>>                       if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
>> @@ -1098,6 +1129,33 @@ blkfront_closing(struct blkfront_info *info)
>>       bdput(bdev);
>>  }
>>
>> +static void blkfront_setup_trim(struct blkfront_info *info)
>> +{
>> +     int err;
>> +     char *type;
>> +     unsigned int discard_granularity;
>> +     unsigned int discard_alignment;
>> +
>> +     type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
>> +     if (IS_ERR(type))
>> +             return;
>> +
>> +     if (strncmp(type, "phy", 3) == 0) {
>> +             err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
>> +                     "discard-granularity", "%u", &discard_granularity,
>> +                     "discard-alignment", "%u", &discard_alignment,
>> +                     NULL);
>
> Let me repeat my wish to have these nodes start with "trim-" rather
> than "discard-", so they can be easily associated with the "feature-trim"
> one.
>
> Jan
>
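
With the rename Jan is asking for, the gather in blkfront_setup_trim()
would read along these lines (illustrative only; V3 as posted uses the
discard-* names, and the backend would have to publish matching nodes):

		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"trim-granularity", "%u", &discard_granularity,
			"trim-alignment", "%u", &discard_alignment,
			NULL);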
>> +             if (!err) {
>> +                     info->feature_trim = 1;
>> +                     info->discard_granularity = discard_granularity;
>> +                     info->discard_alignment = discard_alignment;
>> +             }
>> +     } else if (strncmp(type, "file", 4) == 0)
>> +             info->feature_trim = 1;
>> +
>> +     kfree(type);
>> +}
>> +
>>  /*
>>   * Invoked when the backend is finally 'ready' (and has told us
>>   * the details about the physical device - #sectors, size, etc).
>> @@ -1108,7 +1166,7 @@ static void blkfront_connect(struct blkfront_info *info)
>>       unsigned long sector_size;
>>       unsigned int binfo;
>>       int err;
>> -     int barrier, flush;
>> +     int barrier, flush, trim;
>>
>>       switch (info->connected) {
>>       case BLKIF_STATE_CONNECTED:
>> @@ -1178,7 +1236,14 @@ static void blkfront_connect(struct blkfront_info *info)
>>               info->feature_flush = REQ_FLUSH;
>>               info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
>>       }
>> -
>> +
>> +     err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
>> +                         "feature-trim", "%d", &trim,
>> +                         NULL);
>> +
>> +     if (!err && trim)
>> +             blkfront_setup_trim(info);
>> +
>>       err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
>>       if (err) {
>>               xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
>
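
For anyone wanting to exercise the new path from a guest kernel: discards
normally enter the driver via blkdev_issue_discard(), e.g. from a
filesystem's FITRIM handler. A minimal, hypothetical caller
(example_trim_range is made up for illustration; blkdev_issue_discard()
itself is the real block-layer API):

#include <linux/blkdev.h>

/* Hypothetical helper: discard sectors [start, start + nr_sects) on bdev.
 * With this patch applied, blkfront turns the resulting REQ_DISCARD
 * request into a BLKIF_OP_TRIM ring request for the backend.  If the
 * queue never had its discard limits set up (the case Jan raises above),
 * the call fails early with -EOPNOTSUPP instead of reaching the ring. */
static int example_trim_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}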

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
