
Re: [Xen-devel] [PATCH 2/6] xen-gntdev: Change page limit to be global instead of per-open



On Tue, 2010-12-14 at 21:42 +0000, Daniel De Graaf wrote:
> On 12/14/2010 04:12 PM, Jeremy Fitzhardinge wrote:
> > On 12/14/2010 06:55 AM, Daniel De Graaf wrote:
> >> Because there is no limitation on how many times a user can open a
> >> given device file, a per-file-description limit on the number of
> >> pages granted offers little to no benefit. Change to a global limit
> >> and remove the ioctl() as the parameter can now be changed via sysfs.
> > 
> > Does anyone use this ioctl?  Wouldn't it be safer to replace it with a
> > no-op version?
> > 
> >     J
> > 
> 
> I do not know of any users. If it's preferred to replace it with a no-op ioctl
> instead of the current -ENOSYS return, that's easy to do.

It's called by xc_gnttab_set_max_grants in libxc, although I don't see
any callers of that function.

We should probably also remove the library function.
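If we do end up keeping the ioctl as a no-op for compatibility, something
along these lines would presumably be enough (just a sketch, not part of
the posted patch; it validates the argument and then ignores the requested
limit, since the global "limit" module parameter now governs how many
pages can be mapped):

/* Hypothetical no-op stub: accept IOCTL_GNTDEV_SET_MAX_GRANTS for
 * binary compatibility but ignore the requested value. */
static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
				struct ioctl_gntdev_set_max_grants __user *u)
{
	struct ioctl_gntdev_set_max_grants op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	if (debug)
		printk("%s: priv %p, ignoring limit %d\n",
		       __FUNCTION__, priv, op.count);
	return 0;
}

along with keeping the IOCTL_GNTDEV_SET_MAX_GRANTS case in gntdev_ioctl().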

Ian.

> >> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> >> ---
> >>  drivers/xen/gntdev.c |   48 ++++++++++++++----------------------------------
> >>  1 files changed, 14 insertions(+), 34 deletions(-)
> >>
> >> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> >> index c582804..62639af 100644
> >> --- a/drivers/xen/gntdev.c
> >> +++ b/drivers/xen/gntdev.c
> >> @@ -44,13 +44,12 @@ MODULE_DESCRIPTION("User-space granted page access driver");
> >>  
> >>  static int debug = 0;
> >>  module_param(debug, int, 0644);
> >> -static int limit = 1024;
> >> +static int limit = 1024*1024;
> >>  module_param(limit, int, 0644);
> >> +static atomic_t pages_mapped = ATOMIC_INIT(0);
> >>  
> >>  struct gntdev_priv {
> >>    struct list_head maps;
> >> -  uint32_t used;
> >> -  uint32_t limit;
> >>    spinlock_t lock;
> >>    struct mm_struct *mm;
> >>    struct mmu_notifier mn;
> >> @@ -76,8 +75,8 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
> >>  {
> >>    struct grant_map *map;
> >>  
> >> -  printk("%s: maps list (priv %p, usage %d/%d)\n",
> >> -         __FUNCTION__, priv, priv->used, priv->limit);
> >> +  printk("%s: maps list (priv %p)\n",
> >> +         __FUNCTION__, priv);
> >>    list_for_each_entry(map, &priv->maps, next)
> >>            printk("  index %2d, count %2d %s\n",
> >>                   map->index, map->count,
> >> @@ -104,9 +103,6 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
> >>    add->count = count;
> >>    add->priv  = priv;
> >>  
> >> -  if (add->count + priv->used > priv->limit)
> >> -          goto err;
> >> -
> >>    return add;
> >>  
> >>  err:
> >> @@ -131,7 +127,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
> >>    list_add_tail(&add->next, &priv->maps);
> >>  
> >>  done:
> >> -  priv->used += add->count;
> >>    if (debug)
> >>            gntdev_print_maps(priv, "[new]", add->index);
> >>  }
> >> @@ -178,7 +173,7 @@ static int gntdev_del_map(struct grant_map *map)
> >>            if (map->unmap_ops[i].handle)
> >>                    return -EBUSY;
> >>  
> >> -  map->priv->used -= map->count;
> >> +  atomic_sub(map->count, &pages_mapped);
> >>    list_del(&map->next);
> >>    return 0;
> >>  }
> >> @@ -360,7 +355,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
> >>  
> >>    INIT_LIST_HEAD(&priv->maps);
> >>    spin_lock_init(&priv->lock);
> >> -  priv->limit = limit;
> >>  
> >>    priv->mm = get_task_mm(current);
> >>    if (!priv->mm) {
> >> @@ -416,19 +410,26 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
> >>                   op.count);
> >>    if (unlikely(op.count <= 0))
> >>            return -EINVAL;
> >> -  if (unlikely(op.count > priv->limit))
> >> -          return -EINVAL;
> >>  
> >>    err = -ENOMEM;
> >>    map = gntdev_alloc_map(priv, op.count);
> >>    if (!map)
> >>            return err;
> >> +
> >>    if (copy_from_user(map->grants, &u->refs,
> >>                       sizeof(map->grants[0]) * op.count) != 0) {
> >>            gntdev_free_map(map);
> >>            return err;
> >>    }
> >>  
> >> +  if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit))
> >> +  {
> >> +          if (debug)
> >> +                  printk("%s: can't map: over limit\n", __FUNCTION__);
> >> +          gntdev_free_map(map);
> >> +          return err;
> >> +  }
> >> +
> >>    spin_lock(&priv->lock);
> >>    gntdev_add_map(priv, map);
> >>    op.index = map->index << PAGE_SHIFT;
> >> @@ -495,24 +496,6 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
> >>    return 0;
> >>  }
> >>  
> >> -static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
> >> -                                  struct ioctl_gntdev_set_max_grants __user *u)
> >> -{
> >> -  struct ioctl_gntdev_set_max_grants op;
> >> -
> >> -  if (copy_from_user(&op, u, sizeof(op)) != 0)
> >> -          return -EFAULT;
> >> -  if (debug)
> >> -          printk("%s: priv %p, limit %d\n", __FUNCTION__, priv, op.count);
> >> -  if (op.count > limit)
> >> -          return -EINVAL;
> >> -
> >> -  spin_lock(&priv->lock);
> >> -  priv->limit = op.count;
> >> -  spin_unlock(&priv->lock);
> >> -  return 0;
> >> -}
> >> -
> >>  static long gntdev_ioctl(struct file *flip,
> >>                     unsigned int cmd, unsigned long arg)
> >>  {
> >> @@ -529,9 +512,6 @@ static long gntdev_ioctl(struct file *flip,
> >>    case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
> >>            return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
> >>  
> >> -  case IOCTL_GNTDEV_SET_MAX_GRANTS:
> >> -          return gntdev_ioctl_set_max_grants(priv, ptr);
> >> -
> >>    default:
> >>            if (debug)
> >>                    printk("%s: priv %p, unknown cmd %x\n",
> > 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

