Subject: [PATCH v4 15/17] dmaengine: idxd: add dedicated wq mdev type
From: Dave Jiang <dave.jiang@intel.com>
Date: Fri, 30 Oct 2020
Add the support code for the "1dwq" mdev type. This mdev type follows the
standard VFIO mdev flow and exports a single dedicated wq (dwq) to the
mdev. The dwq configuration is set up by the host and is read-only from
the mdev's point of view. The mdev type does not support PASID or SVA,
matching the stage 1 driver in functional support. For backward
compatibility, the DSA spec definition of this mdev type will be
maintained once the commit goes upstream.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/mdev.c | 142 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 134 insertions(+), 8 deletions(-)
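
For context, here is a minimal sketch of how the "1dwq-v1" type group added
below ties into the standard VFIO mdev flow mentioned in the commit message.
It assumes the surrounding mdev.c context (struct idxd_device, idxd_vdcm_ops,
<linux/mdev.h>); the parent registration is done elsewhere in this series, and
the helper name idxd_mdev_host_init() is an assumption for illustration only:

/*
 * Illustrative sketch, not part of this patch. Registering the parent
 * ops is what makes the 1dwq-v1 directory show up under the parent
 * device's mdev_supported_types/ in sysfs, with the name, device_api
 * and available_instances attributes defined below.
 */
static int idxd_mdev_host_init(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	/* publishes idxd_mdev_type_groups through idxd_vdcm_ops */
	return mdev_register_device(dev, &idxd_vdcm_ops);
}

Once registered, userspace follows the usual mdev sequence: write a UUID to
the type's create node, then open the new instance through VFIO as a PCI
device (device_api reports VFIO_DEVICE_API_PCI_STRING). While a dedicated wq
is claimed this way, available_instances drops accordingly.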

diff --git a/drivers/dma/idxd/mdev.c b/drivers/dma/idxd/mdev.c
index ed79c85e692e..16b56f8f7fc1 100644
--- a/drivers/dma/idxd/mdev.c
+++ b/drivers/dma/idxd/mdev.c
@@ -111,20 +111,59 @@ static void idxd_vdcm_release(struct mdev_device *mdev)
 	mutex_unlock(&vidxd->dev_lock);
 }

+static struct idxd_wq *find_any_dwq(struct idxd_device *idxd)
+{
+	int i;
+	struct idxd_wq *wq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	for (i = 0; i < idxd->max_wqs; i++) {
+		wq = &idxd->wqs[i];
+
+		if (wq->state != IDXD_WQ_ENABLED)
+			continue;
+
+		if (!wq_dedicated(wq))
+			continue;
+
+		if (idxd_wq_refcount(wq) != 0)
+			continue;
+
+		spin_unlock_irqrestore(&idxd->dev_lock, flags);
+		mutex_lock(&wq->wq_lock);
+		if (idxd_wq_refcount(wq)) {
+			mutex_unlock(&wq->wq_lock);
+			spin_lock_irqsave(&idxd->dev_lock, flags);
+			continue;
+		}
+
+		idxd_wq_get(wq);
+		mutex_unlock(&wq->wq_lock);
+		return wq;
+	}
+
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	return NULL;
+}
    +
 static struct vdcm_idxd *vdcm_vidxd_create(struct idxd_device *idxd, struct mdev_device *mdev,
					   struct vdcm_idxd_type *type)
 {
 	struct vdcm_idxd *vidxd;
 	struct idxd_wq *wq = NULL;
+	int rc;
 
-	/* PLACEHOLDER, wq matching comes later */
-
+	if (type->type == IDXD_MDEV_TYPE_1_DWQ)
+		wq = find_any_dwq(idxd);
 	if (!wq)
 		return ERR_PTR(-ENODEV);
 
 	vidxd = kzalloc(sizeof(*vidxd), GFP_KERNEL);
-	if (!vidxd)
-		return ERR_PTR(-ENOMEM);
+	if (!vidxd) {
+		rc = -ENOMEM;
+		goto err;
+	}
 
 	mutex_init(&vidxd->dev_lock);
 	vidxd->idxd = idxd;
@@ -135,14 +174,23 @@ static struct vdcm_idxd *vdcm_vidxd_create(struct idxd_device *idxd, struct mdev
 	vidxd->num_wqs = VIDXD_MAX_WQS;
 
 	idxd_vdcm_init(vidxd);
-	mutex_lock(&wq->wq_lock);
-	idxd_wq_get(wq);
-	mutex_unlock(&wq->wq_lock);
 
 	return vidxd;
+
+ err:
+	mutex_lock(&wq->wq_lock);
+	idxd_wq_put(wq);
+	mutex_unlock(&wq->wq_lock);
+	return ERR_PTR(rc);
 }
 
-static struct vdcm_idxd_type idxd_mdev_types[IDXD_MDEV_TYPES];
+static struct vdcm_idxd_type idxd_mdev_types[IDXD_MDEV_TYPES] = {
+	{
+		.name = "1dwq-v1",
+		.description = "IDXD MDEV with 1 dedicated workqueue",
+		.type = IDXD_MDEV_TYPE_1_DWQ,
+	},
+};
 
 static struct vdcm_idxd_type *idxd_vdcm_find_vidxd_type(struct device *dev,
							const char *name)
@@ -934,7 +982,85 @@ static long idxd_vdcm_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	return rc;
 }
 
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+	struct vdcm_idxd_type *type;
+
+	type = idxd_vdcm_find_vidxd_type(dev, kobject_name(kobj));
+
+	if (type)
+		return sprintf(buf, "%s\n", type->description);
+
+	return -EINVAL;
+}
+static MDEV_TYPE_ATTR_RO(name);
+
+static int find_available_mdev_instances(struct idxd_device *idxd, struct vdcm_idxd_type *type)
+{
+	int count = 0, i;
+	unsigned long flags;
+
+	if (type->type != IDXD_MDEV_TYPE_1_DWQ)
+		return 0;
+
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq;
+
+		wq = &idxd->wqs[i];
+		if (!is_idxd_wq_mdev(wq) || !wq_dedicated(wq) || idxd_wq_refcount(wq))
+			continue;
+
+		count++;
+	}
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+	return count;
+}
+
+static ssize_t available_instances_show(struct kobject *kobj,
+					struct device *dev, char *buf)
+{
+	int count;
+	struct idxd_device *idxd = dev_get_drvdata(dev);
+	struct vdcm_idxd_type *type;
+
+	type = idxd_vdcm_find_vidxd_type(dev, kobject_name(kobj));
+	if (!type)
+		return -EINVAL;
+
+	count = find_available_mdev_instances(idxd, type);
+
+	return sprintf(buf, "%d\n", count);
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+			       char *buf)
+{
+	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static struct attribute *idxd_mdev_types_attrs[] = {
+	&mdev_type_attr_name.attr,
+	&mdev_type_attr_device_api.attr,
+	&mdev_type_attr_available_instances.attr,
+	NULL,
+};
+
+static struct attribute_group idxd_mdev_type_group0 = {
+	.name = "1dwq-v1",
+	.attrs = idxd_mdev_types_attrs,
+};
+
+static struct attribute_group *idxd_mdev_type_groups[] = {
+	&idxd_mdev_type_group0,
+	NULL,
+};
+
 static const struct mdev_parent_ops idxd_vdcm_ops = {
+	.supported_type_groups = idxd_mdev_type_groups,
 	.create = idxd_vdcm_create,
 	.remove = idxd_vdcm_remove,
 	.open = idxd_vdcm_open,
