Skip to content

Commit

Permalink
rapidio: use a reference count for struct mport_dma_req
Browse files Browse the repository at this point in the history
Once the dma request is passed to the DMA engine, the DMA subsystem
would hold a pointer to this structure and could call the completion
callback after do_dma_request() has timed out.

The current code deals with this by putting timed-out SYNC requests on a
pending list and freeing them later, when the mport cdev device is
released.  This still does not guarantee that the DMA subsystem is
really done with those transfers, so in theory
dma_xfer_callback/dma_req_free could be called after
mport_cdev_release_dma and could potentially access already-freed
memory.

This patch simplifies the current handling by using a kref in the mport
dma request structure, so that it gets freed only when nobody uses it
anymore.

This also simplifies the code a bit, as FAF transfers are now handled in
the same way as SYNC and ASYNC transfers.  There is no need anymore for
the pending list and for the dma workqueue which was used in case of FAF
transfers, so we remove them both.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ioan Nicu <[email protected]>
Acked-by: Alexandre Bounine <[email protected]>
Cc: Barry Wood <[email protected]>
Cc: Matt Porter <[email protected]>
Cc: Christophe JAILLET <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Logan Gunthorpe <[email protected]>
Cc: Chris Wilson <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Frank Kunz <[email protected]>
Cc: Alexander Sverdlin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Ioan Nicu authored and torvalds committed Apr 11, 2018
1 parent b94bb1f commit bbd876a
Showing 1 changed file with 18 additions and 104 deletions.
122 changes: 18 additions & 104 deletions drivers/rapidio/devices/rio_mport_cdev.c
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,6 @@ struct mport_cdev_priv {
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
struct list_head async_list;
struct list_head pend_list;
spinlock_t req_lock;
struct mutex dma_lock;
struct kref dma_ref;
Expand Down Expand Up @@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
Expand Down Expand Up @@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
struct kref refcount;
struct list_head node;
struct file *filp;
struct mport_cdev_priv *priv;
Expand All @@ -554,11 +552,6 @@ struct mport_dma_req {
struct completion req_comp;
};

struct mport_faf_work {
struct work_struct work;
struct mport_dma_req *req;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
struct mport_dev *md =
Expand All @@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
complete(&priv->comp);
}

static void dma_req_free(struct mport_dma_req *req)
static void dma_req_free(struct kref *ref)
{
struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
refcount);
struct mport_cdev_priv *priv = req->priv;
unsigned int i;

Expand Down Expand Up @@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
NULL, NULL);
complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
struct mport_faf_work *work = container_of(_work,
struct mport_faf_work, work);
struct mport_dma_req *req = work->req;

dma_req_free(req);
kfree(work);
}

static void dma_faf_callback(void *param)
{
struct mport_dma_req *req = (struct mport_dma_req *)param;
struct mport_faf_work *work;

work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;

INIT_WORK(&work->work, dma_faf_cleanup);
work->req = req;
queue_work(dma_wq, &work->work);
kref_put(&req->refcount, dma_req_free);
}

/*
Expand Down Expand Up @@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
goto err_out;
}

if (sync == RIO_TRANSFER_FAF)
tx->callback = dma_faf_callback;
else
tx->callback = dma_xfer_callback;
tx->callback = dma_xfer_callback;
tx->callback_param = req;

req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
init_completion(&req->req_comp);
kref_get(&req->refcount);

cookie = dmaengine_submit(tx);
req->cookie = cookie;
Expand All @@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
if (dma_submit_error(cookie)) {
rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
cookie, xfer->rio_addr, xfer->length);
kref_put(&req->refcount, dma_req_free);
ret = -EIO;
goto err_out;
}
Expand Down Expand Up @@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;

kref_init(&req->refcount);

ret = get_dma_channel(priv);
if (ret) {
kfree(req);
Expand Down Expand Up @@ -968,42 +941,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
ret = do_dma_request(req, xfer, sync, nents);

if (ret >= 0) {
if (sync == RIO_TRANSFER_SYNC)
goto sync_out;
return ret; /* return ASYNC cookie */
}

if (ret == -ETIMEDOUT || ret == -EINTR) {
/*
* This can happen only in case of SYNC transfer.
* Do not free unfinished request structure immediately.
* Place it into pending list and deal with it later
*/
spin_lock(&priv->req_lock);
list_add_tail(&req->node, &priv->pend_list);
spin_unlock(&priv->req_lock);
return ret;
if (sync == RIO_TRANSFER_ASYNC)
return ret; /* return ASYNC cookie */
} else {
rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
}


rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
sg_free_table(&req->sgt);
err_pg:
if (page_list) {
if (!req->page_list) {
for (i = 0; i < nr_pages; i++)
put_page(page_list[i]);
kfree(page_list);
}
err_req:
if (req->map) {
mutex_lock(&md->buf_mutex);
kref_put(&req->map->ref, mport_release_mapping);
mutex_unlock(&md->buf_mutex);
}
put_dma_channel(priv);
kfree(req);
kref_put(&req->refcount, dma_req_free);
return ret;
}

Expand Down Expand Up @@ -1121,7 +1072,7 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
ret = 0;

if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
dma_req_free(req);
kref_put(&req->refcount, dma_req_free);

return ret;

Expand Down Expand Up @@ -1966,7 +1917,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
INIT_LIST_HEAD(&priv->async_list);
INIT_LIST_HEAD(&priv->pend_list);
spin_lock_init(&priv->req_lock);
mutex_init(&priv->dma_lock);
#endif
Expand Down Expand Up @@ -2006,8 +1956,6 @@ static void mport_cdev_release_dma(struct file *filp)

md = priv->md;

flush_workqueue(dma_wq);

spin_lock(&priv->req_lock);
if (!list_empty(&priv->async_list)) {
rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
Expand All @@ -2023,20 +1971,7 @@ static void mport_cdev_release_dma(struct file *filp)
req->filp, req->cookie,
completion_done(&req->req_comp)?"yes":"no");
list_del(&req->node);
dma_req_free(req);
}
}

if (!list_empty(&priv->pend_list)) {
rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
filp, current->comm, task_pid_nr(current));
list_for_each_entry_safe(req,
req_next, &priv->pend_list, node) {
rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
req->filp, req->cookie,
completion_done(&req->req_comp)?"yes":"no");
list_del(&req->node);
dma_req_free(req);
kref_put(&req->refcount, dma_req_free);
}
}

Expand All @@ -2048,15 +1983,6 @@ static void mport_cdev_release_dma(struct file *filp)
current->comm, task_pid_nr(current), wret);
}

spin_lock(&priv->req_lock);

if (!list_empty(&priv->pend_list)) {
rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
filp, current->comm, task_pid_nr(current));
}

spin_unlock(&priv->req_lock);

if (priv->dmach != priv->md->dma_chan) {
rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
filp, current->comm, task_pid_nr(current));
Expand Down Expand Up @@ -2573,8 +2499,6 @@ static void mport_cdev_remove(struct mport_dev *md)
cdev_device_del(&md->cdev, &md->dev);
mport_cdev_kill_fasync(md);

flush_workqueue(dma_wq);

/* TODO: do we need to give clients some time to close file
* descriptors? Simple wait for XX, or kref?
*/
Expand Down Expand Up @@ -2691,17 +2615,8 @@ static int __init mport_init(void)
goto err_cli;
}

dma_wq = create_singlethread_workqueue("dma_wq");
if (!dma_wq) {
rmcd_error("failed to create DMA work queue");
ret = -ENOMEM;
goto err_wq;
}

return 0;

err_wq:
class_interface_unregister(&rio_mport_interface);
err_cli:
unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
Expand All @@ -2717,7 +2632,6 @@ static void __exit mport_exit(void)
class_interface_unregister(&rio_mport_interface);
class_destroy(dev_class);
unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
destroy_workqueue(dma_wq);
}

module_init(mport_init);
Expand Down

0 comments on commit bbd876a

Please sign in to comment.