author    Ezequiel Garcia <ezequiel@collabora.com>    2018-10-18 14:02:23 -0400
committer Mauro Carvalho Chehab <mchehab+samsung@kernel.org>    2018-11-23 06:37:47 -0500
commit    cbd9463da1b12cdf9aa79e7cf470431d39131fca (patch)
tree      cf8ef58c3ba7e2125e67f23bb2305d596e166cdc /drivers/media/v4l2-core
parent    cbec2836f8be61ca573d48efe8803929100d4cba (diff)
media: v4l2-mem2mem: Avoid calling .device_run in v4l2_m2m_job_finish
v4l2_m2m_job_finish() is typically called when DMA operations complete, in interrupt handlers or DMA completion callbacks. Calling .device_run from v4l2_m2m_job_finish creates a nasty re-entrancy path into the driver. Moreover, some implementations of .device_run might need to sleep, as is the case for drivers supporting the Request API, where controls are applied via v4l2_ctrl_request_setup, which takes the ctrl handler mutex.

This commit adds a deferred context that calls v4l2_m2m_try_run, and gets scheduled by v4l2_m2m_job_finish().

Before this change, device_run would be called from these paths:

vb2_m2m_request_queue, or v4l2_m2m_streamon, or v4l2_m2m_qbuf:
  v4l2_m2m_try_schedule
    v4l2_m2m_try_run
      .device_run

v4l2_m2m_job_finish:
  v4l2_m2m_try_run
    .device_run

After this change, the latter path is gone and instead:

v4l2_m2m_device_run_work:
  v4l2_m2m_try_run
    .device_run

Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
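To make the atomic-context point concrete, here is a minimal sketch of the kind of call site this patch protects against: a mem2mem driver's completion interrupt handler finishing a job. The foo_* names and the one-buffer-per-job handling are illustrative assumptions, not part of this patch; only the v4l2_m2m_* calls are the real in-kernel API.

#include <linux/interrupt.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical driver state, for illustration only. */
struct foo_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct foo_ctx {
	struct v4l2_fh fh;
};

/*
 * Runs in hard-IRQ context when the hardware signals completion. This is
 * why v4l2_m2m_job_finish() must not call .device_run directly:
 * .device_run may need to sleep (e.g. to apply request controls).
 */
static irqreturn_t foo_irq_handler(int irq, void *priv)
{
	struct foo_dev *dev = priv;
	struct foo_ctx *ctx;
	struct vb2_v4l2_buffer *src, *dst;

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx)
		return IRQ_NONE;

	/* Return the processed buffers to vb2. */
	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

	/*
	 * With this patch, this only queues the next job and schedules
	 * job_work; .device_run is invoked later from the workqueue,
	 * never from this atomic context.
	 */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);

	return IRQ_HANDLED;
}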
Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c  25
1 file changed, 24 insertions, 1 deletion
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 2307fcc663ec..5bbdec55b7d7 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -87,6 +87,7 @@ static const char * const m2m_entity_name[] = {
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
+ * @job_work: worker to run queued jobs.
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
@@ -103,6 +104,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
+ struct work_struct job_work;
const struct v4l2_m2m_ops *m2m_ops;
};
@@ -244,6 +246,9 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
* @m2m_dev: per-device context
*
* Get next transaction (if present) from the waiting jobs list and run it.
+ *
+ * Note that this function can run on a given v4l2_m2m_ctx context,
+ * but call .device_run for another context.
*/
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
@@ -363,6 +368,18 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/**
+ * v4l2_m2m_device_run_work() - run pending jobs for the context
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void v4l2_m2m_device_run_work(struct work_struct *work)
+{
+ struct v4l2_m2m_dev *m2m_dev =
+ container_of(work, struct v4l2_m2m_dev, job_work);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+
+/**
* v4l2_m2m_cancel_job() - cancel pending jobs for the context
* @m2m_ctx: m2m context with jobs to be canceled
*
@@ -421,7 +438,12 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
/* This instance might have more buffers ready, but since we do not
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes. */
- v4l2_m2m_try_schedule(m2m_ctx);
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+
+ /* We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
@@ -863,6 +885,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
m2m_dev->m2m_ops = m2m_ops;
INIT_LIST_HEAD(&m2m_dev->job_queue);
spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
return m2m_dev;
}
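For completeness, a hedged sketch of the other end of the deferral: a Request API .device_run of the kind the commit message mentions, which may sleep because v4l2_ctrl_request_setup() takes the control handler mutex. The foo_* names, the control handler field, and the hardware programming step are assumptions for illustration; only the v4l2_* and media request calls are real kernel API. With this patch, such a callback is always entered from job_work, i.e. process context.

#include <media/media-request.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical per-file-handle context, for illustration only. */
struct foo_req_ctx {
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler hdl;
};

/* .device_run callback from struct v4l2_m2m_ops; may sleep. */
static void foo_device_run(void *priv)
{
	struct foo_req_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src_buf;
	struct media_request *req;

	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	req = src_buf->vb2_buf.req_obj.req;

	/*
	 * Apply the controls bundled with the request; this takes the
	 * control handler mutex, hence must not run in atomic context.
	 */
	if (req)
		v4l2_ctrl_request_setup(req, &ctx->hdl);

	/* ... program the hardware and start the transaction here ... */
}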