Diffstat (limited to 'drivers/rpmsg')
-rw-r--r--  drivers/rpmsg/Kconfig              |  11
-rw-r--r--  drivers/rpmsg/Makefile             |   1
-rw-r--r--  drivers/rpmsg/mtk_rpmsg.c          | 414
-rw-r--r--  drivers/rpmsg/qcom_glink_native.c  |  55
-rw-r--r--  drivers/rpmsg/qcom_glink_smem.c    |   4
-rw-r--r--  drivers/rpmsg/rpmsg_char.c         |  16
-rw-r--r--  drivers/rpmsg/rpmsg_core.c         |   8
-rw-r--r--  drivers/rpmsg/rpmsg_internal.h     |   5
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c   |   2
9 files changed, 485 insertions, 31 deletions
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index d0322b41eca5..a9108ff563dc 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -15,13 +15,22 @@ config RPMSG_CHAR
in /dev. They make it possible for user-space programs to send and
receive rpmsg packets.
+config RPMSG_MTK_SCP
+ tristate "MediaTek SCP"
+ depends on MTK_SCP
+ select RPMSG
+ help
+ Say y here to enable support for providing communication channels
+ to remote processors on MediaTek platforms.
+ This uses IPI and IPC to communicate with the remote processors.
+
config RPMSG_QCOM_GLINK_NATIVE
tristate
select RPMSG
config RPMSG_QCOM_GLINK_RPM
tristate "Qualcomm RPM Glink driver"
- select RPMSG_QCOM_GLINK_NATIVE
+ select RPMSG_QCOM_GLINK_NATIVE
depends on HAS_IOMEM
depends on MAILBOX
help
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 9aa859502d27..ae92a7fb08f6 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
+obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
new file mode 100644
index 000000000000..232aa4e40133
--- /dev/null
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2019 Google LLC.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/workqueue.h>
+
+#include "rpmsg_internal.h"
+
+struct mtk_rpmsg_rproc_subdev {
+ struct platform_device *pdev;
+ struct mtk_rpmsg_info *info;
+ struct rpmsg_endpoint *ns_ept;
+ struct rproc_subdev subdev;
+
+ struct work_struct register_work;
+ struct list_head channels;
+ struct mutex channels_lock;
+};
+
+#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
+
+struct mtk_rpmsg_channel_info {
+ struct rpmsg_channel_info info;
+ bool registered;
+ struct list_head list;
+};
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ *
+ * This message is sent across to publish a new service. When we receive these
+ * messages, an appropriate rpmsg channel (i.e. device) is created. In turn, the
+ * ->probe() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+} __packed;
+
+struct mtk_rpmsg_device {
+ struct rpmsg_device rpdev;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+struct mtk_rpmsg_endpoint {
+ struct rpmsg_endpoint ept;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
+#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
+
+static void __mtk_ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ kfree(to_mtk_rpmsg_endpoint(ept));
+}
+
+static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_rpmsg_endpoint *mept = priv;
+ struct rpmsg_endpoint *ept = &mept->ept;
+ int ret;
+
+ ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
+ if (ret)
+ dev_warn(&ept->rpdev->dev, "rpmsg handler returned error %d",
+ ret);
+}
+
+static struct rpmsg_endpoint *
+__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+ u32 id)
+{
+ struct mtk_rpmsg_endpoint *mept;
+ struct rpmsg_endpoint *ept;
+ struct platform_device *pdev = mtk_subdev->pdev;
+ int ret;
+
+ mept = kzalloc(sizeof(*mept), GFP_KERNEL);
+ if (!mept)
+ return NULL;
+ mept->mtk_subdev = mtk_subdev;
+
+ ept = &mept->ept;
+ kref_init(&ept->refcount);
+
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+ ept->ops = &mtk_rpmsg_endpoint_ops;
+ ept->addr = id;
+
+ ret = mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
+ mept);
+ if (ret) {
+ dev_err(&pdev->dev, "IPI register failed, id = %d", id);
+ kref_put(&ept->refcount, __mtk_ept_release);
+ return NULL;
+ }
+
+ return ept;
+}
+
+static struct rpmsg_endpoint *
+mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_device(rpdev)->mtk_subdev;
+
+ return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
+}
+
+static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
+ kref_put(&ept->refcount, __mtk_ept_release);
+}
+
+static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+ len, 0);
+}
+
+static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ /*
+ * TODO: This is currently the same as mtk_rpmsg_send() and waits until
+ * the SCP has received the last command.
+ */
+ return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+ len, 0);
+}
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
+ .destroy_ept = mtk_rpmsg_destroy_ept,
+ .send = mtk_rpmsg_send,
+ .trysend = mtk_rpmsg_trysend,
+};
+
+static void mtk_rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);
+
+ kfree(mdev);
+}
+
+static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
+ .create_ept = mtk_rpmsg_create_ept,
+};
+
+static struct device_node *
+mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ int ret;
+
+ for_each_available_child_of_node(node, child) {
+ ret = of_property_read_string(child, "mtk,rpmsg-name", &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ struct rpmsg_channel_info *info)
+{
+ struct rpmsg_device *rpdev;
+ struct mtk_rpmsg_device *mdev;
+ struct platform_device *pdev = mtk_subdev->pdev;
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->mtk_subdev = mtk_subdev;
+
+ rpdev = &mdev->rpdev;
+ rpdev->ops = &mtk_rpmsg_device_ops;
+ rpdev->src = info->src;
+ rpdev->dst = info->dst;
+ strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);
+
+ rpdev->dev.of_node =
+ mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
+ rpdev->dev.parent = &pdev->dev;
+ rpdev->dev.release = mtk_rpmsg_release_device;
+
+ ret = rpmsg_register_device(rpdev);
+ if (ret) {
+ kfree(mdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_register_device_work_function(struct work_struct *register_work)
+{
+ struct mtk_rpmsg_rproc_subdev *subdev = container_of(
+ register_work, struct mtk_rpmsg_rproc_subdev, register_work);
+ struct platform_device *pdev = subdev->pdev;
+ struct mtk_rpmsg_channel_info *info;
+ int ret;
+
+ mutex_lock(&subdev->channels_lock);
+ list_for_each_entry(info, &subdev->channels, list) {
+ if (info->registered)
+ continue;
+
+ ret = mtk_rpmsg_register_device(subdev, &info->info);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't create rpmsg_device\n");
+ continue;
+ }
+
+ info->registered = true;
+ }
+ mutex_unlock(&subdev->channels_lock);
+}
+
+static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ char *name, u32 addr)
+{
+ struct mtk_rpmsg_channel_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ strscpy(info->info.name, name, RPMSG_NAME_SIZE);
+ info->info.src = addr;
+ info->info.dst = RPMSG_ADDR_ANY;
+ mutex_lock(&mtk_subdev->channels_lock);
+ list_add(&info->list, &mtk_subdev->channels);
+ mutex_unlock(&mtk_subdev->channels_lock);
+
+ schedule_work(&mtk_subdev->register_work);
+ return 0;
+}
+
+static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct rpmsg_ns_msg *msg = data;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
+ struct device *dev = &mtk_subdev->pdev->dev;
+
+ int ret;
+
+ if (len != sizeof(*msg)) {
+ dev_err(dev, "malformed ns msg (%d)\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * the name service ept does _not_ belong to a real rpmsg channel,
+ * and is handled by the rpmsg bus itself.
+ * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+ * in somehow.
+ */
+ if (rpdev) {
+ dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+ return -EINVAL;
+ }
+
+ /* don't trust the remote processor for null terminating the name */
+ msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+ dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);
+
+ ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
+ if (ret) {
+ dev_err(dev, "create rpmsg device failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ /* a dedicated endpoint handles the name service msgs */
+ if (mtk_subdev->info->ns_ipi_id >= 0) {
+ mtk_subdev->ns_ept =
+ __mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
+ mtk_subdev,
+ mtk_subdev->info->ns_ipi_id);
+ if (!mtk_subdev->ns_ept) {
+ dev_err(&mtk_subdev->pdev->dev,
+ "failed to create name service endpoint\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ if (mtk_subdev->ns_ept) {
+ mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+ mtk_subdev->ns_ept = NULL;
+ }
+}
+
+static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct mtk_rpmsg_channel_info *info, *next;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+ struct device *dev = &mtk_subdev->pdev->dev;
+
+ /*
+ * Destroy the name service endpoint here, to avoid new channels being
+ * created after the rpmsg_unregister_device loop below.
+ */
+ if (mtk_subdev->ns_ept) {
+ mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+ mtk_subdev->ns_ept = NULL;
+ }
+
+ cancel_work_sync(&mtk_subdev->register_work);
+
+ mutex_lock(&mtk_subdev->channels_lock);
+ list_for_each_entry(info, &mtk_subdev->channels, list) {
+ if (!info->registered)
+ continue;
+ if (rpmsg_unregister_device(dev, &info->info)) {
+ dev_warn(
+ dev,
+ "rpmsg_unregister_device failed for %s.%d.%d\n",
+ info->info.name, info->info.src,
+ info->info.dst);
+ }
+ }
+
+ list_for_each_entry_safe(info, next,
+ &mtk_subdev->channels, list) {
+ list_del(&info->list);
+ kfree(info);
+ }
+ mutex_unlock(&mtk_subdev->channels_lock);
+}
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+ struct mtk_rpmsg_info *info)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+
+ mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
+ if (!mtk_subdev)
+ return NULL;
+
+ mtk_subdev->pdev = pdev;
+ mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
+ mtk_subdev->subdev.stop = mtk_rpmsg_stop;
+ mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;
+ mtk_subdev->info = info;
+ INIT_LIST_HEAD(&mtk_subdev->channels);
+ INIT_WORK(&mtk_subdev->register_work,
+ mtk_register_device_work_function);
+ mutex_init(&mtk_subdev->channels_lock);
+
+ return &mtk_subdev->subdev;
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ kfree(mtk_subdev);
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP rpmsg driver");
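
The name-service callback in this new transport turns each announcement from the SCP into an ordinary rpmsg device on the rpmsg bus, so existing rpmsg client drivers bind to it in the usual way. Below is a minimal client sketch, not part of the patch; the channel name "mtk-scp-demo", the payload, and the driver name are made up for illustration.

    // Hypothetical rpmsg client for a channel announced by the SCP.
    #include <linux/module.h>
    #include <linux/rpmsg.h>

    static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
                       void *priv, u32 src)
    {
            dev_info(&rpdev->dev, "got %d bytes from 0x%x\n", len, src);
            return 0;
    }

    static int demo_probe(struct rpmsg_device *rpdev)
    {
            static const char ping[] = "ping";

            /* rpdev->ept was set up by the transport's create_ept op */
            return rpmsg_send(rpdev->ept, (void *)ping, sizeof(ping));
    }

    static const struct rpmsg_device_id demo_id_table[] = {
            { .name = "mtk-scp-demo" },
            { },
    };
    MODULE_DEVICE_TABLE(rpmsg, demo_id_table);

    static struct rpmsg_driver demo_rpmsg_driver = {
            .probe = demo_probe,
            .callback = demo_cb,
            .id_table = demo_id_table,
            .drv.name = "mtk_scp_demo",
    };
    module_rpmsg_driver(demo_rpmsg_driver);

    MODULE_LICENSE("GPL v2");
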
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index f46c787733e8..1995f5b3ea67 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref)
{
struct glink_channel *channel = container_of(ref, struct glink_channel,
refcount);
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *tmp;
unsigned long flags;
+ int iid;
+
+ /* cancel pending rx_done work */
+ cancel_work_sync(&channel->intent_work);
spin_lock_irqsave(&channel->intent_lock, flags);
+ /* Free all non-reuse intents pending rx_done work */
+ list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
+ if (!intent->reuse) {
+ kfree(intent->data);
+ kfree(intent);
+ }
+ }
+
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ kfree(tmp->data);
+ kfree(tmp);
+ }
idr_destroy(&channel->liids);
+
+ idr_for_each_entry(&channel->riids, tmp, iid)
+ kfree(tmp);
idr_destroy(&channel->riids);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -892,7 +913,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
struct intent_pair intents[];
} __packed * msg;
- const size_t msglen = sizeof(*msg) + sizeof(struct intent_pair) * count;
+ const size_t msglen = struct_size(msg, intents, count);
int ret;
int i;
unsigned long flags;
@@ -1094,13 +1115,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
close_link:
/*
* Send a close request to "undo" our open-ack. The close-ack will
- * release the last reference.
+ * release the qcom_glink_send_open_req() reference; the last reference
+ * will be released after receiving remote_close, or at transport
+ * unregister via qcom_glink_native_remove().
*/
qcom_glink_send_close_req(glink, channel);
- /* Release qcom_glink_send_open_req() reference */
- kref_put(&channel->refcount, qcom_glink_channel_release);
-
return ret;
}
@@ -1415,15 +1435,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
ret = rpmsg_register_device(rpdev);
if (ret)
- goto free_rpdev;
+ goto rcid_remove;
channel->rpdev = rpdev;
}
return 0;
-free_rpdev:
- kfree(rpdev);
rcid_remove:
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
@@ -1544,6 +1562,18 @@ static void qcom_glink_work(struct work_struct *work)
}
}
+static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
+{
+ struct glink_defer_cmd *dcmd;
+ struct glink_defer_cmd *tmp;
+
+ /* cancel any pending deferred rx_work */
+ cancel_work_sync(&glink->rx_work);
+
+ list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
+ kfree(dcmd);
+}
+
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
struct qcom_glink_pipe *rx,
@@ -1619,23 +1649,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
struct glink_channel *channel;
int cid;
int ret;
- unsigned long flags;
disable_irq(glink->irq);
- cancel_work_sync(&glink->rx_work);
+ qcom_glink_cancel_rx_work(glink);
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
- spin_lock_irqsave(&glink->idr_lock, flags);
/* Release any defunct local channels, waiting for close-ack */
idr_for_each_entry(&glink->lcids, channel, cid)
kref_put(&channel->refcount, qcom_glink_channel_release);
+ /* Release any defunct remote channels, waiting for close-req */
+ idr_for_each_entry(&glink->rcids, channel, cid)
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+
idr_destroy(&glink->lcids);
idr_destroy(&glink->rcids);
- spin_unlock_irqrestore(&glink->idr_lock, flags);
mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
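
The release paths above plug leaks by draining the pending done_intents list and walking the liids/riids IDRs before destroying them: idr_destroy() only frees the IDR's internal nodes, not the objects stored in it. A minimal sketch of that pattern in isolation; "struct demo_intent" is hypothetical, not code from the patch.

    #include <linux/idr.h>
    #include <linux/slab.h>

    struct demo_intent {
            void *data;
            size_t size;
    };

    static void demo_release_intents(struct idr *intents)
    {
            struct demo_intent *intent;
            int iid;

            /* free every stored object... */
            idr_for_each_entry(intents, intent, iid) {
                    kfree(intent->data);
                    kfree(intent);
            }
            /* ...then release only the IDR's own bookkeeping */
            idr_destroy(intents);
    }
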
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 64a5ce324c7f..579bc4443f6d 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
tail = le32_to_cpu(*pipe->tail);
tail += count;
- if (tail > pipe->native.length)
+ if (tail >= pipe->native.length)
tail -= pipe->native.length;
*pipe->tail = cpu_to_le32(tail);
@@ -201,7 +201,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
dev->parent = parent;
dev->of_node = node;
dev->release = qcom_glink_smem_release;
- dev_set_name(dev, "%pOFn:%pOFn", node->parent, node);
+ dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
ret = device_register(dev);
if (ret) {
pr_err("failed to register glink edge\n");
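
The glink_smem_rx_advance() change replaces ">" with ">=" so a tail index that lands exactly on the FIFO length wraps back to 0; with ">" it would be left equal to the length and the next access would index one element past the ring. The wrap arithmetic in isolation, as a hypothetical helper (assumes count <= length, as in the driver):

    #include <linux/types.h>

    /* Advance a ring-buffer index with wrap-around. */
    static u32 demo_ring_advance(u32 index, u32 count, u32 length)
    {
            index += count;
            if (index >= length)    /* ">" would miss index == length */
                    index -= length;
            return index;
    }
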
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index eea5ebbb5119..4bbbacdbf3bb 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -146,7 +146,6 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
struct device *dev = &eptdev->dev;
- struct sk_buff *skb;
/* Close the endpoint, if it's not already destroyed by the parent */
mutex_lock(&eptdev->ept_lock);
@@ -157,10 +156,7 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
mutex_unlock(&eptdev->ept_lock);
/* Discard all SKBs */
- while (!skb_queue_empty(&eptdev->queue)) {
- skb = skb_dequeue(&eptdev->queue);
- kfree_skb(skb);
- }
+ skb_queue_purge(&eptdev->queue);
put_device(dev);
@@ -227,8 +223,10 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
if (!kbuf)
return -ENOMEM;
- if (!copy_from_iter_full(kbuf, len, from))
- return -EFAULT;
+ if (!copy_from_iter_full(kbuf, len, from)) {
+ ret = -EFAULT;
+ goto free_kbuf;
+ }
if (mutex_lock_interruptible(&eptdev->ept_lock)) {
ret = -ERESTARTSYS;
@@ -290,7 +288,7 @@ static const struct file_operations rpmsg_eptdev_fops = {
.write_iter = rpmsg_eptdev_write_iter,
.poll = rpmsg_eptdev_poll,
.unlocked_ioctl = rpmsg_eptdev_ioctl,
- .compat_ioctl = rpmsg_eptdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
@@ -451,7 +449,7 @@ static const struct file_operations rpmsg_ctrldev_fops = {
.open = rpmsg_ctrldev_open,
.release = rpmsg_ctrldev_release,
.unlocked_ioctl = rpmsg_ctrldev_ioctl,
- .compat_ioctl = rpmsg_ctrldev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void rpmsg_ctrldev_release_device(struct device *dev)
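
Besides switching the compat_ioctl handlers to compat_ptr_ioctl() and using skb_queue_purge(), the rpmsg_char change plugs a leak: when copy_from_iter_full() fails, the bounce buffer is now freed through the common exit path instead of being abandoned. The shape of that fix as a stand-alone sketch; the function below is hypothetical, not the driver code.

    #include <linux/slab.h>
    #include <linux/uio.h>

    static ssize_t demo_write_iter(struct iov_iter *from, size_t len)
    {
            ssize_t ret = len;
            void *kbuf;

            kbuf = kzalloc(len, GFP_KERNEL);
            if (!kbuf)
                    return -ENOMEM;

            if (!copy_from_iter_full(kbuf, len, from)) {
                    ret = -EFAULT;
                    goto free_kbuf;         /* previously leaked kbuf */
            }

            /* ... hand kbuf to the endpoint here ... */

    free_kbuf:
            kfree(kbuf);
            return ret;
    }
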
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index ea88fd4e2a6e..e330ec4dfc33 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -46,7 +46,7 @@
* equals to the src address of their rpmsg channel), the driver's handler
* is invoked to process it.
*
- * That said, more complicated drivers might do need to allocate
+ * That said, more complicated drivers might need to allocate
* additional rpmsg addresses, and bind them to different rx callbacks.
* To accomplish that, those drivers need to call this function.
*
@@ -177,7 +177,7 @@ int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
EXPORT_SYMBOL(rpmsg_send_offchannel);
/**
- * rpmsg_send() - send a message across to the remote processor
+ * rpmsg_trysend() - send a message across to the remote processor
* @ept: the rpmsg endpoint
* @data: payload of message
* @len: length of payload
@@ -205,7 +205,7 @@ int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
EXPORT_SYMBOL(rpmsg_trysend);
/**
- * rpmsg_sendto() - send a message across to the remote processor, specify dst
+ * rpmsg_trysendto() - send a message across to the remote processor, specify dst
* @ept: the rpmsg endpoint
* @data: payload of message
* @len: length of payload
@@ -253,7 +253,7 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
EXPORT_SYMBOL(rpmsg_poll);
/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
+ * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
* @ept: the rpmsg endpoint
* @src: source address
* @dst: destination address
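
The kerneldoc corrections above matter to callers because the two families behave differently: rpmsg_send() may sleep until a transmit buffer becomes available (or a timeout elapses), while rpmsg_trysend() never blocks and fails immediately when no buffer is free. A hypothetical caller choosing between them:

    #include <linux/rpmsg.h>

    /* "can_sleep" is decided by the caller's context. */
    static int demo_notify(struct rpmsg_endpoint *ept, void *msg, int len,
                           bool can_sleep)
    {
            if (can_sleep)
                    return rpmsg_send(ept, msg, len);    /* may block */
            return rpmsg_trysend(ept, msg, len);         /* never blocks */
    }
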
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0d791c30b7ea..3fc83cd50e98 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -20,7 +20,7 @@
/**
* struct rpmsg_device_ops - indirection table for the rpmsg_device operations
- * @create_ept: create backend-specific endpoint, requried
+ * @create_ept: create backend-specific endpoint, required
* @announce_create: announce presence of new channel, optional
* @announce_destroy: announce destruction of channel, optional
*
@@ -39,13 +39,14 @@ struct rpmsg_device_ops {
/**
* struct rpmsg_endpoint_ops - indirection table for rpmsg_endpoint operations
- * @destroy_ept: destroy the given endpoint, required
+ * @destroy_ept: see @rpmsg_destroy_ept(), required
* @send: see @rpmsg_send(), required
* @sendto: see @rpmsg_sendto(), optional
* @send_offchannel: see @rpmsg_send_offchannel(), optional
* @trysend: see @rpmsg_trysend(), required
* @trysendto: see @rpmsg_trysendto(), optional
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
+ * @poll: see @rpmsg_poll(), optional
*
* Indirection table for the operations that a rpmsg backend should implement.
* In addition to @destroy_ept, the backend must at least implement @send and
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 5d3685bd76a2..376ebbf880d6 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -920,7 +920,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
goto vqs_del;
}
- dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
+ dev_dbg(&vdev->dev, "buffers: va %pK, dma %pad\n",
bufs_va, &vrp->bufs_dma);
/* half of the buffers is dedicated for RX */