Diffstat (limited to 'drivers/misc/fastrpc.c')
-rw-r--r-- | drivers/misc/fastrpc.c | 297 |
1 files changed, 246 insertions, 51 deletions
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 98603e235cf0..e3e085e33d46 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -32,9 +32,9 @@
 #define FASTRPC_CTX_MAX (256)
 #define FASTRPC_INIT_HANDLE	1
 #define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (64 * 1024 * 1024)
-#define INIT_MEMLEN_MAX  (8 * 1024 * 1024)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
 #define FASTRPC_DEVICE_NAME	"fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
 
 /* Retrives number of input buffers from the scalars parameter */
 #define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)
@@ -67,6 +67,8 @@
 /* Remote Method id table */
 #define FASTRPC_RMID_INIT_ATTACH	0
 #define FASTRPC_RMID_INIT_RELEASE	1
+#define FASTRPC_RMID_INIT_MMAP	4
+#define FASTRPC_RMID_INIT_MUNMAP	5
 #define FASTRPC_RMID_INIT_CREATE	6
 #define FASTRPC_RMID_INIT_CREATE_ATTR	7
 #define FASTRPC_RMID_INIT_CREATE_STATIC	8
@@ -90,6 +92,23 @@ struct fastrpc_remote_arg {
 	u64 len;
 };
 
+struct fastrpc_mmap_rsp_msg {
+	u64 vaddr;
+};
+
+struct fastrpc_mmap_req_msg {
+	s32 pgid;
+	u32 flags;
+	u64 vaddr;
+	s32 num;
+};
+
+struct fastrpc_munmap_req_msg {
+	s32 pgid;
+	u64 vaddr;
+	u64 size;
+};
+
 struct fastrpc_msg {
 	int pid;		/* process group id */
 	int tid;		/* thread id */
@@ -124,6 +143,9 @@ struct fastrpc_buf {
 	/* Lock for dma buf attachments */
 	struct mutex lock;
 	struct list_head attachments;
+	/* mmap support */
+	struct list_head node; /* list of user requested mmaps */
+	uintptr_t raddr;
 };
 
 struct fastrpc_dma_buf_attachment {
@@ -186,12 +208,14 @@ struct fastrpc_channel_ctx {
 	struct idr ctx_idr;
 	struct list_head users;
 	struct miscdevice miscdev;
+	struct kref refcount;
 };
 
 struct fastrpc_user {
 	struct list_head user;
 	struct list_head maps;
 	struct list_head pending;
+	struct list_head mmaps;
 
 	struct fastrpc_channel_ctx *cctx;
 	struct fastrpc_session_ctx *sctx;
@@ -269,6 +293,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&buf->attachments);
+	INIT_LIST_HEAD(&buf->node);
 	mutex_init(&buf->lock);
 
 	buf->fl = fl;
@@ -276,11 +301,15 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
 	buf->phys = 0;
 	buf->size = size;
 	buf->dev = dev;
+	buf->raddr = 0;
 
 	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
 				       GFP_KERNEL);
-	if (!buf->virt)
+	if (!buf->virt) {
+		mutex_destroy(&buf->lock);
+		kfree(buf);
 		return -ENOMEM;
+	}
 
 	if (fl->sctx && fl->sctx->sid)
 		buf->phys += ((u64)fl->sctx->sid << 32);
@@ -290,6 +319,25 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
 	return 0;
 }
 
+static void fastrpc_channel_ctx_free(struct kref *ref)
+{
+	struct fastrpc_channel_ctx *cctx;
+
+	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
+
+	kfree(cctx);
+}
+
+static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
+{
+	kref_get(&cctx->refcount);
+}
+
+static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
+{
+	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
+}
+
 static void fastrpc_context_free(struct kref *ref)
 {
 	struct fastrpc_invoke_ctx *ctx;
@@ -313,6 +361,8 @@ static void fastrpc_context_free(struct kref *ref)
 	kfree(ctx->maps);
 	kfree(ctx->olaps);
 	kfree(ctx);
+
+	fastrpc_channel_ctx_put(cctx);
 }
 
 static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
@@ -419,6 +469,9 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
 		fastrpc_get_buff_overlaps(ctx);
 	}
 
+	/* Released in fastrpc_context_put() */
+	fastrpc_channel_ctx_get(cctx);
+
 	ctx->sc = sc;
 	ctx->retval = -1;
 	ctx->pid = current->pid;
@@ -448,6 +501,7 @@ err_idr:
 	spin_lock(&user->lock);
 	list_del(&ctx->node);
 	spin_unlock(&user->lock);
+	fastrpc_channel_ctx_put(cctx);
 	kfree(ctx->maps);
 	kfree(ctx->olaps);
 	kfree(ctx);
@@ -499,6 +553,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
 			      FASTRPC_PHYS(buffer->phys), buffer->size);
 	if (ret < 0) {
 		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+		kfree(a);
 		return -EINVAL;
 	}
 
@@ -522,16 +577,10 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
 	mutex_lock(&buffer->lock);
 	list_del(&a->node);
 	mutex_unlock(&buffer->lock);
+	sg_free_table(&a->sgt);
 	kfree(a);
 }
 
-static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	struct fastrpc_buf *buf = dmabuf->priv;
-
-	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
-}
-
 static void *fastrpc_vmap(struct dma_buf *dmabuf)
 {
 	struct fastrpc_buf *buf = dmabuf->priv;
@@ -555,7 +604,6 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
 	.map_dma_buf = fastrpc_map_dma_buf,
 	.unmap_dma_buf = fastrpc_unmap_dma_buf,
 	.mmap = fastrpc_mmap,
-	.map = fastrpc_kmap,
 	.vmap = fastrpc_vmap,
 	.release = fastrpc_release,
 };
@@ -884,6 +932,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
 	if (!fl->sctx)
 		return -EINVAL;
 
+	if (!fl->cctx->rpdev)
+		return -EPIPE;
+
 	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -901,8 +952,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
 	if (err)
 		goto bail;
 
-	/* Wait for remote dsp to respond or time out */
-	err = wait_for_completion_interruptible(&ctx->work);
+	if (kernel) {
+		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+			err = -ETIMEDOUT;
+	} else {
+		err = wait_for_completion_interruptible(&ctx->work);
+	}
+
 	if (err)
 		goto bail;
 
@@ -921,12 +977,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
 	}
 
 bail:
-	/* We are done with this compute context, remove it from pending list */
-	spin_lock(&fl->lock);
-	list_del(&ctx->node);
-	spin_unlock(&fl->lock);
-	fastrpc_context_put(ctx);
-
+	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+		/* We are done with this compute context */
+		spin_lock(&fl->lock);
+		list_del(&ctx->node);
+		spin_unlock(&fl->lock);
+		fastrpc_context_put(ctx);
+	}
 	if (err)
 		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
 
@@ -1098,6 +1155,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
 	struct fastrpc_channel_ctx *cctx = fl->cctx;
 	struct fastrpc_invoke_ctx *ctx, *n;
 	struct fastrpc_map *map, *m;
+	struct fastrpc_buf *buf, *b;
 	unsigned long flags;
 
 	fastrpc_release_current_dsp_process(fl);
@@ -1119,7 +1177,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
 		fastrpc_map_put(map);
 	}
 
+	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+		list_del(&buf->node);
+		fastrpc_buf_free(buf);
+	}
+
 	fastrpc_session_free(cctx, fl->sctx);
+	fastrpc_channel_ctx_put(cctx);
 
 	mutex_destroy(&fl->mutex);
 	kfree(fl);
@@ -1138,11 +1202,15 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	if (!fl)
 		return -ENOMEM;
 
+	/* Released in fastrpc_device_release() */
+	fastrpc_channel_ctx_get(cctx);
+
 	filp->private_data = fl;
 	spin_lock_init(&fl->lock);
 	mutex_init(&fl->mutex);
 	INIT_LIST_HEAD(&fl->pending);
 	INIT_LIST_HEAD(&fl->maps);
+	INIT_LIST_HEAD(&fl->mmaps);
 	INIT_LIST_HEAD(&fl->user);
 	fl->tgid = current->tgid;
 	fl->cctx = cctx;
@@ -1163,26 +1231,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
-{
-	struct dma_buf *buf;
-	int info;
-
-	if (copy_from_user(&info, argp, sizeof(info)))
-		return -EFAULT;
-
-	buf = dma_buf_get(info);
-	if (IS_ERR_OR_NULL(buf))
-		return -EINVAL;
-	/*
-	 * one for the last get and other for the ALLOC_DMA_BUFF ioctl
-	 */
-	dma_buf_put(buf);
-	dma_buf_put(buf);
-
-	return 0;
-}
-
 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
 {
 	struct fastrpc_alloc_dma_buf bp;
@@ -1218,8 +1266,6 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
 		return -EFAULT;
 	}
 
-	get_dma_buf(buf->dmabuf);
-
 	return 0;
 }
 
@@ -1270,6 +1316,148 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
 	return err;
 }
 
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
+				   struct fastrpc_req_munmap *req)
+{
+	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+	struct fastrpc_buf *buf, *b;
+	struct fastrpc_munmap_req_msg req_msg;
+	struct device *dev = fl->sctx->dev;
+	int err;
+	u32 sc;
+
+	spin_lock(&fl->lock);
+	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+		if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+			break;
+		buf = NULL;
+	}
+	spin_unlock(&fl->lock);
+
+	if (!buf) {
+		dev_err(dev, "mmap not in list\n");
+		return -EINVAL;
+	}
+
+	req_msg.pgid = fl->tgid;
+	req_msg.size = buf->size;
+	req_msg.vaddr = buf->raddr;
+
+	args[0].ptr = (u64) (uintptr_t) &req_msg;
+	args[0].length = sizeof(req_msg);
+
+	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
+	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+				      &args[0]);
+	if (!err) {
+		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
+		spin_lock(&fl->lock);
+		list_del(&buf->node);
+		spin_unlock(&fl->lock);
+		fastrpc_buf_free(buf);
+	} else {
+		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
+	}
+
+	return err;
+}
+
+static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
+{
+	struct fastrpc_req_munmap req;
+
+	if (copy_from_user(&req, argp, sizeof(req)))
+		return -EFAULT;
+
+	return fastrpc_req_munmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
+	struct fastrpc_buf *buf = NULL;
+	struct fastrpc_mmap_req_msg req_msg;
+	struct fastrpc_mmap_rsp_msg rsp_msg;
+	struct fastrpc_req_munmap req_unmap;
+	struct fastrpc_phy_page pages;
+	struct fastrpc_req_mmap req;
+	struct device *dev = fl->sctx->dev;
+	int err;
+	u32 sc;
+
+	if (copy_from_user(&req, argp, sizeof(req)))
+		return -EFAULT;
+
+	if (req.flags != ADSP_MMAP_ADD_PAGES) {
+		dev_err(dev, "flag not supported 0x%x\n", req.flags);
+		return -EINVAL;
+	}
+
+	if (req.vaddrin) {
+		dev_err(dev, "adding user allocated pages is not supported\n");
+		return -EINVAL;
+	}
+
+	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+	if (err) {
+		dev_err(dev, "failed to allocate buffer\n");
+		return err;
+	}
+
+	req_msg.pgid = fl->tgid;
+	req_msg.flags = req.flags;
+	req_msg.vaddr = req.vaddrin;
+	req_msg.num = sizeof(pages);
+
+	args[0].ptr = (u64) (uintptr_t) &req_msg;
+	args[0].length = sizeof(req_msg);
+
+	pages.addr = buf->phys;
+	pages.size = buf->size;
+
+	args[1].ptr = (u64) (uintptr_t) &pages;
+	args[1].length = sizeof(pages);
+
+	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+	args[2].length = sizeof(rsp_msg);
+
+	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+				      &args[0]);
+	if (err) {
+		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+		goto err_invoke;
+	}
+
+	/* update the buffer to be able to deallocate the memory on the DSP */
+	buf->raddr = (uintptr_t) rsp_msg.vaddr;
+
+	/* let the client know the address to use */
+	req.vaddrout = rsp_msg.vaddr;
+
+	spin_lock(&fl->lock);
+	list_add_tail(&buf->node, &fl->mmaps);
+	spin_unlock(&fl->lock);
+
+	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+		/* unmap the memory and release the buffer */
+		req_unmap.vaddrout = buf->raddr;
+		req_unmap.size = buf->size;
+		fastrpc_req_munmap_impl(fl, &req_unmap);
+		return -EFAULT;
+	}
+
+	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
+		buf->raddr, buf->size);
+
+	return 0;
+
+err_invoke:
+	fastrpc_buf_free(buf);
+
+	return err;
+}
+
 static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
 				 unsigned long arg)
 {
@@ -1287,12 +1475,15 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
 	case FASTRPC_IOCTL_INIT_CREATE:
 		err = fastrpc_init_create_process(fl, argp);
 		break;
-	case FASTRPC_IOCTL_FREE_DMA_BUFF:
-		err = fastrpc_dmabuf_free(fl, argp);
-		break;
 	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
 		err = fastrpc_dmabuf_alloc(fl, argp);
 		break;
+	case FASTRPC_IOCTL_MMAP:
+		err = fastrpc_req_mmap(fl, argp);
+		break;
+	case FASTRPC_IOCTL_MUNMAP:
+		err = fastrpc_req_munmap(fl, argp);
+		break;
 	default:
 		err = -ENOTTY;
 		break;
@@ -1395,10 +1586,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 	int i, err, domain_id = -1;
 	const char *domain;
 
-	data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
 	err = of_property_read_string(rdev->of_node, "label", &domain);
 	if (err) {
 		dev_info(rdev, "FastRPC Domain not specified in DT\n");
@@ -1417,14 +1604,20 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 		return -EINVAL;
 	}
 
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
 	data->miscdev.minor = MISC_DYNAMIC_MINOR;
-	data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
-				       domains[domain_id]);
+	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
+					    domains[domain_id]);
 	data->miscdev.fops = &fastrpc_fops;
 	err = misc_register(&data->miscdev);
 	if (err)
 		return err;
 
+	kref_init(&data->refcount);
+
 	dev_set_drvdata(&rpdev->dev, data);
 	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
 	INIT_LIST_HEAD(&data->users);
@@ -1459,7 +1652,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 	misc_deregister(&cctx->miscdev);
 	of_platform_depopulate(&rpdev->dev);
-	kfree(cctx);
+
+	cctx->rpdev = NULL;
+	fastrpc_channel_ctx_put(cctx);
 }
 
 static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
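The diff above adds two userspace-facing ioctls, FASTRPC_IOCTL_MMAP and FASTRPC_IOCTL_MUNMAP, which allocate a DMA buffer in the kernel, map it onto the DSP, and record the remote address for later teardown. Below is a minimal, hypothetical userspace sketch (not part of this commit) showing how the interface could be exercised; the device node name, the <misc/fastrpc.h> include path, and the locally re-defined ADSP_MMAP_ADD_PAGES value are assumptions rather than details taken from this change.

/*
 * Hypothetical usage sketch for the new mmap/munmap ioctls.
 * Assumes the updated uapi header is installed as <misc/fastrpc.h> and
 * that the device node name matches the DT "label" (here "adsp").
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/fastrpc.h>	/* struct fastrpc_req_mmap, ioctl numbers */

#define ADSP_MMAP_ADD_PAGES 0x1000	/* driver-internal flag value from the patch */

int main(void)
{
	struct fastrpc_req_mmap mreq;
	struct fastrpc_req_munmap munreq;
	int fd;

	fd = open("/dev/fastrpc-adsp", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	memset(&mreq, 0, sizeof(mreq));
	mreq.flags = ADSP_MMAP_ADD_PAGES;	/* only flag fastrpc_req_mmap() accepts */
	mreq.vaddrin = 0;			/* user-allocated pages are rejected */
	mreq.size = 1024 * 1024;		/* the driver allocates the buffer itself */

	if (ioctl(fd, FASTRPC_IOCTL_MMAP, &mreq) == 0) {
		printf("mapped at DSP address 0x%llx\n",
		       (unsigned long long)mreq.vaddrout);

		/* munmap must match the recorded mapping (raddr and size) exactly */
		memset(&munreq, 0, sizeof(munreq));
		munreq.vaddrout = mreq.vaddrout;
		munreq.size = mreq.size;
		ioctl(fd, FASTRPC_IOCTL_MUNMAP, &munreq);
	}

	close(fd);
	return 0;
}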