Lines matching refs: fl (a sketch of the per-client structure these references imply follows the listing)
195 struct fastrpc_user *fl;
217 struct fastrpc_user *fl;
246 struct fastrpc_user *fl;
320 int vmid = map->fl->cctx->vmperms[0].vmid;
329 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
340 if (map->fl) {
341 spin_lock(&map->fl->lock);
343 spin_unlock(&map->fl->lock);
344 map->fl = NULL;
365 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
368 struct fastrpc_session_ctx *sess = fl->sctx;
372 spin_lock(&fl->lock);
373 list_for_each_entry(map, &fl->maps, node) {
390 spin_unlock(&fl->lock);
402 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
415 buf->fl = fl;
435 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
441 ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
447 if (fl->sctx && fl->sctx->sid)
448 buf->phys += ((u64)fl->sctx->sid << 32);
453 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
456 struct device *rdev = &fl->cctx->rpdev->dev;
458 return __fastrpc_buf_alloc(fl, rdev, size, obuf);
588 ctx->fl = user;
755 static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
758 struct fastrpc_session_ctx *sess = fl->sctx;
763 if (!fastrpc_map_lookup(fl, fd, ppmap, true))
773 map->fl = fl;
799 map->phys += ((u64)fl->sctx->sid << 32);
815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
825 spin_lock(&fl->lock);
826 list_add_tail(&map->node, &fl->maps);
827 spin_unlock(&fl->lock);
905 struct device *dev = ctx->fl->sctx->dev;
914 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
937 struct device *dev = ctx->fl->sctx->dev;
957 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
1065 struct fastrpc_user *fl = ctx->fl;
1098 if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
1110 struct fastrpc_user *fl = ctx->fl;
1114 cctx = fl->cctx;
1115 msg->pid = fl->tgid;
1121 msg->ctx = ctx->ctxid | fl->pd;
1137 static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
1146 if (!fl->sctx)
1149 if (!fl->cctx->rpdev)
1153 dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
1157 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
1168 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
1197 spin_lock(&fl->lock);
1199 spin_unlock(&fl->lock);
1204 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1206 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
1211 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
1216 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
1219 if (!fl->is_secure_dev && fl->cctx->secure) {
1225 if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
1226 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
1234 static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
1274 if (!fl->cctx->remote_heap) {
1275 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
1276 &fl->cctx->remote_heap);
1281 if (fl->cctx->vmcount) {
1282 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1283 (u64)fl->cctx->remote_heap->size,
1284 &fl->cctx->perms,
1285 fl->cctx->vmperms, fl->cctx->vmcount);
1287 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
1288 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1294 inbuf.pgid = fl->tgid;
1297 fl->pd = USER_PD;
1307 pages[0].addr = fl->cctx->remote_heap->phys;
1308 pages[0].size = fl->cctx->remote_heap->size;
1316 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1325 if (fl->cctx->vmcount) {
1330 for (i = 0; i < fl->cctx->vmcount; i++)
1331 src_perms |= BIT(fl->cctx->vmperms[i].vmid);
1335 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1336 (u64)fl->cctx->remote_heap->size,
1339 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
1340 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1343 fastrpc_buf_free(fl->cctx->remote_heap);
1352 static int fastrpc_init_create_process(struct fastrpc_user *fl,
1385 if (is_session_rejected(fl, unsigned_module)) {
1395 inbuf.pgid = fl->tgid;
1401 fl->pd = USER_PD;
1404 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
1411 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
1416 fl->init_mem = imem;
1448 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1458 fl->init_mem = NULL;
1498 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
1504 tgid = fl->tgid;
1510 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1516 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1517 struct fastrpc_channel_ctx *cctx = fl->cctx;
1523 fastrpc_release_current_dsp_process(fl);
1526 list_del(&fl->user);
1529 if (fl->init_mem)
1530 fastrpc_buf_free(fl->init_mem);
1532 list_for_each_entry_safe(ctx, n, &fl->pending, node) {
1537 list_for_each_entry_safe(map, m, &fl->maps, node)
1540 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1545 fastrpc_session_free(cctx, fl->sctx);
1548 mutex_destroy(&fl->mutex);
1549 kfree(fl);
1559 struct fastrpc_user *fl = NULL;
1565 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1566 if (!fl)
1572 filp->private_data = fl;
1573 spin_lock_init(&fl->lock);
1574 mutex_init(&fl->mutex);
1575 INIT_LIST_HEAD(&fl->pending);
1576 INIT_LIST_HEAD(&fl->maps);
1577 INIT_LIST_HEAD(&fl->mmaps);
1578 INIT_LIST_HEAD(&fl->user);
1579 fl->tgid = current->tgid;
1580 fl->cctx = cctx;
1581 fl->is_secure_dev = fdevice->secure;
1583 fl->sctx = fastrpc_session_alloc(cctx);
1584 if (!fl->sctx) {
1586 mutex_destroy(&fl->mutex);
1587 kfree(fl);
1593 list_add_tail(&fl->user, &cctx->users);
1599 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1609 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1644 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
1647 int tgid = fl->tgid;
1654 fl->pd = pd;
1656 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1660 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1684 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1690 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
1704 fl->pd = USER_PD;
1706 return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
1711 struct fastrpc_user *fl)
1713 struct fastrpc_channel_ctx *cctx = fl->cctx;
1732 err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
1754 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
1764 dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
1771 dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
1776 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
1781 err = fastrpc_get_info_from_kernel(&cap, fl);
1791 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
1795 struct device *dev = fl->sctx->dev;
1799 req_msg.pgid = fl->tgid;
1807 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1811 spin_lock(&fl->lock);
1813 spin_unlock(&fl->lock);
1822 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
1826 struct device *dev = fl->sctx->dev;
1831 spin_lock(&fl->lock);
1832 list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
1838 spin_unlock(&fl->lock);
1846 return fastrpc_req_munmap_impl(fl, buf);
1849 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
1857 struct device *dev = fl->sctx->dev;
1876 err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
1878 err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
1885 req_msg.pgid = fl->tgid;
1903 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1917 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
1919 &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
1921 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
1927 spin_lock(&fl->lock);
1928 list_add_tail(&buf->node, &fl->mmaps);
1929 spin_unlock(&fl->lock);
1942 fastrpc_req_munmap_impl(fl, buf);
1949 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
1956 struct device *dev = fl->sctx->dev;
1958 spin_lock(&fl->lock);
1959 list_for_each_entry_safe(iter, m, &fl->maps, node) {
1966 spin_unlock(&fl->lock);
1973 req_msg.pgid = fl->tgid;
1982 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1993 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
2000 return fastrpc_req_mem_unmap_impl(fl, &req);
2003 static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
2011 struct device *dev = fl->sctx->dev;
2020 err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
2026 req_msg.pgid = fl->tgid;
2051 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
2068 fastrpc_req_mem_unmap_impl(fl, &req_unmap);
2083 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
2089 err = fastrpc_invoke(fl, argp);
2092 err = fastrpc_init_attach(fl, ROOT_PD);
2095 err = fastrpc_init_attach(fl, SENSORS_PD);
2098 err = fastrpc_init_create_static_process(fl, argp);
2101 err = fastrpc_init_create_process(fl, argp);
2104 err = fastrpc_dmabuf_alloc(fl, argp);
2107 err = fastrpc_req_mmap(fl, argp);
2110 err = fastrpc_req_munmap(fl, argp);
2113 err = fastrpc_req_mem_map(fl, argp);
2116 err = fastrpc_req_mem_unmap(fl, argp);
2119 err = fastrpc_get_dsp_info(fl, argp);
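
Read together, the lines above touch every field of the per-client state that `fl` points at. Below is a minimal sketch of `struct fastrpc_user` as it can be inferred from these call sites alone; the field types, ordering, and comments are assumptions derived from how each field is used (spin_lock_init, mutex_init, INIT_LIST_HEAD, fdevice->secure, and so on), not a quotation of the driver's own definition.

/*
 * Hypothetical reconstruction from the references in this listing;
 * not the authoritative definition from the driver source.
 */
struct fastrpc_user {
	struct list_head user;		/* node on cctx->users (line 1593) */
	struct list_head maps;		/* fastrpc_map list (lines 826, 1959) */
	struct list_head pending;	/* in-flight invoke contexts (line 1532) */
	struct list_head mmaps;		/* fastrpc_buf list for req_mmap (line 1928) */

	struct fastrpc_channel_ctx *cctx;	/* channel, set at open (line 1580) */
	struct fastrpc_session_ctx *sctx;	/* session from fastrpc_session_alloc() (line 1583) */
	struct fastrpc_buf *init_mem;		/* init memory from create_process (line 1416) */

	int tgid;		/* current->tgid, sent as msg->pid / pgid (lines 1579, 1115) */
	int pd;			/* protection domain: ROOT_PD/SENSORS_PD/USER_PD (lines 1654, 1297) */
	bool is_secure_dev;	/* from fdevice->secure at open (line 1581) */
	spinlock_t lock;	/* guards the maps/mmaps lists (lines 825-827, 1927-1929) */
	struct mutex mutex;	/* only its init/destroy appear in this listing (lines 1574, 1548) */
};

From the listing, the spinlock is what brackets insertions into and lookups over the maps and mmaps lists, while fl->mutex shows up here only in mutex_init() and mutex_destroy(); how (or whether) it is taken elsewhere is not visible from these matches.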