Lines Matching defs:kproc (TI K3 DSP remoteproc driver)

112 struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
114 struct device *dev = kproc->rproc->dev.parent;
115 const char *name = kproc->rproc->name;
135 if (msg > kproc->rproc->max_notifyid) {
140 if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
153 struct k3_dsp_rproc *kproc = rproc->priv;
159 ret = mbox_send_message(kproc->mbox, (void *)msg);
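Lines 112-159 are the two halves of the mailbox plumbing for this core: the RX callback recovers the per-core state from the embedded mbox_client with container_of() and forwards virtqueue indices to the remoteproc core, while the .kick op sends the triggered virtqueue index back out over the same channel. A minimal sketch of that flow follows. It, and the sketches after the later groups, assume the driver's own struct k3_dsp_rproc (only fields visible in this listing are used) plus the usual kernel headers; the payload decode and the log strings here are illustrative, not the driver's exact ones.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mailbox_client.h>
#include <linux/remoteproc.h>

static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	/* the mbox_client is embedded in the per-core state, so recover it */
	struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
						  client);
	struct device *dev = kproc->rproc->dev.parent;
	const char *name = kproc->rproc->name;
	u32 msg = (u32)(uintptr_t)data;		/* placeholder payload decode */

	/* anything above max_notifyid cannot be a vring index, drop it */
	if (msg > kproc->rproc->max_notifyid) {
		dev_dbg(dev, "dropping unknown message 0x%x from %s\n", msg, name);
		return;
	}

	/* hand the vring index to the remoteproc/virtio core */
	if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
		dev_dbg(dev, "no message was found in vqid %u\n", msg);
}

static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* the triggered virtqueue index itself is the mailbox payload */
	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)vqid);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n", ret);
}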
166 static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
168 struct device *dev = kproc->dev;
171 ret = reset_control_assert(kproc->reset);
177 if (kproc->data->uses_lreset)
180 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
181 kproc->ti_sci_id);
184 if (reset_control_deassert(kproc->reset))
192 static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
194 struct device *dev = kproc->dev;
197 if (kproc->data->uses_lreset)
200 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
201 kproc->ti_sci_id);
208 ret = reset_control_deassert(kproc->reset);
211 if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
212 kproc->ti_sci_id))
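Lines 166-212 pair a core-local reset (the reset_control) with a TI-SCI managed module reset (dev_ops.get_device/put_device on the core's TI-SCI device ID). On cores flagged with data->uses_lreset only the local reset is toggled; otherwise both are, and a failure on one half tries to back out the other. A sketch of that pairing, under the same assumptions as the first sketch (error strings and the exact back-out ordering are reconstructions):

static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	/* assert the core-local reset first */
	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
		return ret;
	}

	/* cores using only the local reset are done here */
	if (kproc->data->uses_lreset)
		return 0;

	/* otherwise also assert the TI-SCI managed module reset */
	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
		/* back out the local reset asserted above */
		if (reset_control_deassert(kproc->reset))
			dev_warn(dev, "local-reset deassert back failed\n");
	}

	return ret;
}

static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	/* release the module reset first, unless only the local reset is used */
	if (!kproc->data->uses_lreset) {
		ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret) {
			dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
			return ret;
		}
	}

	ret = reset_control_deassert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
		/* back out the module reset if it was released above */
		if (!kproc->data->uses_lreset &&
		    kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
							  kproc->ti_sci_id))
			dev_warn(dev, "module-reset assert back failed\n");
	}

	return ret;
}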
221 struct k3_dsp_rproc *kproc = rproc->priv;
222 struct mbox_client *client = &kproc->client;
223 struct device *dev = kproc->dev;
232 kproc->mbox = mbox_request_channel(client, 0);
233 if (IS_ERR(kproc->mbox)) {
236 PTR_ERR(kproc->mbox));
247 ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
250 mbox_free_channel(kproc->mbox);
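Lines 221-250 set up the mbox_client embedded in the state structure, request channel 0 from the device tree, and ping the DSP; the echo request simply sits in the mailbox FIFO until the remote core boots and replies. A sketch under the same assumptions (RP_MBOX_ECHO_REQUEST is taken from line 247; the client flag values are reconstructions):

static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	int ret;

	/* the client is embedded in kproc, which is what lets the RX
	 * callback recover the per-core state via container_of() */
	client->dev = dev;
	client->rx_callback = k3_dsp_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox)) {
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(kproc->mbox));
		return PTR_ERR(kproc->mbox);
	}

	/* ping the DSP; the reply only arrives once the core is booted */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		mbox_free_channel(kproc->mbox);
		return ret;
	}

	return 0;
}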
268 struct k3_dsp_rproc *kproc = rproc->priv;
269 struct device *dev = kproc->dev;
272 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
273 kproc->ti_sci_id);
292 struct k3_dsp_rproc *kproc = rproc->priv;
293 struct device *dev = kproc->dev;
296 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
297 kproc->ti_sci_id);
313 struct k3_dsp_rproc *kproc = rproc->priv;
314 struct device *dev = kproc->dev;
323 if (boot_addr & (kproc->data->boot_align_addr - 1)) {
325 boot_addr, kproc->data->boot_align_addr);
331 ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
335 ret = k3_dsp_rproc_release(kproc);
342 mbox_free_channel(kproc->mbox);
354 struct k3_dsp_rproc *kproc = rproc->priv;
356 mbox_free_channel(kproc->mbox);
358 k3_dsp_rproc_reset(kproc);
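Lines 313-358 are the .start/.stop pair: start validates the boot address against the core's alignment requirement, programs the boot vector over TI-SCI (ti_sci_proc_set_config() with both config masks left at 0, as on line 331), then releases the resets; stop frees the mailbox channel and puts the core back into reset. A sketch, assuming the boot address comes from rproc->bootaddr and that the mailbox is requested at the top of start (line 342 frees it in the error path):

static int k3_dsp_rproc_start(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 boot_addr;
	int ret;

	ret = k3_dsp_rproc_request_mbox(rproc);
	if (ret)
		return ret;

	/* the DSP boot vector must sit on a core-specific boundary */
	boot_addr = rproc->bootaddr;
	if (boot_addr & (kproc->data->boot_align_addr - 1)) {
		dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
			boot_addr, kproc->data->boot_align_addr);
		ret = -EINVAL;
		goto put_mbox;
	}

	/* program the boot vector over TI-SCI, then release the resets */
	ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
	if (ret)
		goto put_mbox;

	ret = k3_dsp_rproc_release(kproc);
	if (ret)
		goto put_mbox;

	return 0;

put_mbox:
	mbox_free_channel(kproc->mbox);
	return ret;
}

static int k3_dsp_rproc_stop(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;

	mbox_free_channel(kproc->mbox);
	k3_dsp_rproc_reset(kproc);

	return 0;
}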
373 struct k3_dsp_rproc *kproc = rproc->priv;
374 struct device *dev = kproc->dev;
395 struct k3_dsp_rproc *kproc = rproc->priv;
396 struct device *dev = kproc->dev;
398 mbox_free_channel(kproc->mbox);
416 struct k3_dsp_rproc *kproc = rproc->priv;
417 struct device *dev = kproc->dev;
419 if (!kproc->rmem[0].cpu_addr) {
432 return (struct resource_table *)kproc->rmem[0].cpu_addr;
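Lines 416-432 serve the attach path: when the DSP was booted by an earlier stage, the resource table is expected to already sit in the first static carveout (rmem[0]), and this hook simply hands that mapping to the remoteproc core. A short sketch; the reported table size is a placeholder since it is not visible in the listing:

static struct resource_table *
k3_dsp_get_loaded_rsc_table(struct rproc *rproc, size_t *rsc_table_sz)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	/* the first carveout is expected to carry the published table */
	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found\n");
		return ERR_PTR(-ENOMEM);
	}

	*rsc_table_sz = 256;	/* placeholder upper bound, not from the listing */
	return (struct resource_table *)kproc->rmem[0].cpu_addr;
}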
445 struct k3_dsp_rproc *kproc = rproc->priv;
455 for (i = 0; i < kproc->num_mems; i++) {
456 bus_addr = kproc->mem[i].bus_addr;
457 dev_addr = kproc->mem[i].dev_addr;
458 size = kproc->mem[i].size;
465 va = kproc->mem[i].cpu_addr + offset;
473 va = kproc->mem[i].cpu_addr + offset;
480 for (i = 0; i < kproc->num_rmems; i++) {
481 dev_addr = kproc->rmem[i].dev_addr;
482 size = kproc->rmem[i].size;
486 va = kproc->rmem[i].cpu_addr + offset;
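Lines 445-486 are the device-address translator used by the ELF loader: each internal memory is matched first by its DSP-view device address and then by its SoC bus address, while the DDR carveouts are matched by device address only; a hit returns the corresponding kernel mapping plus the offset. A sketch of the translation loop (the op signature with is_iomem varies across kernel versions):

static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len,
				   bool *is_iomem)
{
	struct k3_dsp_rproc *kproc = rproc->priv;
	void __iomem *va;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	/* internal RAMs: match the DSP-view address, then the SoC bus address */
	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		if (da >= dev_addr && da + len <= dev_addr + size) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		if (da >= bus_addr && da + len <= bus_addr + size) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* static DDR carveouts: matched by device address only */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && da + len <= dev_addr + size) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}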
502 struct k3_dsp_rproc *kproc)
504 const struct k3_dsp_dev_data *data = kproc->data;
510 num_mems = kproc->data->num_mems;
511 kproc->mem = devm_kcalloc(kproc->dev, num_mems,
512 sizeof(*kproc->mem), GFP_KERNEL);
513 if (!kproc->mem)
532 kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
534 if (!kproc->mem[i].cpu_addr) {
539 kproc->mem[i].bus_addr = res->start;
540 kproc->mem[i].dev_addr = data->mems[i].dev_addr;
541 kproc->mem[i].size = resource_size(res);
544 data->mems[i].name, &kproc->mem[i].bus_addr,
545 kproc->mem[i].size, kproc->mem[i].cpu_addr,
546 kproc->mem[i].dev_addr);
548 kproc->num_mems = num_mems;
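Lines 502-548 populate kproc->mem[] from the internal-RAM regions named in the match data: one devm-managed write-combined mapping per region, with the bus address, the fixed device-view address from data->mems[], and the size recorded for the translator above. A sketch under the same assumptions (the by-name resource lookup and the error codes are reconstructions):

static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
					struct k3_dsp_rproc *kproc)
{
	const struct k3_dsp_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems, i;

	num_mems = kproc->data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}

		/* record all three views of the region for da_to_va() */
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);
	}
	kproc->num_mems = num_mems;

	return 0;
}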
553 static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
555 struct device *dev = kproc->dev;
584 kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
585 if (!kproc->rmem) {
606 kproc->rmem[i].bus_addr = rmem->base;
608 kproc->rmem[i].dev_addr = (u32)rmem->base;
609 kproc->rmem[i].size = rmem->size;
610 kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
611 if (!kproc->rmem[i].cpu_addr) {
619 i + 1, &kproc->rmem[i].bus_addr,
620 kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
621 kproc->rmem[i].dev_addr);
623 kproc->num_rmems = num_rmems;
629 iounmap(kproc->rmem[i].cpu_addr);
630 kfree(kproc->rmem);
632 of_reserved_mem_device_release(kproc->dev);
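Lines 553-632 handle the reserved memory regions: region 0 backs the device's DMA pool (hence the of_reserved_mem_device_release() on line 632 and in the exit helper below), and every further region is recorded as a static carveout with an ioremap_wc() mapping and a 32-bit device address. A sketch of that loop with the matching unwind; the region counting and DMA-pool setup calls are reconstructions around the lines shown:

static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems, ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 2)
		return -EINVAL;

	/* region 0 backs the device's DMA pool (vrings and vring buffers) */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret)
		return ret;

	/* the remaining regions become static carveouts */
	num_rmems--;
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem) {
		ret = -ENOMEM;
		goto release_rmem;
	}

	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		rmem = rmem_np ? of_reserved_mem_lookup(rmem_np) : NULL;
		of_node_put(rmem_np);
		if (!rmem) {
			ret = -EINVAL;
			goto unmap_rmem;
		}

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit carveout addresses are truncated to the 32-bit DSP view */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			ret = -ENOMEM;
			goto unmap_rmem;
		}
	}
	kproc->num_rmems = num_rmems;

	return 0;

unmap_rmem:
	while (--i >= 0)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
release_rmem:
	of_reserved_mem_device_release(kproc->dev);
	return ret;
}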
636 static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
640 for (i = 0; i < kproc->num_rmems; i++)
641 iounmap(kproc->rmem[i].cpu_addr);
642 kfree(kproc->rmem);
644 of_reserved_mem_device_release(kproc->dev);
678 struct k3_dsp_rproc *kproc;
697 sizeof(*kproc));
707 kproc = rproc->priv;
708 kproc->rproc = rproc;
709 kproc->dev = dev;
710 kproc->data = data;
712 kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
713 if (IS_ERR(kproc->ti_sci)) {
714 ret = PTR_ERR(kproc->ti_sci);
719 kproc->ti_sci = NULL;
723 ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
729 kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
730 if (IS_ERR(kproc->reset)) {
731 ret = PTR_ERR(kproc->reset);
736 kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
737 if (IS_ERR(kproc->tsp)) {
740 ret = PTR_ERR(kproc->tsp);
744 ret = ti_sci_proc_request(kproc->tsp);
750 ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
754 ret = k3_dsp_reserved_mem_init(kproc);
760 ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
788 ret = reset_control_status(kproc->reset);
795 k3_dsp_rproc_reset(kproc);
807 platform_set_drvdata(pdev, kproc);
812 k3_dsp_reserved_mem_exit(kproc);
814 ret1 = ti_sci_proc_release(kproc->tsp);
818 kfree(kproc->tsp);
820 ret1 = ti_sci_put_handle(kproc->ti_sci);
830 struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
831 struct rproc *rproc = kproc->rproc;
843 rproc_del(kproc->rproc);
845 ret = ti_sci_proc_release(kproc->tsp);
849 kfree(kproc->tsp);
851 ret = ti_sci_put_handle(kproc->ti_sci);
855 k3_dsp_reserved_mem_exit(kproc);
856 rproc_free(kproc->rproc);
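Lines 830-856 undo the probe sequence in reverse: unregister the rproc, release the TI-SCI processor control and the TI-SCI handle, then tear down the reserved-memory mappings and free the rproc. A sketch of that teardown order, reconstructed from the fragments (any shutdown of a still-running core that the full driver may do is omitted here):

static int k3_dsp_rproc_remove(struct platform_device *pdev)
{
	struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret;

	rproc_del(kproc->rproc);

	/* give back the TI-SCI processor control and the TI-SCI handle */
	ret = ti_sci_proc_release(kproc->tsp);
	if (ret)
		dev_err(dev, "failed to release proc, ret = %d\n", ret);
	kfree(kproc->tsp);

	ret = ti_sci_put_handle(kproc->ti_sci);
	if (ret)
		dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);

	/* unmap the DDR carveouts, release the DMA pool, free the rproc */
	k3_dsp_reserved_mem_exit(kproc);
	rproc_free(kproc->rproc);

	return 0;
}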