Lines Matching refs:pkt

127 struct cmdq_pkt *pkt;
131 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
132 if (!pkt)
134 pkt->va_base = kzalloc(size, GFP_KERNEL);
135 if (!pkt->va_base) {
136 kfree(pkt);
139 pkt->buf_size = size;
140 pkt->cl = (void *)client;
143 dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
147 kfree(pkt->va_base);
148 kfree(pkt);
152 pkt->pa_base = dma_addr;
154 return pkt;
158 void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
160 struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
162 dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
164 kfree(pkt->va_base);
165 kfree(pkt);
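Taken together, lines 127-154 and 158-165 show the packet lifecycle: creation allocates the cmdq_pkt and its command buffer, maps the buffer with dma_map_single() and records the bus address in pa_base; cmdq_pkt_destroy() unmaps it and frees both allocations. A minimal caller-side sketch, assuming the usual cmdq_pkt_create() entry point that wraps the creation code above and a cmdq_client acquired elsewhere:

/* Sketch: one packet's lifetime.  cl is a struct cmdq_client acquired
 * beforehand (e.g. via cmdq_mbox_create(), not part of this listing). */
static int my_pkt_demo(struct cmdq_client *cl)
{
	struct cmdq_pkt *pkt;

	pkt = cmdq_pkt_create(cl, PAGE_SIZE);	/* kzalloc() + dma_map_single() above */
	if (IS_ERR(pkt))
		return PTR_ERR(pkt);

	/* ... append commands, finalize, flush ... */

	cmdq_pkt_destroy(pkt);	/* dma_unmap_single() + kfree() of buffer and pkt */
	return 0;
}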
169 static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
174 if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
176 * When the allocated buffer size (pkt->buf_size) is used
177 * up, the real required size (pkt->cmd_buf_size) is still
183 pkt->cmd_buf_size += CMDQ_INST_SIZE;
185 __func__, (u32)pkt->buf_size);
189 cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
191 pkt->cmd_buf_size += CMDQ_INST_SIZE;
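Note the failure mode at lines 174-185: when the buffer is exhausted, the helper still bumps pkt->cmd_buf_size before returning an error, so after a failed build pkt->cmd_buf_size records how much memory would ultimately have been needed. A hedged sketch of a caller using that to retry once with a right-sized buffer; build_cmds() is a hypothetical callback that appends commands and returns -ENOMEM on overflow:

/* Hypothetical grow-and-retry helper around cmdq_pkt_create(). */
static struct cmdq_pkt *my_pkt_build(struct cmdq_client *cl, size_t size,
				     int (*build_cmds)(struct cmdq_pkt *))
{
	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, size);

	if (IS_ERR(pkt))
		return pkt;

	if (build_cmds(pkt) == -ENOMEM) {
		/* cmd_buf_size kept growing past buf_size, so it now
		 * holds the real requirement (see comment above). */
		size_t needed = pkt->cmd_buf_size;

		cmdq_pkt_destroy(pkt);
		pkt = cmdq_pkt_create(cl, needed);
		if (!IS_ERR(pkt) && build_cmds(pkt)) {
			cmdq_pkt_destroy(pkt);
			pkt = ERR_PTR(-ENOMEM);
		}
	}
	return pkt;
}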
196 int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
205 return cmdq_pkt_append_command(pkt, inst);
209 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
219 err = cmdq_pkt_append_command(pkt, inst);
225 err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
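cmdq_pkt_write() appends a plain register write; cmdq_pkt_write_mask() first appends a mask instruction and then (line 225) reuses cmdq_pkt_write() with the mask flag folded into the offset. A usage sketch; the subsys, offset and value numbers below are made up:

/* Sketch: program two (made-up) registers through the GCE.  Real
 * subsys/offset pairs come from the SoC's gce binding header. */
static int my_write_regs(struct cmdq_pkt *pkt)
{
	int err;

	err = cmdq_pkt_write(pkt, 0x14, 0x100, 0xdeadbeef);	/* full register */
	if (err)
		return err;
	/* read-modify-write of the low nibble only */
	return cmdq_pkt_write_mask(pkt, 0x14, 0x104, 0x1, 0xf);
}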
231 int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
242 return cmdq_pkt_append_command(pkt, inst);
246 int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
257 return cmdq_pkt_append_command(pkt, inst);
261 int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
269 err = cmdq_pkt_append_command(pkt, inst);
280 return cmdq_pkt_append_command(pkt, inst);
284 int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
294 return cmdq_pkt_append_command(pkt, inst);
298 int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
306 err = cmdq_pkt_append_command(pkt, inst);
315 return cmdq_pkt_append_command(pkt, inst);
319 int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
331 return cmdq_pkt_append_command(pkt, inst);
335 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
346 return cmdq_pkt_append_command(pkt, inst);
350 int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
361 return cmdq_pkt_append_command(pkt, inst);
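The three event helpers cover the GCE's token-based synchronization: cmdq_pkt_wfe() stalls the GCE thread until the event token is set (optionally clearing it as it proceeds), while cmdq_pkt_clear_event() and cmdq_pkt_set_event() lower and raise a token explicitly. A typical wait-for-vsync pattern, with a hypothetical event ID:

/* Sketch: synchronize the packet to a display event.  MY_EVENT_VSYNC
 * is hypothetical; real IDs come from the SoC's gce binding header. */
#define MY_EVENT_VSYNC	199

static int my_wait_vsync(struct cmdq_pkt *pkt)
{
	int err;

	err = cmdq_pkt_clear_event(pkt, MY_EVENT_VSYNC);	/* drop stale token */
	if (err)
		return err;
	return cmdq_pkt_wfe(pkt, MY_EVENT_VSYNC, true);	/* wait, then clear */
}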
365 int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
375 err = cmdq_pkt_append_command(pkt, inst);
381 int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
389 err = cmdq_pkt_append_command(pkt, inst);
394 err = cmdq_pkt_poll(pkt, subsys, offset, value);
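cmdq_pkt_poll() makes the GCE thread spin until a register equals the expected value; cmdq_pkt_poll_mask() appends the mask first and then (line 394) falls through to cmdq_pkt_poll(). A short sketch, again with made-up numbers:

/* Sketch: block the GCE thread until bit 0 of a (made-up) status
 * register reads back as 1. */
static int my_wait_idle(struct cmdq_pkt *pkt)
{
	return cmdq_pkt_poll_mask(pkt, 0x14, 0x200, 0x1, 0x1);
}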
400 int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
408 return cmdq_pkt_append_command(pkt, inst);
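cmdq_pkt_assign() loads an immediate into one of the GCE's internal registers. The read_s/write_s family above takes such a register (high_addr_reg_idx) holding the upper bits of a DMA address, which is how full-width addresses are reached with 32-bit instructions. A sketch, assuming the CMDQ_THR_SPR_IDX0 spare-register index and the CMDQ_ADDR_HIGH()/CMDQ_ADDR_LOW() helpers from the cmdq mailbox headers:

/* Sketch: store a constant to a DMA address via write_s.  dma_addr
 * would be a buffer the caller mapped for the GCE beforehand. */
static int my_store_u32(struct cmdq_pkt *pkt, dma_addr_t dma_addr)
{
	int err;

	/* stash the high half of the address in spare register 0 */
	err = cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(dma_addr));
	if (err)
		return err;
	return cmdq_pkt_write_s_value(pkt, CMDQ_THR_SPR_IDX0,
				      CMDQ_ADDR_LOW(dma_addr), 0x12345678);
}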
412 int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
419 cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
420 return cmdq_pkt_append_command(pkt, inst);
424 int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
432 err = cmdq_pkt_append_command(pkt, inst);
439 cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
440 err = cmdq_pkt_append_command(pkt, inst);
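cmdq_pkt_finalize() terminates the packet: it appends an end-of-command instruction that raises the completion interrupt, then a closing jump whose target is shifted through cmdq_get_shift_pa() (lines 419 and 439), matching the address shift the GCE hardware applies. It must run once, after the last real command and before the packet is flushed; the submit sketches after lines 495 and 528 below show it in place.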
448 struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
449 struct cmdq_task_cb *cb = &pkt->cb;
450 struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
464 dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
465 pkt->cmd_buf_size, DMA_TO_DEVICE);
472 int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
477 struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
479 pkt->cb.cb = cb;
480 pkt->cb.data = data;
481 pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
482 pkt->async_cb.data = pkt;
484 dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
485 pkt->cmd_buf_size, DMA_TO_DEVICE);
495 err = mbox_send_message(client->chan, pkt);
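On the submit side, cmdq_pkt_flush_async() stores the caller's callback in pkt->cb, points the mailbox-level callback at cmdq_pkt_flush_async_cb, syncs the command buffer to the device and queues the packet with mbox_send_message(); when the GCE thread finishes, the buffer is synced back for the CPU (lines 464-465) and the caller's callback fires. A hedged end-to-end sketch, assuming the cmdq_cb_data/CMDQ_CB_NORMAL types from the cmdq mailbox header and hypothetical my_*() names:

/* Hypothetical completion callback, run from the mailbox RX path. */
static void my_pkt_done(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = data.data;	/* opaque pointer passed below */

	if (data.sta != CMDQ_CB_NORMAL)
		pr_warn("cmdq: packet execution failed\n");
	cmdq_pkt_destroy(pkt);
}

/* Hypothetical async submit: close the packet, then queue it. */
static int my_pkt_submit(struct cmdq_pkt *pkt)
{
	int err = cmdq_pkt_finalize(pkt);	/* EOC + jump, see above */

	if (err)
		return err;
	return cmdq_pkt_flush_async(pkt, my_pkt_done, pkt);
}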
522 int cmdq_pkt_flush(struct cmdq_pkt *pkt)
528 err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
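cmdq_pkt_flush() is the blocking wrapper: it calls cmdq_pkt_flush_async() with an internal callback (cmdq_pkt_flush_cb, line 528) that signals a completion, then waits on it. For callers that may sleep, the whole submit collapses to:

/* Sketch: synchronous variant of my_pkt_submit() above. */
static int my_pkt_run(struct cmdq_pkt *pkt)
{
	int err = cmdq_pkt_finalize(pkt);

	if (err)
		return err;
	return cmdq_pkt_flush(pkt);	/* returns once the GCE has executed it */
}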