Lines matching refs: req
207 struct sba_request *req = NULL;
210 list_for_each_entry(req, &sba->reqs_free_list, node) {
211 if (async_tx_test_ack(&req->tx)) {
212 list_move_tail(&req->node, &sba->reqs_alloc_list);
230 req->flags = SBA_REQUEST_STATE_ALLOCED;
231 req->first = req;
232 INIT_LIST_HEAD(&req->next);
233 atomic_set(&req->next_pending_count, 1);
235 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
236 async_tx_ack(&req->tx);
238 return req;
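The matches at 207-238 come from the request allocator: take the first ACKed descriptor off the free list, move it to the alloc list, and reinitialize it as a single-entry chain. A minimal sketch of that pattern, assuming a sba->reqs_lock spinlock (the lock name is not in the matched lines):

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	/* Take the first ACKed descriptor off the free list */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found)
		return NULL;

	/* Reinitialize the request as a single-entry chain */
	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}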
243 struct sba_request *req)
246 req->flags &= ~SBA_REQUEST_STATE_MASK;
247 req->flags |= SBA_REQUEST_STATE_PENDING;
248 list_move_tail(&req->node, &sba->reqs_pending_list);
255 struct sba_request *req)
262 req->flags &= ~SBA_REQUEST_STATE_MASK;
263 req->flags |= SBA_REQUEST_STATE_ACTIVE;
264 list_move_tail(&req->node, &sba->reqs_active_list);
265 if (req->flags & SBA_REQUEST_FENCE)
272 struct sba_request *req)
275 req->flags &= ~SBA_REQUEST_STATE_MASK;
276 req->flags |= SBA_REQUEST_STATE_ABORTED;
277 list_move_tail(&req->node, &sba->reqs_aborted_list);
284 struct sba_request *req)
287 req->flags &= ~SBA_REQUEST_STATE_MASK;
288 req->flags |= SBA_REQUEST_STATE_FREE;
289 list_move_tail(&req->node, &sba->reqs_free_list);
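Lines 243-289 are the four state-transition helpers (pending, active, aborted, free). Each clears the state bits in req->flags, sets the new state, and moves the request onto the list that mirrors that state. A sketch of one of them, assuming the caller already holds the per-device request lock:

/* Caller is assumed to hold sba->reqs_lock (lock name not in the listing) */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
}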
294 static void sba_free_chained_requests(struct sba_request *req)
298 struct sba_device *sba = req->sba;
302 _sba_free_request(sba, req);
303 list_for_each_entry(nreq, &req->next, next)
310 struct sba_request *req)
313 struct sba_device *sba = req->sba;
317 list_add_tail(&req->next, &first->next);
318 req->first = first;
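Lines 310-318 are from the chaining helper that links a follow-up request behind the head of a chain. A sketch of that pattern, assuming the same reqs_lock and that the head's next_pending_count is bumped for every chained request:

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Queue req behind the head of the chain and remember the head */
	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}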
327 struct sba_request *req, *req1;
332 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
333 _sba_free_request(sba, req);
336 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
337 _sba_abort_request(sba, req);
350 struct sba_request *req, *req1;
355 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
356 _sba_free_request(sba, req);
362 struct sba_request *req)
367 req->msg.error = 0;
368 ret = mbox_send_message(sba->mchan, &req->msg);
375 ret = req->msg.error;
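Lines 362-375 show the request being handed to the Broadcom mailbox channel, with errors checked both from the send call and from the message itself. A sketch of that send path, assuming a client-driven txdone model; the dev_err strings are illustrative:

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret;

	/* Send the pre-built message for this request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Pick up any error reported by the mailbox controller */
	ret = req->msg.error;
	if (ret < 0)
		dev_err(sba->dev, "message error %d", ret);

	/* Signal txdone on the client-driven mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}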
391 struct sba_request *req;
397 req = list_first_entry(&sba->reqs_pending_list,
401 if (!_sba_active_request(sba, req))
405 ret = sba_send_mbox_request(sba, req);
407 _sba_pending_request(sba, req);
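Lines 391-407 are the loop that drains the pending list: take the oldest pending request, promote it to active, and hand it to the mailbox; on failure it is pushed back to pending and the loop stops. A sketch, with the per-call send budget as an assumption:

static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;	/* assumed budget */
	struct sba_request *req;

	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the oldest pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make the request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send the request to the mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}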
416 struct sba_request *req)
420 struct sba_request *nreq, *first = req->first;
460 struct sba_request *req;
466 list_for_each_entry(req, &sba->reqs_free_list, node)
467 if (async_tx_test_ack(&req->tx))
470 list_for_each_entry(req, &sba->reqs_alloc_list, node)
473 list_for_each_entry(req, &sba->reqs_pending_list, node)
476 list_for_each_entry(req, &sba->reqs_active_list, node)
479 list_for_each_entry(req, &sba->reqs_aborted_list, node)
528 struct sba_request *req, *nreq;
534 req = to_sba_request(tx);
539 _sba_pending_request(sba, req);
540 list_for_each_entry(nreq, &req->next, next)
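Lines 528-540 are from the async_tx submit hook: the cookie is assigned and the request plus every chained request is moved to the pending list in one critical section. A sketch, with to_sba_device() and the locking assumed:

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);	/* assumed container_of() helper */
	req = to_sba_request(tx);

	/* Assign the cookie and mark the whole chain pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}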
563 static void sba_fillup_interrupt_msg(struct sba_request *req,
569 dma_addr_t resp_dma = req->tx.phys;
575 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
586 cmdsp->data_len = req->sba->hw_resp_size;
592 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
604 if (req->sba->hw_resp_size) {
607 cmdsp->resp_len = req->sba->hw_resp_size;
611 cmdsp->data_len = req->sba->hw_resp_size;
618 msg->ctx = req;
625 struct sba_request *req = NULL;
629 req = sba_alloc_request(sba);
630 if (!req)
637 req->flags |= SBA_REQUEST_FENCE;
640 sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
643 req->tx.flags = flags;
644 req->tx.cookie = -EBUSY;
646 return &req->tx;
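Lines 625-646 show the common shape of the prep callbacks: allocate a request, force a fence so later requests wait on this one, fill the mailbox message, and initialize the async_tx descriptor. A sketch of that shape, with to_sba_device() assumed:

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_device *sba = to_sba_device(dchan);	/* assumed helper */
	struct sba_request *req;

	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/* Fence so later requests wait for this one's completion */
	req->flags |= SBA_REQUEST_FENCE;

	/* Build the mailbox message for an interrupt-only operation */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}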
649 static void sba_fillup_memcpy_msg(struct sba_request *req,
657 dma_addr_t resp_dma = req->tx.phys;
692 if (req->sba->hw_resp_size) {
695 cmdsp->resp_len = req->sba->hw_resp_size;
706 msg->ctx = req;
715 struct sba_request *req = NULL;
718 req = sba_alloc_request(sba);
719 if (!req)
722 req->flags |= SBA_REQUEST_FENCE;
725 sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
729 req->tx.flags = flags;
730 req->tx.cookie = -EBUSY;
732 return req;
742 struct sba_request *first = NULL, *req;
748 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
750 if (!req) {
757 sba_chain_request(first, req);
759 first = req;
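Lines 742-759 are from the memcpy prep routine, which splits a long copy into chained sub-requests. A sketch of the split-and-chain loop; the per-request size limit (hw_buf_size here) is an assumption not shown in the matched lines:

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Build a chain of requests, each covering at most hw_buf_size */
	while (len) {
		req_len = min_t(size_t, len, sba->hw_buf_size);

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return first ? &first->tx : NULL;
}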
768 static void sba_fillup_xor_msg(struct sba_request *req,
777 dma_addr_t resp_dma = req->tx.phys;
831 if (req->sba->hw_resp_size) {
834 cmdsp->resp_len = req->sba->hw_resp_size;
845 msg->ctx = req;
854 struct sba_request *req = NULL;
857 req = sba_alloc_request(sba);
858 if (!req)
861 req->flags |= SBA_REQUEST_FENCE;
864 sba_fillup_xor_msg(req, req->cmds, &req->msg,
868 req->tx.flags = flags;
869 req->tx.cookie = -EBUSY;
871 return req;
881 struct sba_request *first = NULL, *req;
891 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
893 if (!req) {
900 sba_chain_request(first, req);
902 first = req;
911 static void sba_fillup_pq_msg(struct sba_request *req,
922 dma_addr_t resp_dma = req->tx.phys;
1014 if (req->sba->hw_resp_size) {
1017 cmdsp->resp_len = req->sba->hw_resp_size;
1041 if (req->sba->hw_resp_size) {
1044 cmdsp->resp_len = req->sba->hw_resp_size;
1056 msg->ctx = req;
1065 struct sba_request *req = NULL;
1068 req = sba_alloc_request(sba);
1069 if (!req)
1072 req->flags |= SBA_REQUEST_FENCE;
1075 sba_fillup_pq_msg(req, dmaf_continue(flags),
1076 req->cmds, &req->msg,
1080 req->tx.flags = flags;
1081 req->tx.cookie = -EBUSY;
1083 return req;
1086 static void sba_fillup_pq_single_msg(struct sba_request *req,
1097 dma_addr_t resp_dma = req->tx.phys;
1174 if (req->sba->hw_resp_size) {
1177 cmdsp->resp_len = req->sba->hw_resp_size;
1202 pos = (dpos < req->sba->max_pq_coefs) ?
1203 dpos : (req->sba->max_pq_coefs - 1);
1231 pos = (dpos < req->sba->max_pq_coefs) ?
1232 dpos : (req->sba->max_pq_coefs - 1);
1295 if (req->sba->hw_resp_size) {
1298 cmdsp->resp_len = req->sba->hw_resp_size;
1310 msg->ctx = req;
1320 struct sba_request *req = NULL;
1323 req = sba_alloc_request(sba);
1324 if (!req)
1327 req->flags |= SBA_REQUEST_FENCE;
1330 sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1331 req->cmds, &req->msg, off, len,
1335 req->tx.flags = flags;
1336 req->tx.cookie = -EBUSY;
1338 return req;
1351 struct sba_request *first = NULL, *req;
1384 req = sba_prep_dma_pq_single_req(sba,
1387 if (!req)
1391 sba_chain_request(first, req);
1393 first = req;
1402 req = sba_prep_dma_pq_single_req(sba,
1405 if (!req)
1409 sba_chain_request(first, req);
1411 first = req;
1416 req = sba_prep_dma_pq_req(sba, off,
1419 if (!req)
1423 sba_chain_request(first, req);
1425 first = req;
1445 struct sba_request *req = m->ctx;
1446 struct sba_device *sba = req->sba;
1454 sba_process_received_request(sba, req);
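Lines 1445-1454 are from the mailbox receive callback: the completed request is recovered from the message context and handed to the completion path. A sketch, assuming the brcm_message ctx and error fields implied by the matched lines:

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Log an error reported by the mailbox controller */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Complete the request (and possibly its whole chain) */
	sba_process_received_request(sba, req);
}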
1474 struct sba_request *req = NULL;
1499 req = devm_kzalloc(sba->dev,
1500 struct_size(req, cmds, sba->max_cmd_per_req),
1502 if (!req) {
1506 INIT_LIST_HEAD(&req->node);
1507 req->sba = sba;
1508 req->flags = SBA_REQUEST_STATE_FREE;
1509 INIT_LIST_HEAD(&req->next);
1510 atomic_set(&req->next_pending_count, 0);
1512 req->cmds[j].cmd = 0;
1513 req->cmds[j].cmd_dma = sba->cmds_base +
1515 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1517 req->cmds[j].flags = 0;
1519 memset(&req->msg, 0, sizeof(req->msg));
1520 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1521 async_tx_ack(&req->tx);
1522 req->tx.tx_submit = sba_tx_submit;
1523 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
1524 list_add_tail(&req->node, &sba->reqs_free_list);
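Lines 1474-1524 show the one-time pre-allocation of requests at probe: each request gets its slice of the command and response DMA areas, an initialized async_tx descriptor, and a place on the free list. A sketch of that loop; the loop bound (max_req), the per-command address arithmetic, and the simplified error handling are assumptions:

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   struct_size(req, cmds, sba->max_cmd_per_req),
				   GFP_KERNEL);
		if (!req)
			return -ENOMEM;	/* unwinding simplified in this sketch */

		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);

		/* Point each command slot at its piece of the DMA pool */
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}

		memset(&req->msg, 0, sizeof(req->msg));

		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;

		list_add_tail(&req->node, &sba->reqs_free_list);
	}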