Lines matching refs: sba

113 	struct sba_device *sba;
203 static struct sba_request *sba_alloc_request(struct sba_device *sba)
209 spin_lock_irqsave(&sba->reqs_lock, flags);
210 list_for_each_entry(req, &sba->reqs_free_list, node) {
212 list_move_tail(&req->node, &sba->reqs_alloc_list);
217 spin_unlock_irqrestore(&sba->reqs_lock, flags);
226 mbox_client_peek_data(sba->mchan);
235 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
241 /* Note: Must be called with sba->reqs_lock held */
242 static void _sba_pending_request(struct sba_device *sba,
245 lockdep_assert_held(&sba->reqs_lock);
248 list_move_tail(&req->node, &sba->reqs_pending_list);
249 if (list_empty(&sba->reqs_active_list))
250 sba->reqs_fence = false;
253 /* Note: Must be called with sba->reqs_lock held */
254 static bool _sba_active_request(struct sba_device *sba,
257 lockdep_assert_held(&sba->reqs_lock);
258 if (list_empty(&sba->reqs_active_list))
259 sba->reqs_fence = false;
260 if (sba->reqs_fence)
264 list_move_tail(&req->node, &sba->reqs_active_list);
266 sba->reqs_fence = true;
270 /* Note: Must be called with sba->reqs_lock held */
271 static void _sba_abort_request(struct sba_device *sba,
274 lockdep_assert_held(&sba->reqs_lock);
277 list_move_tail(&req->node, &sba->reqs_aborted_list);
278 if (list_empty(&sba->reqs_active_list))
279 sba->reqs_fence = false;
282 /* Note: Must be called with sba->reqs_lock held */
283 static void _sba_free_request(struct sba_device *sba,
286 lockdep_assert_held(&sba->reqs_lock);
289 list_move_tail(&req->node, &sba->reqs_free_list);
290 if (list_empty(&sba->reqs_active_list))
291 sba->reqs_fence = false;
298 struct sba_device *sba = req->sba;
300 spin_lock_irqsave(&sba->reqs_lock, flags);
302 _sba_free_request(sba, req);
304 _sba_free_request(sba, nreq);
306 spin_unlock_irqrestore(&sba->reqs_lock, flags);
313 struct sba_device *sba = req->sba;
315 spin_lock_irqsave(&sba->reqs_lock, flags);
321 spin_unlock_irqrestore(&sba->reqs_lock, flags);
324 static void sba_cleanup_nonpending_requests(struct sba_device *sba)
329 spin_lock_irqsave(&sba->reqs_lock, flags);
332 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
333 _sba_free_request(sba, req);
336 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
337 _sba_abort_request(sba, req);
344 spin_unlock_irqrestore(&sba->reqs_lock, flags);
347 static void sba_cleanup_pending_requests(struct sba_device *sba)
352 spin_lock_irqsave(&sba->reqs_lock, flags);
355 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
356 _sba_free_request(sba, req);
358 spin_unlock_irqrestore(&sba->reqs_lock, flags);
361 static int sba_send_mbox_request(struct sba_device *sba,
368 ret = mbox_send_message(sba->mchan, &req->msg);
370 dev_err(sba->dev, "send message failed with error %d", ret);
377 dev_err(sba->dev, "message error %d", ret);
381 mbox_client_txdone(sba->mchan, ret);
386 /* Note: Must be called with sba->reqs_lock held */
387 static void _sba_process_pending_requests(struct sba_device *sba)
395 while (!list_empty(&sba->reqs_pending_list) && count) {
397 req = list_first_entry(&sba->reqs_pending_list,
401 if (!_sba_active_request(sba, req))
405 ret = sba_send_mbox_request(sba, req);
407 _sba_pending_request(sba, req);
415 static void sba_process_received_request(struct sba_device *sba,
428 spin_lock_irqsave(&sba->reqs_lock, flags);
430 spin_unlock_irqrestore(&sba->reqs_lock, flags);
439 spin_lock_irqsave(&sba->reqs_lock, flags);
443 _sba_free_request(sba, nreq);
447 _sba_free_request(sba, first);
450 _sba_process_pending_requests(sba);
452 spin_unlock_irqrestore(&sba->reqs_lock, flags);
456 static void sba_write_stats_in_seqfile(struct sba_device *sba,
464 spin_lock_irqsave(&sba->reqs_lock, flags);
466 list_for_each_entry(req, &sba->reqs_free_list, node)
470 list_for_each_entry(req, &sba->reqs_alloc_list, node)
473 list_for_each_entry(req, &sba->reqs_pending_list, node)
476 list_for_each_entry(req, &sba->reqs_active_list, node)
479 list_for_each_entry(req, &sba->reqs_aborted_list, node)
482 spin_unlock_irqrestore(&sba->reqs_lock, flags);
484 seq_printf(file, "maximum requests = %d\n", sba->max_req);
515 struct sba_device *sba = to_sba_device(dchan);
518 spin_lock_irqsave(&sba->reqs_lock, flags);
519 _sba_process_pending_requests(sba);
520 spin_unlock_irqrestore(&sba->reqs_lock, flags);
527 struct sba_device *sba;
533 sba = to_sba_device(tx->chan);
537 spin_lock_irqsave(&sba->reqs_lock, flags);
539 _sba_pending_request(sba, req);
541 _sba_pending_request(sba, nreq);
542 spin_unlock_irqrestore(&sba->reqs_lock, flags);
552 struct sba_device *sba = to_sba_device(dchan);
558 mbox_client_peek_data(sba->mchan);
575 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
586 cmdsp->data_len = req->sba->hw_resp_size;
592 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
604 if (req->sba->hw_resp_size) {
607 cmdsp->resp_len = req->sba->hw_resp_size;
611 cmdsp->data_len = req->sba->hw_resp_size;
616 msg->sba.cmds = cmds;
617 msg->sba.cmds_count = cmdsp - cmds;
626 struct sba_device *sba = to_sba_device(dchan);
629 req = sba_alloc_request(sba);
692 if (req->sba->hw_resp_size) {
695 cmdsp->resp_len = req->sba->hw_resp_size;
704 msg->sba.cmds = cmds;
705 msg->sba.cmds_count = cmdsp - cmds;
711 sba_prep_dma_memcpy_req(struct sba_device *sba,
718 req = sba_alloc_request(sba);
741 struct sba_device *sba = to_sba_device(dchan);
746 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
748 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
831 if (req->sba->hw_resp_size) {
834 cmdsp->resp_len = req->sba->hw_resp_size;
843 msg->sba.cmds = cmds;
844 msg->sba.cmds_count = cmdsp - cmds;
850 sba_prep_dma_xor_req(struct sba_device *sba,
857 req = sba_alloc_request(sba);
880 struct sba_device *sba = to_sba_device(dchan);
884 if (unlikely(src_cnt > sba->max_xor_srcs))
889 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
891 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
1014 if (req->sba->hw_resp_size) {
1017 cmdsp->resp_len = req->sba->hw_resp_size;
1041 if (req->sba->hw_resp_size) {
1044 cmdsp->resp_len = req->sba->hw_resp_size;
1054 msg->sba.cmds = cmds;
1055 msg->sba.cmds_count = cmdsp - cmds;
1061 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1068 req = sba_alloc_request(sba);
1174 if (req->sba->hw_resp_size) {
1177 cmdsp->resp_len = req->sba->hw_resp_size;
1202 pos = (dpos < req->sba->max_pq_coefs) ?
1203 dpos : (req->sba->max_pq_coefs - 1);
1231 pos = (dpos < req->sba->max_pq_coefs) ?
1232 dpos : (req->sba->max_pq_coefs - 1);
1295 if (req->sba->hw_resp_size) {
1298 cmdsp->resp_len = req->sba->hw_resp_size;
1308 msg->sba.cmds = cmds;
1309 msg->sba.cmds_count = cmdsp - cmds;
1315 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1323 req = sba_alloc_request(sba);
1350 struct sba_device *sba = to_sba_device(dchan);
1354 if (unlikely(src_cnt > sba->max_pq_srcs))
1357 if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1368 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1384 req = sba_prep_dma_pq_single_req(sba,
1402 req = sba_prep_dma_pq_single_req(sba,
1416 req = sba_prep_dma_pq_req(sba, off,
1446 struct sba_device *sba = req->sba;
1450 dev_err(sba->dev, "%s got message with error %d",
1451 dma_chan_name(&sba->dma_chan), m->error);
1454 sba_process_received_request(sba, req);
1461 struct sba_device *sba = dev_get_drvdata(file->private);
1464 sba_write_stats_in_seqfile(sba, file);
1471 static int sba_prealloc_channel_resources(struct sba_device *sba)
1476 sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
1477 sba->max_resp_pool_size,
1478 &sba->resp_dma_base, GFP_KERNEL);
1479 if (!sba->resp_base)
1482 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
1483 sba->max_cmds_pool_size,
1484 &sba->cmds_dma_base, GFP_KERNEL);
1485 if (!sba->cmds_base) {
1490 spin_lock_init(&sba->reqs_lock);
1491 sba->reqs_fence = false;
1492 INIT_LIST_HEAD(&sba->reqs_alloc_list);
1493 INIT_LIST_HEAD(&sba->reqs_pending_list);
1494 INIT_LIST_HEAD(&sba->reqs_active_list);
1495 INIT_LIST_HEAD(&sba->reqs_aborted_list);
1496 INIT_LIST_HEAD(&sba->reqs_free_list);
1498 for (i = 0; i < sba->max_req; i++) {
1499 req = devm_kzalloc(sba->dev,
1500 struct_size(req, cmds, sba->max_cmd_per_req),
1507 req->sba = sba;
1511 for (j = 0; j < sba->max_cmd_per_req; j++) {
1513 req->cmds[j].cmd_dma = sba->cmds_base +
1514 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1515 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1516 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1520 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1523 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
1524 list_add_tail(&req->node, &sba->reqs_free_list);
1530 dma_free_coherent(sba->mbox_dev,
1531 sba->max_cmds_pool_size,
1532 sba->cmds_base, sba->cmds_dma_base);
1534 dma_free_coherent(sba->mbox_dev,
1535 sba->max_resp_pool_size,
1536 sba->resp_base, sba->resp_dma_base);
1540 static void sba_freeup_channel_resources(struct sba_device *sba)
1542 dmaengine_terminate_all(&sba->dma_chan);
1543 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
1544 sba->cmds_base, sba->cmds_dma_base);
1545 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
1546 sba->resp_base, sba->resp_dma_base);
1547 sba->resp_base = NULL;
1548 sba->resp_dma_base = 0;
1551 static int sba_async_register(struct sba_device *sba)
1554 struct dma_device *dma_dev = &sba->dma_dev;
1557 sba->dma_chan.device = dma_dev;
1558 dma_cookie_init(&sba->dma_chan);
1572 dma_dev->dev = sba->mbox_dev;
1591 dma_dev->max_xor = sba->max_xor_srcs;
1597 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1602 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1607 dev_err(sba->dev, "async device register error %d", ret);
1611 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1612 dma_chan_name(&sba->dma_chan),
1624 struct sba_device *sba;
1629 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1630 if (!sba)
1633 sba->dev = &pdev->dev;
1634 platform_set_drvdata(pdev, sba);
1643 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1644 sba->ver = SBA_VER_1;
1645 else if (of_device_is_compatible(sba->dev->of_node,
1646 "brcm,iproc-sba-v2"))
1647 sba->ver = SBA_VER_2;
1652 switch (sba->ver) {
1654 sba->hw_buf_size = 4096;
1655 sba->hw_resp_size = 8;
1656 sba->max_pq_coefs = 6;
1657 sba->max_pq_srcs = 6;
1660 sba->hw_buf_size = 4096;
1661 sba->hw_resp_size = 8;
1662 sba->max_pq_coefs = 30;
1668 sba->max_pq_srcs = 12;
1673 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
1674 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1675 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1676 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1677 sba->max_cmds_pool_size = sba->max_req *
1678 sba->max_cmd_per_req * sizeof(u64);
1681 sba->client.dev = &pdev->dev;
1682 sba->client.rx_callback = sba_receive_message;
1683 sba->client.tx_block = false;
1684 sba->client.knows_txdone = true;
1685 sba->client.tx_tout = 0;
1688 sba->mchan = mbox_request_channel(&sba->client, 0);
1689 if (IS_ERR(sba->mchan)) {
1690 ret = PTR_ERR(sba->mchan);
1705 sba->mbox_dev = &mbox_pdev->dev;
1708 ret = sba_prealloc_channel_resources(sba);
1717 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
1720 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
1726 ret = sba_async_register(sba);
1731 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
1732 dma_chan_name(&sba->dma_chan), sba->ver+1,
1733 dev_name(sba->mbox_dev));
1738 debugfs_remove_recursive(sba->root);
1739 sba_freeup_channel_resources(sba);
1741 mbox_free_channel(sba->mchan);
1747 struct sba_device *sba = platform_get_drvdata(pdev);
1749 dma_async_device_unregister(&sba->dma_dev);
1751 debugfs_remove_recursive(sba->root);
1753 sba_freeup_channel_resources(sba);
1755 mbox_free_channel(sba->mchan);
1761 { .compatible = "brcm,iproc-sba", },
1762 { .compatible = "brcm,iproc-sba-v2", },
1771 .name = "bcm-sba-raid",
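
The matches above trace one recurring pattern through the driver: each request sits on exactly one of the per-device lists (free, alloc, pending, active, aborted), every transition between them is a list_move_tail() performed under sba->reqs_lock, and reqs_fence is cleared whenever the active list drains. Below is a minimal sketch of that pattern, assuming the standard <linux/list.h>, <linux/spinlock.h> and <linux/lockdep.h> kernel APIs; the demo_* names are illustrative only and do not appear in the driver.

/*
 * Illustrative model of the request bookkeeping seen in the listing:
 * internal helpers assume reqs_lock is already held (as the driver's
 * lockdep_assert_held() lines show), while public entry points take the
 * lock with spin_lock_irqsave()/spin_unlock_irqrestore().
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/types.h>

struct demo_device {
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_free_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
};

struct demo_request {
	struct list_head node;
};

/* Caller must hold reqs_lock; moves the request onto the pending list and
 * drops the fence once no request remains active. */
static void demo_pending_request(struct demo_device *d, struct demo_request *r)
{
	lockdep_assert_held(&d->reqs_lock);
	list_move_tail(&r->node, &d->reqs_pending_list);
	if (list_empty(&d->reqs_active_list))
		d->reqs_fence = false;
}

/* Public entry point: takes the lock itself, mirroring the
 * spin_lock_irqsave()/spin_unlock_irqrestore() pairs in the listing. */
static void demo_queue_request(struct demo_device *d, struct demo_request *r)
{
	unsigned long flags;

	spin_lock_irqsave(&d->reqs_lock, flags);
	demo_pending_request(d, r);
	spin_unlock_irqrestore(&d->reqs_lock, flags);
}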