Lines Matching refs:sba (references to the symbol sba in the bcm-sba-raid dmaengine driver for the Broadcom iProc SBA RAID offload engine). Each entry gives the source line number followed by the matching line. Notes on the request state machine, transfer chunking, and resource-pool sizing follow the listing.

105 	struct sba_device *sba;
195 static struct sba_request *sba_alloc_request(struct sba_device *sba)
201 spin_lock_irqsave(&sba->reqs_lock, flags);
202 list_for_each_entry(req, &sba->reqs_free_list, node) {
204 list_move_tail(&req->node, &sba->reqs_alloc_list);
209 spin_unlock_irqrestore(&sba->reqs_lock, flags);
218 mbox_client_peek_data(sba->mchan);
227 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
233 /* Note: Must be called with sba->reqs_lock held */
234 static void _sba_pending_request(struct sba_device *sba,
237 lockdep_assert_held(&sba->reqs_lock);
240 list_move_tail(&req->node, &sba->reqs_pending_list);
241 if (list_empty(&sba->reqs_active_list))
242 sba->reqs_fence = false;
245 /* Note: Must be called with sba->reqs_lock held */
246 static bool _sba_active_request(struct sba_device *sba,
249 lockdep_assert_held(&sba->reqs_lock);
250 if (list_empty(&sba->reqs_active_list))
251 sba->reqs_fence = false;
252 if (sba->reqs_fence)
256 list_move_tail(&req->node, &sba->reqs_active_list);
258 sba->reqs_fence = true;
262 /* Note: Must be called with sba->reqs_lock held */
263 static void _sba_abort_request(struct sba_device *sba,
266 lockdep_assert_held(&sba->reqs_lock);
269 list_move_tail(&req->node, &sba->reqs_aborted_list);
270 if (list_empty(&sba->reqs_active_list))
271 sba->reqs_fence = false;
274 /* Note: Must be called with sba->reqs_lock held */
275 static void _sba_free_request(struct sba_device *sba,
278 lockdep_assert_held(&sba->reqs_lock);
281 list_move_tail(&req->node, &sba->reqs_free_list);
282 if (list_empty(&sba->reqs_active_list))
283 sba->reqs_fence = false;
290 struct sba_device *sba = req->sba;
292 spin_lock_irqsave(&sba->reqs_lock, flags);
294 _sba_free_request(sba, req);
296 _sba_free_request(sba, nreq);
298 spin_unlock_irqrestore(&sba->reqs_lock, flags);
305 struct sba_device *sba = req->sba;
307 spin_lock_irqsave(&sba->reqs_lock, flags);
313 spin_unlock_irqrestore(&sba->reqs_lock, flags);
316 static void sba_cleanup_nonpending_requests(struct sba_device *sba)
321 spin_lock_irqsave(&sba->reqs_lock, flags);
324 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
325 _sba_free_request(sba, req);
328 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
329 _sba_abort_request(sba, req);
336 spin_unlock_irqrestore(&sba->reqs_lock, flags);
339 static void sba_cleanup_pending_requests(struct sba_device *sba)
344 spin_lock_irqsave(&sba->reqs_lock, flags);
347 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
348 _sba_free_request(sba, req);
350 spin_unlock_irqrestore(&sba->reqs_lock, flags);
353 static int sba_send_mbox_request(struct sba_device *sba,
360 ret = mbox_send_message(sba->mchan, &req->msg);
362 dev_err(sba->dev, "send message failed with error %d", ret);
369 dev_err(sba->dev, "message error %d", ret);
373 mbox_client_txdone(sba->mchan, ret);
378 /* Note: Must be called with sba->reqs_lock held */
379 static void _sba_process_pending_requests(struct sba_device *sba)
387 while (!list_empty(&sba->reqs_pending_list) && count) {
389 req = list_first_entry(&sba->reqs_pending_list,
393 if (!_sba_active_request(sba, req))
397 ret = sba_send_mbox_request(sba, req);
399 _sba_pending_request(sba, req);
407 static void sba_process_received_request(struct sba_device *sba,
420 spin_lock_irqsave(&sba->reqs_lock, flags);
422 spin_unlock_irqrestore(&sba->reqs_lock, flags);
431 spin_lock_irqsave(&sba->reqs_lock, flags);
435 _sba_free_request(sba, nreq);
439 _sba_free_request(sba, first);
442 _sba_process_pending_requests(sba);
444 spin_unlock_irqrestore(&sba->reqs_lock, flags);
448 static void sba_write_stats_in_seqfile(struct sba_device *sba,
456 spin_lock_irqsave(&sba->reqs_lock, flags);
458 list_for_each_entry(req, &sba->reqs_free_list, node)
462 list_for_each_entry(req, &sba->reqs_alloc_list, node)
465 list_for_each_entry(req, &sba->reqs_pending_list, node)
468 list_for_each_entry(req, &sba->reqs_active_list, node)
471 list_for_each_entry(req, &sba->reqs_aborted_list, node)
474 spin_unlock_irqrestore(&sba->reqs_lock, flags);
476 seq_printf(file, "maximum requests = %d\n", sba->max_req);
507 struct sba_device *sba = to_sba_device(dchan);
510 spin_lock_irqsave(&sba->reqs_lock, flags);
511 _sba_process_pending_requests(sba);
512 spin_unlock_irqrestore(&sba->reqs_lock, flags);
519 struct sba_device *sba;
525 sba = to_sba_device(tx->chan);
529 spin_lock_irqsave(&sba->reqs_lock, flags);
531 _sba_pending_request(sba, req);
533 _sba_pending_request(sba, nreq);
534 spin_unlock_irqrestore(&sba->reqs_lock, flags);
544 struct sba_device *sba = to_sba_device(dchan);
550 mbox_client_peek_data(sba->mchan);
567 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
578 cmdsp->data_len = req->sba->hw_resp_size;
584 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
596 if (req->sba->hw_resp_size) {
599 cmdsp->resp_len = req->sba->hw_resp_size;
603 cmdsp->data_len = req->sba->hw_resp_size;
608 msg->sba.cmds = cmds;
609 msg->sba.cmds_count = cmdsp - cmds;
618 struct sba_device *sba = to_sba_device(dchan);
621 req = sba_alloc_request(sba);
684 if (req->sba->hw_resp_size) {
687 cmdsp->resp_len = req->sba->hw_resp_size;
696 msg->sba.cmds = cmds;
697 msg->sba.cmds_count = cmdsp - cmds;
703 sba_prep_dma_memcpy_req(struct sba_device *sba,
710 req = sba_alloc_request(sba);
733 struct sba_device *sba = to_sba_device(dchan);
738 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
740 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
823 if (req->sba->hw_resp_size) {
826 cmdsp->resp_len = req->sba->hw_resp_size;
835 msg->sba.cmds = cmds;
836 msg->sba.cmds_count = cmdsp - cmds;
842 sba_prep_dma_xor_req(struct sba_device *sba,
849 req = sba_alloc_request(sba);
872 struct sba_device *sba = to_sba_device(dchan);
876 if (unlikely(src_cnt > sba->max_xor_srcs))
881 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
883 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
1006 if (req->sba->hw_resp_size) {
1009 cmdsp->resp_len = req->sba->hw_resp_size;
1033 if (req->sba->hw_resp_size) {
1036 cmdsp->resp_len = req->sba->hw_resp_size;
1046 msg->sba.cmds = cmds;
1047 msg->sba.cmds_count = cmdsp - cmds;
1053 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1060 req = sba_alloc_request(sba);
1166 if (req->sba->hw_resp_size) {
1169 cmdsp->resp_len = req->sba->hw_resp_size;
1194 pos = (dpos < req->sba->max_pq_coefs) ?
1195 dpos : (req->sba->max_pq_coefs - 1);
1223 pos = (dpos < req->sba->max_pq_coefs) ?
1224 dpos : (req->sba->max_pq_coefs - 1);
1287 if (req->sba->hw_resp_size) {
1290 cmdsp->resp_len = req->sba->hw_resp_size;
1300 msg->sba.cmds = cmds;
1301 msg->sba.cmds_count = cmdsp - cmds;
1307 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1315 req = sba_alloc_request(sba);
1342 struct sba_device *sba = to_sba_device(dchan);
1346 if (unlikely(src_cnt > sba->max_pq_srcs))
1349 if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1360 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1376 req = sba_prep_dma_pq_single_req(sba,
1394 req = sba_prep_dma_pq_single_req(sba,
1408 req = sba_prep_dma_pq_req(sba, off,
1438 struct sba_device *sba = req->sba;
1442 dev_err(sba->dev, "%s got message with error %d",
1443 dma_chan_name(&sba->dma_chan), m->error);
1446 sba_process_received_request(sba, req);
1453 struct sba_device *sba = dev_get_drvdata(file->private);
1456 sba_write_stats_in_seqfile(sba, file);
1463 static int sba_prealloc_channel_resources(struct sba_device *sba)
1468 sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
1469 sba->max_resp_pool_size,
1470 &sba->resp_dma_base, GFP_KERNEL);
1471 if (!sba->resp_base)
1474 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
1475 sba->max_cmds_pool_size,
1476 &sba->cmds_dma_base, GFP_KERNEL);
1477 if (!sba->cmds_base) {
1482 spin_lock_init(&sba->reqs_lock);
1483 sba->reqs_fence = false;
1484 INIT_LIST_HEAD(&sba->reqs_alloc_list);
1485 INIT_LIST_HEAD(&sba->reqs_pending_list);
1486 INIT_LIST_HEAD(&sba->reqs_active_list);
1487 INIT_LIST_HEAD(&sba->reqs_aborted_list);
1488 INIT_LIST_HEAD(&sba->reqs_free_list);
1490 for (i = 0; i < sba->max_req; i++) {
1491 req = devm_kzalloc(sba->dev,
1492 struct_size(req, cmds, sba->max_cmd_per_req),
1499 req->sba = sba;
1503 for (j = 0; j < sba->max_cmd_per_req; j++) {
1505 req->cmds[j].cmd_dma = sba->cmds_base +
1506 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1507 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1508 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1512 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1515 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
1516 list_add_tail(&req->node, &sba->reqs_free_list);
1522 dma_free_coherent(sba->mbox_dev,
1523 sba->max_cmds_pool_size,
1524 sba->cmds_base, sba->cmds_dma_base);
1526 dma_free_coherent(sba->mbox_dev,
1527 sba->max_resp_pool_size,
1528 sba->resp_base, sba->resp_dma_base);
1532 static void sba_freeup_channel_resources(struct sba_device *sba)
1534 dmaengine_terminate_all(&sba->dma_chan);
1535 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
1536 sba->cmds_base, sba->cmds_dma_base);
1537 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
1538 sba->resp_base, sba->resp_dma_base);
1539 sba->resp_base = NULL;
1540 sba->resp_dma_base = 0;
1543 static int sba_async_register(struct sba_device *sba)
1546 struct dma_device *dma_dev = &sba->dma_dev;
1549 sba->dma_chan.device = dma_dev;
1550 dma_cookie_init(&sba->dma_chan);
1564 dma_dev->dev = sba->mbox_dev;
1583 dma_dev->max_xor = sba->max_xor_srcs;
1589 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1594 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1599 dev_err(sba->dev, "async device register error %d", ret);
1603 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1604 dma_chan_name(&sba->dma_chan),
1616 struct sba_device *sba;
1621 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1622 if (!sba)
1625 sba->dev = &pdev->dev;
1626 platform_set_drvdata(pdev, sba);
1635 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1636 sba->ver = SBA_VER_1;
1637 else if (of_device_is_compatible(sba->dev->of_node,
1638 "brcm,iproc-sba-v2"))
1639 sba->ver = SBA_VER_2;
1644 switch (sba->ver) {
1646 sba->hw_buf_size = 4096;
1647 sba->hw_resp_size = 8;
1648 sba->max_pq_coefs = 6;
1649 sba->max_pq_srcs = 6;
1652 sba->hw_buf_size = 4096;
1653 sba->hw_resp_size = 8;
1654 sba->max_pq_coefs = 30;
1660 sba->max_pq_srcs = 12;
1665 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
1666 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1667 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1668 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1669 sba->max_cmds_pool_size = sba->max_req *
1670 sba->max_cmd_per_req * sizeof(u64);
1673 sba->client.dev = &pdev->dev;
1674 sba->client.rx_callback = sba_receive_message;
1675 sba->client.tx_block = false;
1676 sba->client.knows_txdone = true;
1677 sba->client.tx_tout = 0;
1680 sba->mchan = mbox_request_channel(&sba->client, 0);
1681 if (IS_ERR(sba->mchan)) {
1682 ret = PTR_ERR(sba->mchan);
1697 sba->mbox_dev = &mbox_pdev->dev;
1700 ret = sba_prealloc_channel_resources(sba);
1709 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
1712 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
1718 ret = sba_async_register(sba);
1723 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
1724 dma_chan_name(&sba->dma_chan), sba->ver+1,
1725 dev_name(sba->mbox_dev));
1730 debugfs_remove_recursive(sba->root);
1731 sba_freeup_channel_resources(sba);
1733 mbox_free_channel(sba->mchan);
1739 struct sba_device *sba = platform_get_drvdata(pdev);
1741 dma_async_device_unregister(&sba->dma_dev);
1743 debugfs_remove_recursive(sba->root);
1745 sba_freeup_channel_resources(sba);
1747 mbox_free_channel(sba->mchan);
1753 { .compatible = "brcm,iproc-sba", },
1754 { .compatible = "brcm,iproc-sba-v2", },
1763 .name = "bcm-sba-raid",
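
Request state machine (lines 195-283 and 378-444). A request moves between the free, alloc, pending, active and aborted lists, always under sba->reqs_lock (each helper asserts this with lockdep_assert_held()). Whenever the active list drains, sba->reqs_fence is cleared; once a fenced request has been activated, _sba_active_request() leaves later requests on the pending list until that drain happens. The following user-space sketch models only this gating logic under those assumptions; the model_* names are hypothetical stand-ins for the sba_request handling shown above, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct model_chan {
	int	active;		/* stand-in for sba->reqs_active_list */
	bool	fence;		/* stand-in for sba->reqs_fence */
};

/* Models _sba_active_request(): refuse new work while a fence is pending. */
static bool model_activate(struct model_chan *c, bool fenced)
{
	if (c->active == 0)
		c->fence = false;
	if (c->fence)
		return false;		/* caller keeps the request pending */
	c->active++;
	if (fenced)
		c->fence = true;	/* later requests wait for the drain */
	return true;
}

/* Models the fence reset in _sba_free_request()/_sba_abort_request(). */
static void model_retire(struct model_chan *c)
{
	if (c->active && --c->active == 0)
		c->fence = false;
}

int main(void)
{
	struct model_chan c = { 0 };

	printf("plain   -> %d\n", model_activate(&c, false));	/* 1: activated */
	printf("fenced  -> %d\n", model_activate(&c, true));	/* 1: activated, fence set */
	printf("blocked -> %d\n", model_activate(&c, false));	/* 0: stays pending */
	model_retire(&c);
	model_retire(&c);
	printf("drained -> %d\n", model_activate(&c, false));	/* 1: fence cleared */
	return 0;
}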
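
Transfer chunking (lines 733-740, 872-883 and 1342-1408). Each prep callback clamps the per-request length to sba->hw_buf_size (4096 bytes for both hardware versions, lines 1646 and 1652) and prepares one sba_request per chunk; the first/nreq handling around lines 290-298 and 431-444 then treats those chunks as one chained transaction. A minimal sketch of the clamp-and-advance loop, assuming a hypothetical prep_chunk() hook in place of sba_prep_dma_memcpy_req():

#include <stddef.h>
#include <stdio.h>

#define HW_BUF_SIZE 4096	/* sba->hw_buf_size in the listing */

/* Hypothetical per-chunk hook; the driver allocates and fills one request here. */
static int prep_chunk(size_t dst_off, size_t src_off, size_t len)
{
	printf("chunk: dst+%zu src+%zu len %zu\n", dst_off, src_off, len);
	return 0;
}

static int prep_copy(size_t dst, size_t src, size_t len)
{
	size_t off = 0;

	while (len) {
		/* Same clamp as "req_len = (len < sba->hw_buf_size) ? ..." */
		size_t req_len = (len < HW_BUF_SIZE) ? len : HW_BUF_SIZE;
		int ret = prep_chunk(dst + off, src + off, req_len);

		if (ret)
			return ret;
		off += req_len;
		len -= req_len;
	}
	return 0;
}

int main(void)
{
	return prep_copy(0, 0, 10000);	/* expect chunks of 4096, 4096, 1808 */
}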
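
Resource-pool sizing (lines 1665-1670 and 1468-1476). The per-channel pools are sized directly from the version parameters read in probe: max_cmd_per_req = max_pq_srcs + 3 and max_xor_srcs = max_cmd_per_req - 1; the response pool is max_req * hw_resp_size bytes and the command pool is max_req * max_cmd_per_req * sizeof(u64) bytes. Worked through for SBA_VER_1 (max_pq_srcs = 6, hw_resp_size = 8), that is 9 commands per request, 8 XOR sources, and 8 bytes of response plus 72 bytes of command memory per request; for SBA_VER_2 (max_pq_srcs = 12), 15 commands per request, 14 XOR sources, and 120 bytes of command memory per request. Both pools come from dma_alloc_coherent() against the mailbox controller's device (sba->mbox_dev), so the offload engine can fetch commands and write responses without per-request mapping.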