Lines Matching defs:dbc
909 if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
916 list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
926 struct dma_bridge_chan *dbc;
1001 dbc = &qdev->dbc[args->hdr.dbc_id];
1002 rcu_id = srcu_read_lock(&dbc->ch_lock);
1003 if (dbc->usr != usr) {
1019 bo->dbc = dbc;
1020 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1031 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1046 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1053 avail += dbc->nelem;
1060 if (tail + slice->nents > dbc->nelem) {
1061 avail = dbc->nelem - tail;
1063 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
1068 memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
1070 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
1074 *ptail = (tail + slice->nents) % dbc->nelem;
1087 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1100 avail += dbc->nelem;
1124 if (tail + first_n > dbc->nelem) {
1125 avail = dbc->nelem - tail;
1127 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
1132 memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
1134 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
1142 last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
1154 *ptail = (tail + first_n + 1) % dbc->nelem;
1161 bool is_partial, struct dma_bridge_chan *dbc, u32 head,
1197 spin_lock_irqsave(&dbc->xfer_lock, flags);
1201 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1206 bo->req_id = dbc->next_req_id++;
1228 dbc->id, head, tail);
1230 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
1233 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1238 list_add_tail(&bo->xfer_list, &dbc->xfer_list);
1239 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1249 spin_lock_irqsave(&dbc->xfer_lock, flags);
1250 bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
1254 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1296 struct dma_bridge_chan *dbc;
1348 dbc = &qdev->dbc[args->hdr.dbc_id];
1350 rcu_id = srcu_read_lock(&dbc->ch_lock);
1351 if (!dbc->usr || dbc->usr->handle != usr->handle) {
1356 head = readl(dbc->dbc_base + REQHP_OFF);
1357 tail = readl(dbc->dbc_base + REQTP_OFF);
1365 queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
1367 ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
1374 writel(tail, dbc->dbc_base + REQTP_OFF);
1380 schedule_work(&dbc->poll_work);
1383 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1407 * Each dbc has a completion queue. Entries in the queue correspond to DMA
1438 struct dma_bridge_chan *dbc = data;
1443 rcu_id = srcu_read_lock(&dbc->ch_lock);
1445 if (!dbc->usr) {
1446 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1450 head = readl(dbc->dbc_base + RSPHP_OFF);
1452 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1456 tail = readl(dbc->dbc_base + RSPTP_OFF);
1458 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1463 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1468 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1474 struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
1480 rcu_id = srcu_read_lock(&dbc->ch_lock);
1483 if (dbc->qdev->in_reset) {
1484 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1487 if (!dbc->usr) {
1488 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1491 spin_lock_irqsave(&dbc->xfer_lock, flags);
1492 if (list_empty(&dbc->xfer_list)) {
1493 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1494 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1497 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1499 head = readl(dbc->dbc_base + RSPHP_OFF);
1501 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1505 tail = readl(dbc->dbc_base + RSPTP_OFF);
1507 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1512 irq_wake_thread(dbc->irq, dbc);
1513 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1524 struct dma_bridge_chan *dbc = data;
1537 rcu_id = srcu_read_lock(&dbc->ch_lock);
1539 head = readl(dbc->dbc_base + RSPHP_OFF);
1543 qdev = dbc->qdev;
1555 if (!dbc->usr)
1558 tail = readl(dbc->dbc_base + RSPTP_OFF);
1576 rsp = dbc->rsp_q_base + head * sizeof(*rsp);
1581 spin_lock_irqsave(&dbc->xfer_lock, flags);
1588 list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
1610 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1611 head = (head + 1) % dbc->nelem;
1618 writel(head, dbc->dbc_base + RSPHP_OFF);
1627 schedule_work(&dbc->poll_work);
1629 tail = readl(dbc->dbc_base + RSPTP_OFF);
1635 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1639 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1643 schedule_work(&dbc->poll_work);
1652 struct dma_bridge_chan *dbc;
1683 dbc = &qdev->dbc[args->dbc_id];
1685 rcu_id = srcu_read_lock(&dbc->ch_lock);
1686 if (dbc->usr != usr) {
1708 if (!dbc->usr)
1714 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1800 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
1805 spin_lock_irqsave(&dbc->xfer_lock, flags);
1806 while (!list_empty(&dbc->xfer_list)) {
1807 bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
1810 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1814 spin_lock_irqsave(&dbc->xfer_lock, flags);
1816 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1821 if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
1824 qdev->dbc[dbc_id].usr = NULL;
1825 synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
1839 qdev->dbc[dbc_id].usr = usr;
1844 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1846 dbc->usr = NULL;
1847 empty_xfer_list(qdev, dbc);
1848 synchronize_srcu(&dbc->ch_lock);
1853 empty_xfer_list(qdev, dbc);
1860 struct dma_bridge_chan *dbc;
1862 dbc = &qdev->dbc[dbc_id];
1863 if (!dbc->in_use)
1868 dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
1869 dbc->total_size = 0;
1870 dbc->req_q_base = NULL;
1871 dbc->dma_addr = 0;
1872 dbc->nelem = 0;
1873 dbc->usr = NULL;
1875 list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
1882 bo->dbc = NULL;
1896 dbc->in_use = false;
1897 wake_up(&dbc->dbc_release);