Lines matching references to qp

Each entry below is a cross-reference hit from the NTB transport driver (ntb_transport): the leading number is the line's position in the driver source, and the rest is the source line wherever it touches the per-queue context qp.
120 struct ntb_transport_qp *qp;
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
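The declarations above define the per-queue context and its callbacks; QP_TO_MW() stripes queue pairs across the device's memory windows round-robin, so several queues can share one window. A minimal, runnable illustration of that striping arithmetic (the mw_count and qp_count values are assumed):

/* Userspace sketch of the QP_TO_MW() striping: queue qp lands in
 * memory window qp % mw_count, exactly as the macro above computes. */
#include <stdio.h>

int main(void)
{
	unsigned int mw_count = 2, qp_count = 4;	/* assumed sizes */

	for (unsigned int qp = 0; qp < qp_count; qp++)
		printf("qp%u -> mw%u\n", qp, qp % mw_count);
	return 0;
}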
468 struct ntb_transport_qp *qp;
472 qp = filp->private_data;
474 if (!qp || !qp->link_is_up)
487 "rx_bytes - \t%llu\n", qp->rx_bytes);
489 "rx_pkts - \t%llu\n", qp->rx_pkts);
491 "rx_memcpy - \t%llu\n", qp->rx_memcpy);
493 "rx_async - \t%llu\n", qp->rx_async);
495 "rx_ring_empty - %llu\n", qp->rx_ring_empty);
497 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
499 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
501 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
503 "rx_buff - \t0x%p\n", qp->rx_buff);
505 "rx_index - \t%u\n", qp->rx_index);
507 "rx_max_entry - \t%u\n", qp->rx_max_entry);
509 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
512 "tx_bytes - \t%llu\n", qp->tx_bytes);
514 "tx_pkts - \t%llu\n", qp->tx_pkts);
516 "tx_memcpy - \t%llu\n", qp->tx_memcpy);
518 "tx_async - \t%llu\n", qp->tx_async);
520 "tx_ring_full - \t%llu\n", qp->tx_ring_full);
522 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
524 "tx_mw - \t0x%p\n", qp->tx_mw);
526 "tx_index (H) - \t%u\n", qp->tx_index);
529 qp->remote_rx_info->entry);
531 "tx_max_entry - \t%u\n", qp->tx_max_entry);
534 ntb_transport_tx_free_entry(qp));
540 qp->tx_dma_chan ? "Yes" : "No");
543 qp->rx_dma_chan ? "Yes" : "No");
546 qp->link_is_up ? "Up" : "Down");
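The fragments above come from the per-queue debugfs "stats" read handler: the qp is recovered from filp->private_data and every counter is formatted into a buffer that is copied to userspace. A condensed, hedged sketch of that pattern inside the driver (only two counters shown, and the function name is illustrative):

static ssize_t qp_stats_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *offp)
{
	struct ntb_transport_qp *qp = filp->private_data;
	char buf[128];
	int len;

	if (!qp || !qp->link_is_up)
		return 0;

	/* the real handler prints every counter listed above */
	len = scnprintf(buf, sizeof(buf),
			"rx_bytes - \t%llu\n"
			"tx_bytes - \t%llu\n",
			qp->rx_bytes, qp->tx_bytes);

	return simple_read_from_buffer(ubuf, count, offp, buf, len);
}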
618 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
642 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
645 qp->remote_rx_info = qp->rx_buff + rx_size;
648 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
649 qp->rx_max_entry = rx_size / qp->rx_max_frame;
650 qp->rx_index = 0;
658 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
663 entry->qp = qp;
664 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
665 &qp->rx_free_q);
666 qp->rx_alloc_entry++;
669 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
672 for (i = 0; i < qp->rx_max_entry; i++) {
673 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
678 qp->rx_pkts = 0;
679 qp->tx_pkts = 0;
680 qp->tx_index = 0;
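The memory-window setup lines above carve a queue's receive region out of the shared window: a small rx_info footer sits at the end of the region, the frame size is capped at min(transport_mtu, rx_size / 2), and each frame keeps its payload header at the frame's tail (hence the "(i + 1) - sizeof(...)" offsets). A runnable sketch of that carving, with assumed sizes:

/* Assumes a 64 KiB per-queue region and a 64 KiB transport_mtu; the
 * three-field header mirrors the driver's ver/len/flags layout. */
#include <stdio.h>

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

int main(void)
{
	unsigned int rx_size = 64 * 1024;
	unsigned int transport_mtu = 64 * 1024;
	unsigned int rx_max_frame = transport_mtu < rx_size / 2 ?
				    transport_mtu : rx_size / 2;
	unsigned int rx_max_entry = rx_size / rx_max_frame;

	printf("%u frames of %u bytes, %zu payload bytes each\n",
	       rx_max_entry, rx_max_frame,
	       rx_max_frame - sizeof(struct ntb_payload_header));
	return 0;
}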
687 struct ntb_transport_qp *qp = dev;
689 tasklet_schedule(&qp->rxc_db_work);
697 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
706 qp->peer_msi_desc.addr_offset =
707 ntb_peer_spad_read(qp->ndev, PIDX, spad);
708 qp->peer_msi_desc.data =
709 ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);
711 dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n",
712 qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data);
714 if (qp->peer_msi_desc.addr_offset) {
715 qp->use_msi = true;
716 dev_info(&qp->ndev->pdev->dev,
724 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
732 dev_warn_once(&qp->ndev->pdev->dev,
737 ntb_spad_write(qp->ndev, spad, 0);
738 ntb_spad_write(qp->ndev, spad + 1, 0);
740 if (!qp->msi_irq) {
741 qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr,
742 KBUILD_MODNAME, qp,
743 &qp->msi_desc);
744 if (qp->msi_irq < 0) {
745 dev_warn(&qp->ndev->pdev->dev,
746 "Unable to allocate MSI interrupt for qp%d\n",
752 rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
756 rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);
760 dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n",
761 qp_num, qp->msi_irq, qp->msi_desc.addr_offset,
762 qp->msi_desc.data);
767 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
914 static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
916 qp->link_is_up = false;
917 qp->active = false;
919 qp->tx_index = 0;
920 qp->rx_index = 0;
921 qp->rx_bytes = 0;
922 qp->rx_pkts = 0;
923 qp->rx_ring_empty = 0;
924 qp->rx_err_no_buf = 0;
925 qp->rx_err_oflow = 0;
926 qp->rx_err_ver = 0;
927 qp->rx_memcpy = 0;
928 qp->rx_async = 0;
929 qp->tx_bytes = 0;
930 qp->tx_pkts = 0;
931 qp->tx_ring_full = 0;
932 qp->tx_err_no_buf = 0;
933 qp->tx_memcpy = 0;
934 qp->tx_async = 0;
937 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
939 ntb_qp_link_context_reset(qp);
940 if (qp->remote_rx_info)
941 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
944 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
946 struct ntb_transport_ctx *nt = qp->transport;
949 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
951 cancel_delayed_work_sync(&qp->link_work);
952 ntb_qp_link_down_reset(qp);
954 if (qp->event_handler)
955 qp->event_handler(qp->cb_data, qp->link_is_up);
960 struct ntb_transport_qp *qp = container_of(work,
963 struct ntb_transport_ctx *nt = qp->transport;
965 ntb_qp_link_cleanup(qp);
968 schedule_delayed_work(&qp->link_work,
972 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
974 schedule_work(&qp->link_cleanup);
979 struct ntb_transport_qp *qp;
988 qp = &nt->qp_vec[i];
989 ntb_qp_link_cleanup(qp);
990 cancel_work_sync(&qp->link_cleanup);
991 cancel_delayed_work_sync(&qp->link_work);
1106 struct ntb_transport_qp *qp = &nt->qp_vec[i];
1111 if (qp->client_ready)
1112 schedule_delayed_work(&qp->link_work, 0);
1133 struct ntb_transport_qp *qp = container_of(work,
1136 struct pci_dev *pdev = qp->ndev->pdev;
1137 struct ntb_transport_ctx *nt = qp->transport;
1144 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
1146 /* query remote spad for qp ready bits */
1150 if (val & BIT(qp->qp_num)) {
1151 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
1152 qp->link_is_up = true;
1153 qp->active = true;
1155 if (qp->event_handler)
1156 qp->event_handler(qp->cb_data, qp->link_is_up);
1158 if (qp->active)
1159 tasklet_schedule(&qp->rxc_db_work);
1161 schedule_delayed_work(&qp->link_work,
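The link work above implements the per-queue handshake over the QP_LINKS scratchpad: each side ORs its queue bit into the peer's scratchpad, then checks whether the peer has set the matching bit on the local side, retrying from delayed work until it has. A condensed, hedged sketch of that flow (driver-internal types assumed in scope; the retry interval and exact read/write ordering are approximated):

static void qp_link_poll(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	u32 val;

	/* advertise our readiness in the peer's QP_LINKS scratchpad */
	val = ntb_peer_spad_read(nt->ndev, PIDX, QP_LINKS);
	ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));

	/* has the peer set our bit on the local side yet? */
	val = ntb_spad_read(nt->ndev, QP_LINKS);
	if (val & BIT(qp->qp_num)) {
		qp->link_is_up = true;
		qp->active = true;
		tasklet_schedule(&qp->rxc_db_work);	/* drain anything pending */
	} else {
		schedule_delayed_work(&qp->link_work, msecs_to_jiffies(1000));
	}
}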
1168 struct ntb_transport_qp *qp;
1180 qp = &nt->qp_vec[qp_num];
1181 qp->qp_num = qp_num;
1182 qp->transport = nt;
1183 qp->ndev = nt->ndev;
1184 qp->client_ready = false;
1185 qp->event_handler = NULL;
1186 ntb_qp_link_context_reset(qp);
1202 qp->tx_mw_size = tx_size;
1203 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1204 if (!qp->tx_mw)
1207 qp->tx_mw_phys = mw_base + qp_offset;
1208 if (!qp->tx_mw_phys)
1212 qp->rx_info = qp->tx_mw + tx_size;
1215 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
1216 qp->tx_max_entry = tx_size / qp->tx_max_frame;
1221 snprintf(debugfs_name, 4, "qp%d", qp_num);
1222 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
1225 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
1226 qp->debugfs_dir, qp,
1229 qp->debugfs_dir = NULL;
1230 qp->debugfs_stats = NULL;
1233 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
1234 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
1236 spin_lock_init(&qp->ntb_rx_q_lock);
1237 spin_lock_init(&qp->ntb_tx_free_q_lock);
1239 INIT_LIST_HEAD(&qp->rx_post_q);
1240 INIT_LIST_HEAD(&qp->rx_pend_q);
1241 INIT_LIST_HEAD(&qp->rx_free_q);
1242 INIT_LIST_HEAD(&qp->tx_free_q);
1244 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
1245 (unsigned long)qp);
1413 struct ntb_transport_qp *qp;
1423 /* verify that all the qp's are freed */
1425 qp = &nt->qp_vec[i];
1427 ntb_transport_free_queue(qp);
1428 debugfs_remove_recursive(qp->debugfs_dir);
1446 static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1453 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1455 while (!list_empty(&qp->rx_post_q)) {
1456 entry = list_first_entry(&qp->rx_post_q,
1462 iowrite32(entry->rx_index, &qp->rx_info->entry);
1467 list_move_tail(&entry->entry, &qp->rx_free_q);
1469 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1471 if (qp->rx_handler && qp->client_ready)
1472 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1474 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1477 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1496 struct ntb_transport_qp *qp = entry->qp;
1497 void *offset = qp->rx_buff + qp->rx_max_frame *
1498 qp->rx_index;
1501 qp->rx_memcpy++;
1513 ntb_complete_rxc(entry->qp);
1532 struct ntb_transport_qp *qp = entry->qp;
1533 struct dma_chan *chan = qp->rx_dma_chan;
1583 qp->last_cookie = cookie;
1585 qp->rx_async++;
1599 struct ntb_transport_qp *qp = entry->qp;
1600 struct dma_chan *chan = qp->rx_dma_chan;
1614 qp->rx_async++;
1620 qp->rx_memcpy++;
1623 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1629 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1630 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1632 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
1633 qp->qp_num, hdr->ver, hdr->len, hdr->flags);
1636 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
1637 qp->rx_ring_empty++;
1642 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
1643 ntb_qp_link_down(qp);
1648 if (hdr->ver != (u32)qp->rx_pkts) {
1649 dev_dbg(&qp->ndev->pdev->dev,
1651 qp->rx_pkts, hdr->ver);
1652 qp->rx_err_ver++;
1656 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1658 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1659 qp->rx_err_no_buf++;
1664 entry->rx_index = qp->rx_index;
1667 dev_dbg(&qp->ndev->pdev->dev,
1670 qp->rx_err_oflow++;
1675 ntb_complete_rxc(qp);
1677 dev_dbg(&qp->ndev->pdev->dev,
1679 qp->rx_index, hdr->ver, hdr->len, entry->len);
1681 qp->rx_bytes += hdr->len;
1682 qp->rx_pkts++;
1689 qp->rx_index++;
1690 qp->rx_index %= qp->rx_max_entry;
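The receive path above reads a ver/len/flags header from the tail of the current frame and validates it in order: the done flag must be set, a link-down flag tears the queue down, and the version must match the running rx_pkts count before a pending entry is moved to the post queue. A runnable sketch of how one frame is laid out and checked (the frame size and flag value are illustrative, but the offsets follow the listing's arithmetic):

#include <stdio.h>
#include <string.h>

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

#define DESC_DONE_FLAG	(1U << 0)	/* the "done" bit checked above */

int main(void)
{
	unsigned char frame[4096];	/* one slot of the receive ring */
	struct ntb_payload_header *hdr =
		(void *)(frame + sizeof(frame) - sizeof(*hdr));

	memcpy(frame, "hello", 5);	/* payload written by the peer */
	hdr->ver = 0;			/* must equal the local rx_pkts count */
	hdr->len = 5;
	hdr->flags = DESC_DONE_FLAG;	/* written last: frame is ready */

	if (hdr->flags & DESC_DONE_FLAG)
		printf("frame ready: ver %u len %u\n", hdr->ver, hdr->len);
	return 0;
}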
1697 struct ntb_transport_qp *qp = (void *)data;
1700 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
1701 __func__, qp->qp_num);
1706 for (i = 0; i < qp->rx_max_entry; i++) {
1707 rc = ntb_process_rxc(qp);
1712 if (i && qp->rx_dma_chan)
1713 dma_async_issue_pending(qp->rx_dma_chan);
1715 if (i == qp->rx_max_entry) {
1717 if (qp->active)
1718 tasklet_schedule(&qp->rxc_db_work);
1719 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
1721 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
1723 ntb_db_read(qp->ndev);
1729 if (qp->active)
1730 tasklet_schedule(&qp->rxc_db_work);
1738 struct ntb_transport_qp *qp = entry->qp;
1753 qp->tx_mw + qp->tx_max_frame *
1758 qp->tx_memcpy++;
1770 if (qp->use_msi)
1771 ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
1773 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
1780 qp->tx_bytes += entry->len;
1782 if (qp->tx_handler)
1783 qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1787 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1808 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1812 struct dma_chan *chan = qp->tx_dma_chan;
1822 dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
1867 static void ntb_async_tx(struct ntb_transport_qp *qp,
1871 struct dma_chan *chan = qp->tx_dma_chan;
1875 entry->tx_index = qp->tx_index;
1876 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
1877 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1881 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1889 res = ntb_async_tx_submit(qp, entry);
1894 qp->tx_async++;
1900 qp->tx_memcpy++;
1903 static int ntb_process_tx(struct ntb_transport_qp *qp,
1906 if (qp->tx_index == qp->remote_rx_info->entry) {
1907 qp->tx_ring_full++;
1911 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1912 if (qp->tx_handler)
1913 qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1915 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1916 &qp->tx_free_q);
1920 ntb_async_tx(qp, entry);
1922 qp->tx_index++;
1923 qp->tx_index %= qp->tx_max_entry;
1925 qp->tx_pkts++;
1930 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1932 struct pci_dev *pdev = qp->ndev->pdev;
1936 if (!qp->link_is_up)
1939 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
1942 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1956 rc = ntb_process_tx(qp, entry);
1959 qp->qp_num);
1961 ntb_qp_link_down_reset(qp);
1991 struct ntb_transport_qp *qp;
2011 qp = &nt->qp_vec[free_queue];
2012 qp_bit = BIT_ULL(qp->qp_num);
2016 qp->cb_data = data;
2017 qp->rx_handler = handlers->rx_handler;
2018 qp->tx_handler = handlers->tx_handler;
2019 qp->event_handler = handlers->event_handler;
2025 qp->tx_dma_chan =
2028 if (!qp->tx_dma_chan)
2031 qp->rx_dma_chan =
2034 if (!qp->rx_dma_chan)
2037 qp->tx_dma_chan = NULL;
2038 qp->rx_dma_chan = NULL;
2041 qp->tx_mw_dma_addr = 0;
2042 if (qp->tx_dma_chan) {
2043 qp->tx_mw_dma_addr =
2044 dma_map_resource(qp->tx_dma_chan->device->dev,
2045 qp->tx_mw_phys, qp->tx_mw_size,
2047 if (dma_mapping_error(qp->tx_dma_chan->device->dev,
2048 qp->tx_mw_dma_addr)) {
2049 qp->tx_mw_dma_addr = 0;
2055 qp->tx_dma_chan ? "DMA" : "CPU");
2058 qp->rx_dma_chan ? "DMA" : "CPU");
2065 entry->qp = qp;
2066 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
2067 &qp->rx_free_q);
2069 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
2071 for (i = 0; i < qp->tx_max_entry; i++) {
2076 entry->qp = qp;
2077 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
2078 &qp->tx_free_q);
2081 ntb_db_clear(qp->ndev, qp_bit);
2082 ntb_db_clear_mask(qp->ndev, qp_bit);
2084 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
2086 return qp;
2089 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
2092 qp->rx_alloc_entry = 0;
2093 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
2095 if (qp->tx_mw_dma_addr)
2096 dma_unmap_resource(qp->tx_dma_chan->device->dev,
2097 qp->tx_mw_dma_addr, qp->tx_mw_size,
2099 if (qp->tx_dma_chan)
2100 dma_release_channel(qp->tx_dma_chan);
2101 if (qp->rx_dma_chan)
2102 dma_release_channel(qp->rx_dma_chan);
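Queue creation above claims DMA channels when requested, maps the peer-visible transmit window for DMA, pre-allocates receive and transmit entries, and finally unmasks the queue's doorbell bit. A client attaches to it through a handlers structure, roughly as below (sketched from the public linux/ntb_transport.h API; my_rx/my_tx/my_event and my_priv are hypothetical, so verify the prototypes against the header):

#include <linux/ntb_transport.h>

static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
		  void *data, int len)
{
	/* hand the filled buffer to the upper layer, then re-post it */
}

static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
		  void *data, int len)
{
	/* reclaim the buffer that has been copied into the peer's window */
}

static void my_event(void *data, int link_is_up)
{
	/* react to the qp link coming up or going down */
}

static const struct ntb_queue_handlers my_handlers = {
	.rx_handler	= my_rx,
	.tx_handler	= my_tx,
	.event_handler	= my_event,
};

static struct ntb_transport_qp *my_attach(struct device *client_dev,
					  void *my_priv)
{
	return ntb_transport_create_queue(my_priv, client_dev, &my_handlers);
}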
2111 * @qp: NTB queue to be freed
2115 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
2121 if (!qp)
2124 pdev = qp->ndev->pdev;
2126 qp->active = false;
2128 if (qp->tx_dma_chan) {
2129 struct dma_chan *chan = qp->tx_dma_chan;
2133 qp->tx_dma_chan = NULL;
2138 dma_sync_wait(chan, qp->last_cookie);
2142 qp->tx_mw_dma_addr, qp->tx_mw_size,
2148 if (qp->rx_dma_chan) {
2149 struct dma_chan *chan = qp->rx_dma_chan;
2153 qp->rx_dma_chan = NULL;
2158 dma_sync_wait(chan, qp->last_cookie);
2163 qp_bit = BIT_ULL(qp->qp_num);
2165 ntb_db_set_mask(qp->ndev, qp_bit);
2166 tasklet_kill(&qp->rxc_db_work);
2168 cancel_delayed_work_sync(&qp->link_work);
2170 qp->cb_data = NULL;
2171 qp->rx_handler = NULL;
2172 qp->tx_handler = NULL;
2173 qp->event_handler = NULL;
2175 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
2178 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
2183 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
2188 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
2191 qp->transport->qp_bitmap_free |= qp_bit;
2193 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
2199 * @qp: NTB queue to be freed
2203 * shutdown of qp.
2207 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
2212 if (!qp || qp->client_ready)
2215 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
2222 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
2230 * @qp: NTB transport layer queue the entry is to be enqueued on
2240 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2245 if (!qp)
2248 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
2260 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
2262 if (qp->active)
2263 tasklet_schedule(&qp->rxc_db_work);
2271 * @qp: NTB transport layer queue the entry is to be enqueued on
2278 * serialize access to the qp.
2282 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2288 if (!qp || !len)
2291 /* If the qp link is down already, just ignore. */
2292 if (!qp->link_is_up)
2295 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
2297 qp->tx_err_no_buf++;
2309 rc = ntb_process_tx(qp, entry);
2311 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
2312 &qp->tx_free_q);
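After link-up a client drives the data path with the two enqueue calls above: receive buffers are posted with ntb_transport_rx_enqueue() and handed back through rx_handler once filled, while ntb_transport_tx_enqueue() is silently ignored on a down link and fails when no free entry remains. A hedged client-side sketch (MAX_LEN, the helpers, and the error handling are illustrative):

#include <linux/ntb_transport.h>
#include <linux/slab.h>

#define MAX_LEN	2048	/* real clients size buffers from ntb_transport_max_size() */

static int my_post_rx_bufs(struct ntb_transport_qp *qp, int n)
{
	while (n--) {
		void *buf = kmalloc(MAX_LEN, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* buf comes back through rx_handler() once the peer fills it */
		if (ntb_transport_rx_enqueue(qp, buf, buf, MAX_LEN)) {
			kfree(buf);
			return -ENOSPC;
		}
	}
	return 0;
}

static int my_send(struct ntb_transport_qp *qp, void *buf, unsigned int len)
{
	/* a no-op on a down link; fails when the transmit ring is exhausted */
	return ntb_transport_tx_enqueue(qp, buf, buf, len);
}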
2320 * @qp: NTB transport layer queue to be enabled
2324 void ntb_transport_link_up(struct ntb_transport_qp *qp)
2326 if (!qp)
2329 qp->client_ready = true;
2331 if (qp->transport->link_is_up)
2332 schedule_delayed_work(&qp->link_work, 0);
2338 * @qp: NTB transport layer queue to be disabled
2344 void ntb_transport_link_down(struct ntb_transport_qp *qp)
2348 if (!qp)
2351 qp->client_ready = false;
2353 val = ntb_spad_read(qp->ndev, QP_LINKS);
2355 ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
2357 if (qp->link_is_up)
2358 ntb_send_link_down(qp);
2360 cancel_delayed_work_sync(&qp->link_work);
2366 * @qp: NTB transport layer queue to be queried
2372 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
2374 if (!qp)
2377 return qp->link_is_up;
2382 * ntb_transport_qp_num - Query the qp number
2383 * @qp: NTB transport layer queue to be queried
2385 * Query qp number of the NTB transport queue
2387 * RETURNS: a zero based number specifying the qp number
2389 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
2391 if (!qp)
2394 return qp->qp_num;
2399 * ntb_transport_max_size - Query the max payload size of a qp
2400 * @qp: NTB transport layer queue to be queried
2402 * Query the maximum payload size permissible on the given qp
2404 * RETURNS: the max payload size of a qp
2406 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
2412 if (!qp)
2415 rx_chan = qp->rx_dma_chan;
2416 tx_chan = qp->tx_dma_chan;
2422 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
2429 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
2431 unsigned int head = qp->tx_index;
2432 unsigned int tail = qp->remote_rx_info->entry;
2434 return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
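ntb_transport_tx_free_entry() above derives ring occupancy from the local head (tx_index) and the tail the peer advertises back through remote_rx_info->entry, adding tx_max_entry when the tail has wrapped behind the head; the ring counts as full when head equals tail. A runnable worked example with an assumed 8-entry ring:

#include <stdio.h>

static unsigned int tx_free(unsigned int head, unsigned int tail,
			    unsigned int max_entry)
{
	return tail >= head ? tail - head : max_entry + tail - head;
}

int main(void)
{
	printf("%u\n", tx_free(0, 7, 8));	/* empty ring: 7 entries free */
	printf("%u\n", tx_free(5, 2, 8));	/* wrapped tail: 5 entries free */
	printf("%u\n", tx_free(3, 3, 8));	/* head == tail: ring is full */
	return 0;
}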
2441 struct ntb_transport_qp *qp;
2455 qp = &nt->qp_vec[qp_num];
2457 if (qp->active)
2458 tasklet_schedule(&qp->rxc_db_work);