Lines matching refs: nt (occurrences of the struct ntb_transport_ctx pointer in drivers/ntb/ntb_transport.c)

218 	struct ntb_transport_ctx *nt;
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
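The QP_TO_MW() macro at line 272 distributes queue pairs across memory windows round-robin; note that (qp) is parenthesized while nt expands as a bare pointer, which is safe here because every caller passes a plain variable. An illustrative mapping (the counts are made up for the example):

	/* With mw_count = 2 and qp_count = 5:
	 *   QP_TO_MW(nt, 0) == 0    QP_TO_MW(nt, 1) == 1
	 *   QP_TO_MW(nt, 2) == 0    QP_TO_MW(nt, 3) == 1
	 *   QP_TO_MW(nt, 4) == 0
	 * so MW 0 serves three QPs and MW 1 serves two.
	 */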
326 static int ntb_bus_init(struct ntb_transport_ctx *nt)
328 list_add_tail(&nt->entry, &ntb_transport_list);
332 static void ntb_bus_remove(struct ntb_transport_ctx *nt)
336 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
343 list_del(&nt->entry);
363 struct ntb_transport_ctx *nt;
365 list_for_each_entry(nt, &ntb_transport_list, entry)
366 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
384 struct ntb_transport_ctx *nt;
391 list_for_each_entry(nt, &ntb_transport_list, entry) {
394 node = dev_to_node(&nt->ndev->dev);
409 dev->parent = &nt->ndev->dev;
417 list_add_tail(&client_dev->entry, &nt->client_devs);
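Lines 384-417 are the body of ntb_transport_register_client_dev(), which walks every transport on ntb_transport_list and hangs a named child device off the NTB device so a client driver can bind to it. A hedged sketch of the caller side, modeled on how ntb_netdev uses this API; my_client, my_client_probe, and my_client_remove are illustrative stubs, not driver code:

	static struct ntb_transport_client my_client = {
		.driver.name = "my_client",
		.probe = my_client_probe,
		.remove = my_client_remove,
	};

	static int __init my_client_init(void)
	{
		int rc;

		/* create a "my_client" child device on every transport */
		rc = ntb_transport_register_client_dev("my_client");
		if (rc)
			return rc;

		rc = ntb_transport_register_client(&my_client);
		if (rc)
			ntb_transport_unregister_client_dev("my_client");
		return rc;
	}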
613 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
616 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
618 struct ntb_dev *ndev = nt->ndev;
625 mw_count = nt->mw_count;
626 qp_count = nt->qp_count;
628 mw_num = QP_TO_MW(nt, qp_num);
629 mw = &nt->mw_vec[mw_num];
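Lines 613-629 open ntb_transport_setup_qp_mw(), which carves the inbound memory window shared by several QPs into equal per-QP slices. A sketch of the split, paraphrasing the driver's arithmetic (names follow the listing above):

	mw_num = QP_TO_MW(nt, qp_num);

	/* MWs whose index < (qp_count % mw_count) serve one extra QP */
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);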
692 static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
695 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
696 int spad = qp_num * 2 + nt->msi_spad_offset;
698 if (!nt->use_msi)
701 if (spad >= ntb_spad_count(nt->ndev))
719 static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
722 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
723 int spad = qp_num * 2 + nt->msi_spad_offset;
726 if (!nt->use_msi)
729 if (spad >= ntb_spad_count(nt->ndev)) {
765 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
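Lines 719-765 belong to ntb_transport_setup_qp_msi(), which reserves two scratchpads per QP (starting at nt->msi_spad_offset, see line 723) to publish the local MSI descriptor for the peer to read. A sketch of the publish step, assuming the ntb_msi descriptor's addr_offset/data layout:

	int spad = qp_num * 2 + nt->msi_spad_offset;

	/* advertise our MSI address/data so the peer can trigger our IRQ */
	ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
	ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);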
768 static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
772 dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");
774 for (i = 0; i < nt->qp_count; i++)
775 ntb_transport_setup_qp_peer_msi(nt, i);
780 struct ntb_transport_ctx *nt = data;
783 dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");
785 for (i = 0; i < nt->qp_count; i++)
786 ntb_transport_setup_qp_msi(nt, i);
788 ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
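Lines 768-788 implement both halves of the "MSI descriptors changed" notification: locally, every QP's descriptor is republished and the doorbell bit reserved in nt->msi_db_mask is rung at the peer (line 788); the receiving side then re-reads each QP's pair of peer scratchpads via ntb_transport_setup_qp_peer_msi() (lines 692-701 above). A sketch of that read side:

	int spad = qp_num * 2 + nt->msi_spad_offset;

	/* re-read the remote MSI address/data published for this QP */
	qp->peer_msi_desc.addr_offset = ntb_peer_spad_read(qp->ndev, PIDX, spad);
	qp->peer_msi_desc.data = ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);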
791 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
793 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
794 struct pci_dev *pdev = nt->ndev->pdev;
799 ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
853 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
856 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
857 struct pci_dev *pdev = nt->ndev->pdev;
866 rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
879 ntb_free_mw(nt, num_mw);
901 rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
905 ntb_free_mw(nt, num_mw);
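Lines 853-905 are ntb_set_mw(), which sizes the local DMA buffer to the hardware's alignment constraints before programming the inbound translation. A hedged sketch of the sequence, with the error paths and the driver's alignment-retry logic trimmed:

	resource_size_t xlat_align, xlat_align_size, xlat_size;

	rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
			      &xlat_align_size, NULL);
	if (rc)
		return rc;

	/* round the requested size up to the window's size granularity */
	xlat_size = round_up(size, xlat_align_size);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, xlat_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr)
		return -ENOMEM;

	/* mw->dma_addr must also satisfy xlat_align before programming */
	rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr, xlat_size);
	if (rc)
		ntb_free_mw(nt, num_mw);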
944 struct ntb_transport_ctx *nt = qp->transport;
945 struct pci_dev *pdev = nt->ndev->pdev;
961 struct ntb_transport_ctx *nt = qp->transport;
965 if (nt->link_is_up)
975 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
981 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
984 for (i = 0; i < nt->qp_count; i++)
986 qp = &nt->qp_vec[i];
992 if (!nt->link_is_up)
993 cancel_delayed_work_sync(&nt->link_work);
995 for (i = 0; i < nt->mw_count; i++)
996 ntb_free_mw(nt, i);
1002 count = ntb_spad_count(nt->ndev);
1004 ntb_spad_write(nt->ndev, i, 0);
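Lines 975-1004 cover ntb_transport_link_cleanup(): quiesce the allocated QPs, cancel the handshake work, release the memory windows, then zero every scratchpad so a re-established link starts its handshake from a clean slate. The tail of that sequence, roughly:

	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* wipe the spads so stale handshake values cannot be re-read */
	count = ntb_spad_count(nt->ndev);
	for (i = 0; i < count; i++)
		ntb_spad_write(nt->ndev, i, 0);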
1009 struct ntb_transport_ctx *nt =
1012 ntb_transport_link_cleanup(nt);
1017 struct ntb_transport_ctx *nt = data;
1019 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
1020 schedule_delayed_work(&nt->link_work, 0);
1022 schedule_work(&nt->link_cleanup);
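Lines 1017-1022 are the link-event callback that the NTB core invokes through the context ops registered at line 1378 (ntb_set_ctx). A sketch of how the dispatch is wired up, using struct ntb_ctx_ops from include/linux/ntb.h:

	static void ntb_transport_event_callback(void *data)
	{
		struct ntb_transport_ctx *nt = data;

		if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
			schedule_delayed_work(&nt->link_work, 0);
		else
			schedule_work(&nt->link_cleanup);
	}

	static const struct ntb_ctx_ops ntb_transport_ops = {
		.link_event = ntb_transport_event_callback,
		.db_event = ntb_transport_doorbell_callback,
	};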
1027 struct ntb_transport_ctx *nt =
1029 struct ntb_dev *ndev = nt->ndev;
1037 if (nt->use_msi) {
1043 nt->use_msi = false;
1047 for (i = 0; i < nt->qp_count; i++)
1048 ntb_transport_setup_qp_msi(nt, i);
1050 for (i = 0; i < nt->mw_count; i++) {
1051 size = nt->mw_vec[i].phys_size;
1063 ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
1065 ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
1077 if (val != nt->qp_count)
1082 if (val != nt->mw_count)
1085 for (i = 0; i < nt->mw_count; i++) {
1096 rc = ntb_set_mw(nt, i, val64);
1101 nt->link_is_up = true;
1103 for (i = 0; i < nt->qp_count; i++) {
1104 struct ntb_transport_qp *qp = &nt->qp_vec[i];
1106 ntb_transport_setup_qp_mw(nt, i);
1107 ntb_transport_setup_qp_peer_msi(nt, i);
1116 for (i = 0; i < nt->mw_count; i++)
1117 ntb_free_mw(nt, i);
1125 schedule_delayed_work(&nt->link_work,
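Lines 1027-1125 are ntb_transport_link_work(), the scratchpad handshake: each side publishes its MW sizes and counts, reads the peer's values back, and programs the windows, rescheduling itself (line 1125) until both sides agree. Because scratchpads are 32-bit, each 64-bit window size is split across a HIGH/LOW spad pair; a sketch of the publish side, paraphrasing the driver:

	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		ntb_peer_spad_write(ndev, PIDX, MW0_SZ_HIGH + (i * 2),
				    upper_32_bits(size));
		ntb_peer_spad_write(ndev, PIDX, MW0_SZ_LOW + (i * 2),
				    lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
	ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
	ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);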
1135 struct ntb_transport_ctx *nt = qp->transport;
1138 WARN_ON(!nt->link_is_up);
1140 val = ntb_spad_read(nt->ndev, QP_LINKS);
1142 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
1158 } else if (nt->link_is_up)
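Lines 1135-1158 are the per-QP link handshake: each side sets its QP's bit in the peer's QP_LINKS scratchpad, and a QP comes up once the peer has done the same in ours. The read-modify-write at lines 1140-1142 and the readiness check, roughly:

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));

	/* the peer sets our local QP_LINKS bit when its side is ready */
	if (val & BIT(qp->qp_num))
		qp->link_is_up = true;
	else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));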
1163 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1173 mw_count = nt->mw_count;
1174 qp_count = nt->qp_count;
1176 mw_num = QP_TO_MW(nt, qp_num);
1178 qp = &nt->qp_vec[qp_num];
1180 qp->transport = nt;
1181 qp->ndev = nt->ndev;
1191 mw_base = nt->mw_vec[mw_num].phys_addr;
1192 mw_size = nt->mw_vec[mw_num].phys_size;
1201 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1216 if (nt->debugfs_node_dir) {
1221 nt->debugfs_node_dir);
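Lines 1163-1221 are ntb_transport_init_queue(), which computes the transmit side of the same MW split seen in ntb_transport_setup_qp_mw() and optionally gives each QP a debugfs directory. The key difference from the receive side: tx_mw points into the ioremapped window (vbase), so stores to it cross the NTB link and land in the peer's rx_buff for this QP. Roughly:

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	/* MMIO into the peer-visible window, not local memory */
	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	qp->tx_mw_phys = mw_base + qp_offset;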
1250 struct ntb_transport_ctx *nt;
1276 nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
1277 if (!nt)
1280 nt->ndev = ndev;
1290 nt->use_msi = true;
1299 nt->mw_count = 0;
1305 nt->mw_count = min(mw_count, max_mw_count_for_spads);
1307 nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;
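Lines 1299-1307 clamp the usable MW count to the scratchpad budget: each window consumes a HIGH/LOW spad pair above the fixed header fields, and the MSI descriptor spads sit directly after those pairs. A sketch of the arithmetic, assuming MW0_SZ_HIGH marks the first per-MW spad index:

	/* two spads (size high/low) per memory window, after the fixed fields */
	max_mw_count_for_spads = (ntb_spad_count(ndev) - MW0_SZ_HIGH) / 2;
	nt->mw_count = min(mw_count, max_mw_count_for_spads);

	/* MSI descriptors are published just past the MW size pairs */
	nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;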
1309 nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
1311 if (!nt->mw_vec) {
1317 mw = &nt->mw_vec[i];
1339 if (nt->use_msi) {
1341 nt->msi_db_mask = 1 << qp_count;
1342 ntb_db_clear_mask(ndev, nt->msi_db_mask);
1347 else if (nt->mw_count < qp_count)
1348 qp_count = nt->mw_count;
1352 nt->qp_count = qp_count;
1353 nt->qp_bitmap = qp_bitmap;
1354 nt->qp_bitmap_free = qp_bitmap;
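Lines 1339-1354 derive the QP set from the device's doorbell bitmap. When MSI is usable, the topmost doorbell bit is carved out as nt->msi_db_mask for descriptor-change notifications, leaving one fewer QP; qp_count is then further capped by the MW count. Paraphrasing the driver:

	qp_count = ilog2(qp_bitmap);
	if (nt->use_msi) {
		/* reserve the top doorbell bit for MSI notifications */
		qp_count -= 1;
		nt->msi_db_mask = 1 << qp_count;
		ntb_db_clear_mask(ndev, nt->msi_db_mask);
	}

	if (nt->mw_count < qp_count)
		qp_count = nt->mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;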
1356 nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
1358 if (!nt->qp_vec) {
1364 nt->debugfs_node_dir =
1370 rc = ntb_transport_init_queue(nt, i);
1375 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
1376 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
1378 rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
1382 INIT_LIST_HEAD(&nt->client_devs);
1383 rc = ntb_bus_init(nt);
1387 nt->link_is_up = false;
1396 kfree(nt->qp_vec);
1399 mw = &nt->mw_vec[i];
1402 kfree(nt->mw_vec);
1404 kfree(nt);
1410 struct ntb_transport_ctx *nt = ndev->ctx;
1415 ntb_transport_link_cleanup(nt);
1416 cancel_work_sync(&nt->link_cleanup);
1417 cancel_delayed_work_sync(&nt->link_work);
1419 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1422 for (i = 0; i < nt->qp_count; i++) {
1423 qp = &nt->qp_vec[i];
1432 ntb_bus_remove(nt);
1434 for (i = nt->mw_count; i--; ) {
1435 ntb_free_mw(nt, i);
1436 iounmap(nt->mw_vec[i].vbase);
1439 kfree(nt->qp_vec);
1440 kfree(nt->mw_vec);
1441 kfree(nt);
1987 struct ntb_transport_ctx *nt;
1998 nt = ndev->ctx;
2002 free_queue = ffs(nt->qp_bitmap_free);
2009 qp = &nt->qp_vec[free_queue];
2012 nt->qp_bitmap_free &= ~qp_bit;
2101 nt->qp_bitmap_free |= qp_bit;
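Lines 1987-2101 span ntb_transport_create_queue() and its release path: a QP is claimed by clearing its bit in qp_bitmap_free (line 2012) and handed back by setting it again (line 2101). ffs() returns a 1-based bit index (0 when no bit is set), so the driver must both test for exhaustion and convert to a 0-based QP number before claiming it:

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;		/* no QPs left */

	/* ffs() is 1-based; QP indices are 0-based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);
	nt->qp_bitmap_free &= ~qp_bit;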
2438 struct ntb_transport_ctx *nt = data;
2443 if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
2444 ntb_transport_msi_peer_desc_changed(nt);
2445 ntb_db_clear(nt->ndev, nt->msi_db_mask);
2448 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
2449 ntb_db_vector_mask(nt->ndev, vector));
2453 qp = &nt->qp_vec[qp_num];
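Lines 2438-2453 are the doorbell/MSI-X vector handler: the MSI descriptor-changed doorbell is serviced first (lines 2443-2445), then the remaining bits for this vector are masked down to allocated QPs and each set bit wakes its queue. A sketch of the scan loop that follows line 2453, paraphrasing the driver:

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}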