Lines matching refs:nt (uses of the struct ntb_transport_ctx pointer `nt` in the NTB transport driver, ntb_transport.c; the leading numbers are the source file's own line numbers)

218 	struct ntb_transport_ctx *nt;
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
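The QP_TO_MW() macro at line 272 distributes queue pairs across memory windows round-robin. A minimal user-space sketch of the same mapping, passing the window count directly instead of the context pointer, with both counts invented for illustration:

    #include <stdio.h>

    /* Same round-robin mapping as the driver's QP_TO_MW() macro. */
    #define QP_TO_MW(mw_count, qp) ((qp) % (mw_count))

    int main(void)
    {
            unsigned int mw_count = 2, qp_count = 8, qp;

            for (qp = 0; qp < qp_count; qp++)
                    printf("qp %u -> mw %u\n", qp, QP_TO_MW(mw_count, qp));
            return 0;
    }

With two windows, even-numbered queues land on MW 0 and odd-numbered queues on MW 1.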
328 static int ntb_bus_init(struct ntb_transport_ctx *nt)
330 list_add_tail(&nt->entry, &ntb_transport_list);
334 static void ntb_bus_remove(struct ntb_transport_ctx *nt)
338 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
345 list_del(&nt->entry);
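ntb_bus_remove() (lines 334-345) unregisters and frees each client while walking the list, which is why it uses list_for_each_entry_safe() rather than the plain iterator. A stripped-down user-space rendering of the same grab-next-before-freeing-current discipline; the struct and names are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    struct client_dev {
            int id;
            struct client_dev *next;
    };

    int main(void)
    {
            struct client_dev *head = NULL, *cd, *tmp;
            int i;

            for (i = 0; i < 3; i++) {       /* build a small list */
                    cd = malloc(sizeof(*cd));
                    if (!cd)
                            break;
                    cd->id = i;
                    cd->next = head;
                    head = cd;
            }
            /* "safe" traversal: save ->next before freeing the node */
            for (cd = head; cd; cd = tmp) {
                    tmp = cd->next;
                    printf("removing client %d\n", cd->id);
                    free(cd);
            }
            return 0;
    }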
365 struct ntb_transport_ctx *nt;
367 list_for_each_entry(nt, &ntb_transport_list, entry)
368 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
386 struct ntb_transport_ctx *nt;
393 list_for_each_entry(nt, &ntb_transport_list, entry) {
396 node = dev_to_node(&nt->ndev->dev);
411 dev->parent = &nt->ndev->dev;
419 list_add_tail(&client_dev->entry, &nt->client_devs);
615 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
618 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
620 struct ntb_dev *ndev = nt->ndev;
627 mw_count = nt->mw_count;
628 qp_count = nt->qp_count;
630 mw_num = QP_TO_MW(nt, qp_num);
631 mw = &nt->mw_vec[mw_num];
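ntb_transport_setup_qp_mw() carves one memory window's translated buffer into equal per-queue slices; windows whose index falls below qp_count % mw_count host one extra queue. A standalone sketch of that arithmetic with invented counts (the real function takes the size from mw->xlat_size and also reserves part of each slice for ring bookkeeping):

    #include <stdio.h>

    int main(void)
    {
            unsigned int mw_count = 2, qp_count = 5, qp_num;
            unsigned long xlat_size = 1UL << 20;    /* pretend 1 MiB window */

            for (qp_num = 0; qp_num < qp_count; qp_num++) {
                    unsigned int mw_num = qp_num % mw_count;
                    unsigned int num_qps_mw = qp_count / mw_count +
                            (mw_num < qp_count % mw_count ? 1 : 0);
                    unsigned long rx_size = xlat_size / num_qps_mw;
                    unsigned long offset = rx_size * (qp_num / mw_count);

                    printf("qp %u: mw %u, offset %#lx, size %#lx\n",
                           qp_num, mw_num, offset, rx_size);
            }
            return 0;
    }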
694 static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
697 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
698 int spad = qp_num * 2 + nt->msi_spad_offset;
700 if (!nt->use_msi)
703 if (spad >= ntb_spad_count(nt->ndev))
721 static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
724 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
725 int spad = qp_num * 2 + nt->msi_spad_offset;
728 if (!nt->use_msi)
731 if (spad >= ntb_spad_count(nt->ndev)) {
767 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
770 static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
774 dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");
776 for (i = 0; i < nt->qp_count; i++)
777 ntb_transport_setup_qp_peer_msi(nt, i);
782 struct ntb_transport_ctx *nt = data;
785 dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");
787 for (i = 0; i < nt->qp_count; i++)
788 ntb_transport_setup_qp_msi(nt, i);
790 ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
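Both MSI setup paths compute the same scratchpad slot: two spads per queue pair, starting at msi_spad_offset, with the bounds check seen at lines 703 and 731 guarding hardware that has too few scratchpads. A toy version of the indexing with invented counts; which descriptor word goes in which of the two spads is not visible in this listing, so the pair is left generic:

    #include <stdio.h>

    int main(void)
    {
            int spad_count = 16, msi_spad_offset = 6, qp_num;

            for (qp_num = 0; qp_num < 8; qp_num++) {
                    int spad = qp_num * 2 + msi_spad_offset;

                    if (spad >= spad_count) {       /* same check as the driver */
                            printf("qp %d: out of scratchpads\n", qp_num);
                            break;
                    }
                    printf("qp %d: spads %d and %d\n", qp_num, spad, spad + 1);
            }
            return 0;
    }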
793 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
795 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
796 struct pci_dev *pdev = nt->ndev->pdev;
801 ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
855 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
858 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
859 struct pci_dev *pdev = nt->ndev->pdev;
868 rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
881 ntb_free_mw(nt, num_mw);
903 rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
907 ntb_free_mw(nt, num_mw);
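ntb_set_mw() asks the hardware for the window's constraints via ntb_mw_get_align() (line 868) and must honor them before programming the translation at line 903: the buffer size is rounded up to the size alignment, and a misaligned DMA address is an error. The rounding and the check in isolation, with all values invented:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long xlat_align = 1ULL << 12;      /* addr alignment */
            unsigned long long xlat_align_size = 1ULL << 12; /* size alignment */
            unsigned long long size = 12345;
            unsigned long long dma_addr = 0x1000;

            /* round the requested size up to the size alignment */
            unsigned long long buff_size =
                    (size + xlat_align_size - 1) & ~(xlat_align_size - 1);

            if (dma_addr & (xlat_align - 1))
                    printf("unaligned DMA address %#llx\n", dma_addr);
            else
                    printf("size %llu rounds to %llu at %#llx\n",
                           size, buff_size, dma_addr);
            return 0;
    }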
946 struct ntb_transport_ctx *nt = qp->transport;
947 struct pci_dev *pdev = nt->ndev->pdev;
963 struct ntb_transport_ctx *nt = qp->transport;
967 if (nt->link_is_up)
977 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
983 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
986 for (i = 0; i < nt->qp_count; i++)
988 qp = &nt->qp_vec[i];
994 if (!nt->link_is_up)
995 cancel_delayed_work_sync(&nt->link_work);
997 for (i = 0; i < nt->mw_count; i++)
998 ntb_free_mw(nt, i);
1004 count = ntb_spad_count(nt->ndev);
1006 ntb_spad_write(nt->ndev, i, 0);
1011 struct ntb_transport_ctx *nt =
1014 ntb_transport_link_cleanup(nt);
1019 struct ntb_transport_ctx *nt = data;
1021 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
1022 schedule_delayed_work(&nt->link_work, 0);
1024 schedule_work(&nt->link_cleanup);
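The link-event callback (lines 1019-1024) runs from the hardware notification path, so it does no handshake work itself; it only schedules bring-up or teardown into process context, where scratchpad traffic and retries are safe. A kernel-style paraphrase of that dispatch, reconstructed from the lines above and not a complete module:

    /* Paraphrase of the dispatch seen at lines 1019-1024. */
    static void example_link_event(void *data)
    {
            struct ntb_transport_ctx *nt = data;

            if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                    schedule_delayed_work(&nt->link_work, 0); /* bring-up */
            else
                    schedule_work(&nt->link_cleanup);         /* teardown */
    }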
1029 struct ntb_transport_ctx *nt =
1031 struct ntb_dev *ndev = nt->ndev;
1039 if (nt->use_msi) {
1045 nt->use_msi = false;
1049 for (i = 0; i < nt->qp_count; i++)
1050 ntb_transport_setup_qp_msi(nt, i);
1052 for (i = 0; i < nt->mw_count; i++) {
1053 size = nt->mw_vec[i].phys_size;
1065 ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
1067 ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
1079 if (val != nt->qp_count)
1084 if (val != nt->mw_count)
1087 for (i = 0; i < nt->mw_count; i++) {
1098 rc = ntb_set_mw(nt, i, val64);
1103 nt->link_is_up = true;
1105 for (i = 0; i < nt->qp_count; i++) {
1106 struct ntb_transport_qp *qp = &nt->qp_vec[i];
1108 ntb_transport_setup_qp_mw(nt, i);
1109 ntb_transport_setup_qp_peer_msi(nt, i);
1118 for (i = 0; i < nt->mw_count; i++)
1119 ntb_free_mw(nt, i);
1127 schedule_delayed_work(&nt->link_work,
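During bring-up, the link work publishes each memory window's 64-bit size through a pair of 32-bit scratchpads (the HIGH/LOW pairs implied by the MW0_SZ_HIGH offset at line 1309), reads the peer's values back, and re-arms itself at line 1127 until both sides agree. The split-and-reassemble step on its own:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t size = 0x123456789ULL;         /* invented MW size */
            uint32_t hi = size >> 32;               /* goes in the HIGH spad */
            uint32_t lo = (uint32_t)size;           /* goes in the LOW spad */
            uint64_t readback = ((uint64_t)hi << 32) | lo;

            printf("size %#llx -> hi %#x, lo %#x -> %#llx\n",
                   (unsigned long long)size, hi, lo,
                   (unsigned long long)readback);
            return 0;
    }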
1137 struct ntb_transport_ctx *nt = qp->transport;
1140 WARN_ON(!nt->link_is_up);
1142 val = ntb_spad_read(nt->ndev, QP_LINKS);
1144 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
1160 } else if (nt->link_is_up)
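Lines 1142-1144 are a read-modify-write on the shared QP_LINKS scratchpad: each side ORs in BIT(qp_num) so the peer can see which queues are ready. Reduced to its bit arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int qp_links = 0, qp_num;

            for (qp_num = 0; qp_num < 3; qp_num++) {
                    qp_links |= 1U << qp_num;       /* val | BIT(qp->qp_num) */
                    printf("after qp %u: QP_LINKS %#x\n", qp_num, qp_links);
            }
            return 0;
    }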
1165 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1175 mw_count = nt->mw_count;
1176 qp_count = nt->qp_count;
1178 mw_num = QP_TO_MW(nt, qp_num);
1180 qp = &nt->qp_vec[qp_num];
1182 qp->transport = nt;
1183 qp->ndev = nt->ndev;
1193 mw_base = nt->mw_vec[mw_num].phys_addr;
1194 mw_size = nt->mw_vec[mw_num].phys_size;
1203 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1218 if (nt->debugfs_node_dir) {
1223 nt->debugfs_node_dir);
1252 struct ntb_transport_ctx *nt;
1278 nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
1279 if (!nt)
1282 nt->ndev = ndev;
1292 nt->use_msi = true;
1301 nt->mw_count = 0;
1307 nt->mw_count = min(mw_count, max_mw_count_for_spads);
1309 nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;
1311 nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
1313 if (!nt->mw_vec) {
1319 mw = &nt->mw_vec[i];
1341 if (nt->use_msi) {
1343 nt->msi_db_mask = 1 << qp_count;
1344 ntb_db_clear_mask(ndev, nt->msi_db_mask);
1349 else if (nt->mw_count < qp_count)
1350 qp_count = nt->mw_count;
1354 nt->qp_count = qp_count;
1355 nt->qp_bitmap = qp_bitmap;
1356 nt->qp_bitmap_free = qp_bitmap;
1358 nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
1360 if (!nt->qp_vec) {
1366 nt->debugfs_node_dir =
1372 rc = ntb_transport_init_queue(nt, i);
1377 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
1378 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
1380 rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
1384 INIT_LIST_HEAD(&nt->client_devs);
1385 rc = ntb_bus_init(nt);
1389 nt->link_is_up = false;
1398 kfree(nt->qp_vec);
1401 mw = &nt->mw_vec[i];
1404 kfree(nt->mw_vec);
1406 kfree(nt);
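The probe path (lines 1252 onward) allocates the context, the MW vector, and the QP vector with the NUMA-node-aware *_node allocators, and the error path at lines 1398-1406 frees them in reverse. A compressed user-space model of that goto-unwind shape; the struct, counts, and labels are illustrative:

    #include <stdlib.h>

    struct ctx {
            int *mw_vec;
            int *qp_vec;
    };

    static struct ctx *example_probe(void)
    {
            struct ctx *nt = calloc(1, sizeof(*nt));

            if (!nt)
                    return NULL;
            nt->mw_vec = calloc(4, sizeof(*nt->mw_vec));
            if (!nt->mw_vec)
                    goto err;
            nt->qp_vec = calloc(8, sizeof(*nt->qp_vec));
            if (!nt->qp_vec)
                    goto err_free_mw;
            return nt;

    err_free_mw:                    /* undo only what succeeded */
            free(nt->mw_vec);
    err:
            free(nt);
            return NULL;
    }

    int main(void)
    {
            struct ctx *nt = example_probe();

            if (nt) {
                    free(nt->qp_vec);
                    free(nt->mw_vec);
                    free(nt);
            }
            return 0;
    }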
1412 struct ntb_transport_ctx *nt = ndev->ctx;
1417 ntb_transport_link_cleanup(nt);
1418 cancel_work_sync(&nt->link_cleanup);
1419 cancel_delayed_work_sync(&nt->link_work);
1421 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1424 for (i = 0; i < nt->qp_count; i++) {
1425 qp = &nt->qp_vec[i];
1434 ntb_bus_remove(nt);
1436 for (i = nt->mw_count; i--; ) {
1437 ntb_free_mw(nt, i);
1438 iounmap(nt->mw_vec[i].vbase);
1441 kfree(nt->qp_vec);
1442 kfree(nt->mw_vec);
1443 kfree(nt);
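Teardown (lines 1412-1443) cancels the work items before touching the state they use, then releases the memory windows in reverse order with the `for (i = nt->mw_count; i--; )` idiom, unmapping each vbase along the way. The loop's counting behavior in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mw_count = 3, i;

            /* post-decrement in the condition visits 2, 1, 0 and
             * terminates cleanly even though i is unsigned */
            for (i = mw_count; i--; )
                    printf("freeing mw %u\n", i);
            return 0;
    }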
1989 struct ntb_transport_ctx *nt;
2000 nt = ndev->ctx;
2004 free_queue = ffs(nt->qp_bitmap_free);
2011 qp = &nt->qp_vec[free_queue];
2014 nt->qp_bitmap_free &= ~qp_bit;
2103 nt->qp_bitmap_free |= qp_bit;
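ntb_transport_create_queue() claims the lowest free queue by running ffs() over qp_bitmap_free and clearing that bit (lines 2004-2014); freeing the queue sets the bit back (line 2103). ffs() is 1-based and returns 0 when no bit is set, hence the decrement. A user-space replica:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
            unsigned int qp_bitmap_free = 0x0b;     /* queues 0, 1, 3 free */
            int free_queue = ffs(qp_bitmap_free);   /* 1-based, 0 if empty */

            if (!free_queue)
                    return 1;
            free_queue--;                           /* make it zero-based */
            qp_bitmap_free &= ~(1U << free_queue);  /* claim the queue */
            printf("claimed qp %d, bitmap now %#x\n",
                   free_queue, qp_bitmap_free);
            qp_bitmap_free |= 1U << free_queue;     /* release on free */
            return 0;
    }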
2440 struct ntb_transport_ctx *nt = data;
2445 if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
2446 ntb_transport_msi_peer_desc_changed(nt);
2447 ntb_db_clear(nt->ndev, nt->msi_db_mask);
2450 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
2451 ntb_db_vector_mask(nt->ndev, vector));
2455 qp = &nt->qp_vec[qp_num];
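The doorbell/MSI vector handler first checks the dedicated MSI doorbell bit (lines 2445-2447), then services only those queues that are both allocated (qp_bitmap & ~qp_bitmap_free) and routed to this vector. A standalone model of the mask intersection and the bit-walk that follows it; __builtin_ctzl() stands in for the kernel's bit helpers and all values are invented:

    #include <stdio.h>

    int main(void)
    {
            unsigned long qp_bitmap = 0x0f;
            unsigned long qp_bitmap_free = 0x04;    /* qp 2 unallocated */
            unsigned long vector_mask = 0x03;       /* DBs on this vector */
            unsigned long db_bits = qp_bitmap & ~qp_bitmap_free & vector_mask;

            while (db_bits) {
                    int qp_num = __builtin_ctzl(db_bits);   /* lowest set bit */

                    printf("servicing qp %d\n", qp_num);
                    db_bits &= ~(1UL << qp_num);
            }
            return 0;
    }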