Lines Matching defs:rdev

97 	if (!wq->rdev->wr_log)
100 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
101 (wq->rdev->wr_log_size - 1);
102 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
119 wq->rdev->wr_log[idx] = le;
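The sampling path above hands each writer a unique slot in the work-request log by post-incrementing an atomic index and masking it with the power-of-two log size. A minimal userspace sketch of that wrap-around indexing, using illustrative names (wr_log, wr_log_idx, LOG_SIZE) rather than the driver's own structures:

#include <stdatomic.h>
#include <stdint.h>

#define LOG_SIZE 4096	/* must stay a power of two for the mask to work */

struct log_entry {
	uint64_t ts;
	int valid;
};

static struct log_entry wr_log[LOG_SIZE];
static atomic_uint wr_log_idx;

static void log_event(uint64_t ts)
{
	/* fetch_add returns the old value, so this matches
	 * (atomic_inc_return(&idx) - 1) & (LOG_SIZE - 1) in kernel terms */
	unsigned int idx = atomic_fetch_add(&wr_log_idx, 1) & (LOG_SIZE - 1);

	wr_log[idx].ts = ts;
	wr_log[idx].valid = 1;
}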
130 #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
132 idx = atomic_read(&dev->rdev.wr_log_idx) &
133 (dev->rdev.wr_log_size - 1);
136 end = dev->rdev.wr_log_size - 1;
137 lep = &dev->rdev.wr_log[idx];
165 if (idx > (dev->rdev.wr_log_size - 1))
167 lep = &dev->rdev.wr_log[idx];
184 if (dev->rdev.wr_log)
185 for (i = 0; i < dev->rdev.wr_log_size; i++)
186 dev->rdev.wr_log[i].valid = 0;
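The dump path's ts2ns() macro converts raw SGE timestamp ticks to nanoseconds by scaling with the core clock period in picoseconds (cclk_ps) and dividing by 1000; div64_u64() is used in the kernel because a plain 64-bit division is not available on every 32-bit target. A hypothetical userspace equivalent, with an assumed 2000 ps clock period:

#include <stdint.h>
#include <stdio.h>

static uint64_t ts_to_ns(uint64_t ticks, uint64_t cclk_ps)
{
	return (ticks * cclk_ps) / 1000;	/* picoseconds -> nanoseconds */
}

int main(void)
{
	/* example only: a 500 MHz core clock has a 2000 ps period */
	printf("%llu ns\n", (unsigned long long)ts_to_ns(1000000, 2000));
	return 0;
}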
383 ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
386 dev_err(&stagd->devp->rdev.lldi.pdev->dev,
476 dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
477 dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
479 dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
480 dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
482 dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
483 dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
485 dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
486 dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
488 dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
489 dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
491 dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
492 dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
494 dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
495 dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
496 seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
497 seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
498 seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
501 dev->rdev.stats.db_state_transitions,
502 dev->rdev.stats.db_fc_interruptions);
503 seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
505 dev->rdev.stats.act_ofld_conn_fails);
507 dev->rdev.stats.pas_ofld_conn_fails);
508 seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
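Each resource line printed above follows the same total/cur/max/fail shape. A hedged sketch of that bookkeeping (the struct and helper below are illustrative; field names mirror the output, not necessarily the driver's own layout):

struct resource_stat {
	unsigned long long total;	/* capacity advertised by the adapter */
	unsigned long long cur;		/* currently allocated                */
	unsigned long long max;		/* high-water mark since last clear   */
	unsigned long long fail;	/* failed allocation attempts         */
};

static void resource_stat_alloc(struct resource_stat *s, int ok)
{
	if (!ok) {
		s->fail++;
		return;
	}
	s->cur++;
	if (s->cur > s->max)
		s->max = s->cur;
}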
523 mutex_lock(&dev->rdev.stats.lock);
524 dev->rdev.stats.pd.max = 0;
525 dev->rdev.stats.pd.fail = 0;
526 dev->rdev.stats.qid.max = 0;
527 dev->rdev.stats.qid.fail = 0;
528 dev->rdev.stats.stag.max = 0;
529 dev->rdev.stats.stag.fail = 0;
530 dev->rdev.stats.pbl.max = 0;
531 dev->rdev.stats.pbl.fail = 0;
532 dev->rdev.stats.rqt.max = 0;
533 dev->rdev.stats.rqt.fail = 0;
534 dev->rdev.stats.srqt.max = 0;
535 dev->rdev.stats.srqt.fail = 0;
536 dev->rdev.stats.ocqp.max = 0;
537 dev->rdev.stats.ocqp.fail = 0;
538 dev->rdev.stats.db_full = 0;
539 dev->rdev.stats.db_empty = 0;
540 dev->rdev.stats.db_drop = 0;
541 dev->rdev.stats.db_state_transitions = 0;
542 dev->rdev.stats.tcam_full = 0;
543 dev->rdev.stats.act_ofld_conn_fails = 0;
544 dev->rdev.stats.pas_ofld_conn_fails = 0;
545 mutex_unlock(&dev->rdev.stats.lock);
747 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
757 if (!(entry->qid & rdev->qpmask)) {
758 c4iw_put_resource(&rdev->resource.qid_table,
760 mutex_lock(&rdev->stats.lock);
761 rdev->stats.qid.cur -= rdev->qpmask + 1;
762 mutex_unlock(&rdev->stats.lock);
775 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
784 static int c4iw_rdev_open(struct c4iw_rdev *rdev)
789 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
796 if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
798 pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
799 rdev->lldi.ucq_density);
802 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
803 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
805 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
806 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
807 rdev->lldi.vr->cq.size);
812 if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
814 pci_name(rdev->lldi.pdev),
815 rdev->lldi.sge_host_page_size);
819 factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
820 rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
821 rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
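The qpmask/cqmask computation above multiplies the user doorbell (or CQ GTS) density per SGE host page by PAGE_SIZE / sge_host_page_size, giving the count per kernel page; since the densities are powers of two, that count minus one is a mask selecting QIDs that share a page. A small worked example with assumed, illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 65536;			/* kernel PAGE_SIZE, e.g. 64K  */
	unsigned int sge_host_page_size = 4096;		/* SGE's notion of a host page */
	unsigned int udb_density = 16;			/* user doorbells per SGE page */

	unsigned int factor = page_size / sge_host_page_size;
	unsigned int qpmask = udb_density * factor - 1;

	/* 16 doorbells per 4K SGE page * 16 SGE pages per 64K kernel page - 1 */
	printf("qpmask = 0x%x\n", qpmask);		/* prints qpmask = 0xff */
	return 0;
}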
824 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
825 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
826 rdev->lldi.vr->pbl.start,
827 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
828 rdev->lldi.vr->rq.size,
829 rdev->lldi.vr->qp.start,
830 rdev->lldi.vr->qp.size,
831 rdev->lldi.vr->cq.start,
832 rdev->lldi.vr->cq.size,
833 rdev->lldi.vr->srq.size);
835 &rdev->lldi.pdev->resource[2],
836 rdev->lldi.db_reg, rdev->lldi.gts_reg,
837 rdev->qpmask, rdev->cqmask);
839 if (c4iw_num_stags(rdev) == 0)
842 rdev->stats.pd.total = T4_MAX_NUM_PD;
843 rdev->stats.stag.total = rdev->lldi.vr->stag.size;
844 rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
845 rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
846 rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
847 rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
848 rdev->stats.qid.total = rdev->lldi.vr->qp.size;
850 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
851 T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
856 err = c4iw_pblpool_create(rdev);
861 err = c4iw_rqtpool_create(rdev);
866 err = c4iw_ocqp_pool_create(rdev);
871 rdev->status_page = (struct t4_dev_status_page *)
873 if (!rdev->status_page) {
877 rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
878 rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
879 rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
880 rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
881 rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;
884 rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
885 sizeof(*rdev->wr_log),
887 if (rdev->wr_log) {
888 rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
889 atomic_set(&rdev->wr_log_idx, 0);
893 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
894 if (!rdev->free_workq) {
899 rdev->status_page->db_off = 0;
901 init_completion(&rdev->rqt_compl);
902 init_completion(&rdev->pbl_compl);
903 kref_init(&rdev->rqt_kref);
904 kref_init(&rdev->pbl_kref);
908 if (c4iw_wr_log && rdev->wr_log)
909 kfree(rdev->wr_log);
910 free_page((unsigned long)rdev->status_page);
912 c4iw_ocqp_pool_destroy(rdev);
914 c4iw_rqtpool_destroy(rdev);
916 c4iw_pblpool_destroy(rdev);
918 c4iw_destroy_resource(&rdev->resource);
922 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
924 kfree(rdev->wr_log);
925 c4iw_release_dev_ucontext(rdev, &rdev->uctx);
926 free_page((unsigned long)rdev->status_page);
927 c4iw_pblpool_destroy(rdev);
928 c4iw_rqtpool_destroy(rdev);
929 wait_for_completion(&rdev->pbl_compl);
930 wait_for_completion(&rdev->rqt_compl);
931 c4iw_ocqp_pool_destroy(rdev);
932 destroy_workqueue(rdev->free_workq);
933 c4iw_destroy_resource(&rdev->resource);
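The pbl/rqt teardown above pairs a kref with a completion: every outstanding user of the pool holds a reference, and the close path drops the initial reference, then sleeps until the final put fires the release callback. A hypothetical kernel-style sketch of that pattern (struct pool and its helpers are illustrative, not the driver's types):

#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct pool {
	struct kref kref;
	struct completion compl;
};

static void pool_init(struct pool *p)
{
	kref_init(&p->kref);		/* the initial reference */
	init_completion(&p->compl);
}

static void pool_release(struct kref *kref)
{
	struct pool *p = container_of(kref, struct pool, kref);

	complete(&p->compl);		/* last reference is gone */
}

static void pool_destroy(struct pool *p)
{
	kref_put(&p->kref, pool_release);	/* drop the initial reference   */
	wait_for_completion(&p->compl);		/* block until users are done   */
}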
938 c4iw_rdev_close(&ctx->dev->rdev);
945 if (ctx->dev->rdev.bar2_kva)
946 iounmap(ctx->dev->rdev.bar2_kva);
947 if (ctx->dev->rdev.oc_mw_kva)
948 iounmap(ctx->dev->rdev.oc_mw_kva);
987 devp->rdev.lldi = *infop;
991 devp->rdev.lldi.sge_ingpadboundary,
992 devp->rdev.lldi.sge_egrstatuspagesize);
994 devp->rdev.hw_queue.t4_eq_status_entries =
995 devp->rdev.lldi.sge_egrstatuspagesize / 64;
996 devp->rdev.hw_queue.t4_max_eq_size = 65520;
997 devp->rdev.hw_queue.t4_max_iq_size = 65520;
998 devp->rdev.hw_queue.t4_max_rq_size = 8192 -
999 devp->rdev.hw_queue.t4_eq_status_entries - 1;
1000 devp->rdev.hw_queue.t4_max_sq_size =
1001 devp->rdev.hw_queue.t4_max_eq_size -
1002 devp->rdev.hw_queue.t4_eq_status_entries - 1;
1003 devp->rdev.hw_queue.t4_max_qp_depth =
1004 devp->rdev.hw_queue.t4_max_rq_size;
1005 devp->rdev.hw_queue.t4_max_cq_depth =
1006 devp->rdev.hw_queue.t4_max_iq_size - 2;
1007 devp->rdev.hw_queue.t4_stat_len =
1008 devp->rdev.lldi.sge_egrstatuspagesize;
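The hardware-queue sizing above divides the egress status page size by 64 to get the number of EQ slots the status page occupies, then subtracts those slots plus one reserved slot from the raw queue capacities. A worked example, assuming a 128-byte status page:

#include <stdio.h>

int main(void)
{
	unsigned int egrstatuspagesize = 128;				/* assumed value  */
	unsigned int eq_status_entries = egrstatuspagesize / 64;	/* 2 slots        */

	unsigned int max_eq_size = 65520;
	unsigned int max_rq_size = 8192 - eq_status_entries - 1;	/* 8189           */
	unsigned int max_sq_size = max_eq_size - eq_status_entries - 1; /* 65517          */

	printf("rq %u sq %u\n", max_rq_size, max_sq_size);
	return 0;
}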
1015 devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
1016 if (!is_t4(devp->rdev.lldi.adapter_type)) {
1017 devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
1018 pci_resource_len(devp->rdev.lldi.pdev, 2));
1019 if (!devp->rdev.bar2_kva) {
1025 devp->rdev.oc_mw_pa =
1026 pci_resource_start(devp->rdev.lldi.pdev, 2) +
1027 pci_resource_len(devp->rdev.lldi.pdev, 2) -
1028 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
1029 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
1030 devp->rdev.lldi.vr->ocq.size);
1031 if (!devp->rdev.oc_mw_kva) {
1039 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
1040 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
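The on-chip queue (OCQP) memory window above is carved out of the end of PCI BAR2 and mapped write-combining so queue writes can be burst to the adapter. A kernel-style sketch of that mapping, assuming a hypothetical helper name:

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/log2.h>

/* map_ocq() is an illustrative helper, not a driver function */
static void __iomem *map_ocq(struct pci_dev *pdev, u32 ocq_size)
{
	resource_size_t bar2_start = pci_resource_start(pdev, 2);
	resource_size_t bar2_len = pci_resource_len(pdev, 2);
	resource_size_t ocq_pa = bar2_start + bar2_len -
				 roundup_pow_of_two(ocq_size);

	/* write-combining mapping of the on-chip queue window */
	return ioremap_wc(ocq_pa, ocq_size);
}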
1042 ret = c4iw_rdev_open(&devp->rdev);
1044 pr_err("Unable to open CXIO rdev err %d\n", ret);
1055 mutex_init(&devp->rdev.stats.lock);
1059 devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
1063 pci_name(devp->rdev.lldi.pdev),
1151 skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
1252 ctx->dev->rdev.flags |= T4_FATAL_ERROR;
1274 ctx->dev->rdev.stats.db_state_transitions++;
1276 if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
1280 ctx->dev->rdev.status_page->db_off = 1;
1323 ctx->dev->rdev.stats.db_state_transitions++;
1324 if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
1328 ctx->dev->rdev.status_page->db_off = 0;
1332 if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
1333 < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
1351 ctx->dev->rdev.stats.db_fc_interruptions++;
1378 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1391 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1408 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
1428 ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
1477 ctx->dev->rdev.stats.db_full++;
1481 mutex_lock(&ctx->dev->rdev.stats.lock);
1482 ctx->dev->rdev.stats.db_empty++;
1483 mutex_unlock(&ctx->dev->rdev.stats.lock);
1487 mutex_lock(&ctx->dev->rdev.stats.lock);
1488 ctx->dev->rdev.stats.db_drop++;
1489 mutex_unlock(&ctx->dev->rdev.stats.lock);