Lines Matching refs:dev

198 struct mlx4_dev *dev = &priv->dev;
200 ctx->val.vbool = dev->persist->crdump.snapshot_enable;
208 struct mlx4_dev *dev = &priv->dev;
210 dev->persist->crdump.snapshot_enable = ctx->val.vbool;
292 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
299 dev->caps.reserved_uars =
301 mlx4_get_num_reserved_uar(dev),
303 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
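
The computation above rescales the firmware's reserved-UAR count, reported in system-page units, into driver UAR pages (the elided middle argument divides dev_cap->reserved_uars), then takes the max against the driver's own floor. A minimal stand-alone sketch of that adjustment, with illustrative constants standing in for the driver's values:

    #include <stdio.h>

    #define SYS_PAGE_SHIFT   12  /* PAGE_SHIFT on a 4 KiB-page kernel        */
    #define UAR_PAGE_SHIFT   12  /* dev->uar_page_shift                      */
    #define MIN_RESERVED_UAR 8   /* stand-in for mlx4_get_num_reserved_uar() */

    static int set_num_reserved_uars(int fw_reserved_uars)
    {
        /* Firmware counts reserved UARs in system pages; rescale to UAR
         * pages, then clamp to the driver's own minimum. */
        int scaled = fw_reserved_uars >> (SYS_PAGE_SHIFT - UAR_PAGE_SHIFT);

        return scaled > MIN_RESERVED_UAR ? scaled : MIN_RESERVED_UAR;
    }

    int main(void)
    {
        printf("reserved_uars = %d\n", set_num_reserved_uars(32));
        return 0;
    }
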
306 int mlx4_check_port_params(struct mlx4_dev *dev,
311 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
312 for (i = 0; i < dev->caps.num_ports - 1; i++) {
314 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
320 for (i = 0; i < dev->caps.num_ports; i++) {
321 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
322 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
330 static void mlx4_set_port_mask(struct mlx4_dev *dev)
334 for (i = 1; i <= dev->caps.num_ports; ++i)
335 dev->caps.port_mask[i] = dev->caps.port_type[i];
342 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
347 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
348 err = mlx4_QUERY_FUNC(dev, &func, 0);
350 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
361 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
363 struct mlx4_caps *dev_cap = &dev->caps;
381 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
386 if (mlx4_is_master(dev))
390 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
396 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
399 dev->caps.vl_cap[port] = port_cap->max_vl;
400 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
401 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
402 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
406 dev->caps.gid_table_len[port] = port_cap->max_gids;
407 dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
408 dev->caps.port_width_cap[port] = port_cap->max_port_width;
409 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
410 dev->caps.max_tc_eth = port_cap->max_tc_eth;
411 dev->caps.def_mac[port] = port_cap->def_mac;
412 dev->caps.supported_type[port] = port_cap->supported_port_types;
413 dev->caps.suggested_type[port] = port_cap->suggested_type;
414 dev->caps.default_sense[port] = port_cap->default_sense;
415 dev->caps.trans_type[port] = port_cap->trans_type;
416 dev->caps.vendor_oui[port] = port_cap->vendor_oui;
417 dev->caps.wavelength[port] = port_cap->wavelength;
418 dev->caps.trans_code[port] = port_cap->trans_code;
423 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
428 err = mlx4_QUERY_PORT(dev, port, port_cap);
431 mlx4_err(dev, "QUERY_PORT command failed.\n");
436 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
438 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
441 if (mlx4_is_mfunc(dev)) {
442 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS\n");
443 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
447 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
448 mlx4_dbg(dev,
450 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
456 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
461 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
463 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
466 mlx4_dev_cap_dump(dev, dev_cap);
469 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
474 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
479 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
480 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
483 pci_resource_len(dev->persist->pdev, 2));
487 dev->caps.num_ports = dev_cap->num_ports;
488 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
489 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
490 dev->caps.num_sys_eqs :
492 for (i = 1; i <= dev->caps.num_ports; ++i) {
493 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
495 mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
500 dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
501 dev->caps.uar_page_size = PAGE_SIZE;
502 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
503 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
504 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
505 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
506 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
507 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
508 dev->caps.max_wqes = dev_cap->max_qp_sz;
509 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
510 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
511 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
512 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
513 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
519 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
520 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
521 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
522 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
523 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
525 dev->caps.reserved_pds = dev_cap->reserved_pds;
526 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
528 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
530 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
532 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
533 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
534 dev->caps.flags = dev_cap->flags;
535 dev->caps.flags2 = dev_cap->flags2;
536 dev->caps.bmme_flags = dev_cap->bmme_flags;
537 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
538 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
539 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
540 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
541 dev->caps.wol_port[1] = dev_cap->wol_port[1];
542 dev->caps.wol_port[2] = dev_cap->wol_port[2];
543 dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
546 if (!mlx4_is_slave(dev)) {
550 if (enable_4k_uar || !dev->persist->num_vfs)
551 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
553 dev->uar_page_shift = PAGE_SHIFT;
555 mlx4_set_num_reserved_uars(dev, dev_cap);
558 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
562 err = mlx4_QUERY_HCA(dev, &hca_param);
570 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
574 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
575 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
577 if (mlx4_is_mfunc(dev))
578 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
581 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
582 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
584 dev->caps.log_num_macs = log_num_mac;
585 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
588 for (i = 1; i <= dev->caps.num_ports; ++i) {
589 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
590 if (dev->caps.supported_type[i]) {
592 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
593 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
595 else if (dev->caps.supported_type[i] ==
597 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
603 dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
606 dev->caps.port_type[i] = port_type_array[i - 1];
615 mlx4_priv(dev)->sense.sense_allowed[i] =
616 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
617 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
618 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
625 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
627 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
628 mlx4_SENSE_PORT(dev, i, &sensed_port);
630 dev->caps.port_type[i] = sensed_port;
632 dev->caps.possible_type[i] = dev->caps.port_type[i];
635 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
636 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
637 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
638 i, 1 << dev->caps.log_num_macs);
640 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
641 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
642 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
643 i, 1 << dev->caps.log_num_vlans);
647 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
650 mlx4_warn(dev,
652 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
655 dev->caps.max_counters = dev_cap->max_counters;
657 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
658 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
659 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
660 (1 << dev->caps.log_num_macs) *
661 (1 << dev->caps.log_num_vlans) *
662 dev->caps.num_ports;
663 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
666 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
667 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
669 dev->caps.dmfs_high_rate_qpn_base =
670 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
673 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
674 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
675 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
676 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
678 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
679 dev->caps.dmfs_high_rate_qpn_base =
680 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
681 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
684 dev->caps.rl_caps = dev_cap->rl_caps;
686 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
687 dev->caps.dmfs_high_rate_qpn_range;
689 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
690 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
691 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
692 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
694 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
696 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
699 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
700 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
701 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
707 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
713 if ((dev->caps.flags &
715 mlx4_is_master(dev))
716 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
718 if (!mlx4_is_slave(dev)) {
719 mlx4_enable_cqe_eqe_stride(dev);
720 dev->caps.alloc_res_qp_mask =
721 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
724 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
725 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
726 mlx4_warn(dev, "Old device ETS support detected\n");
727 mlx4_warn(dev, "Consider upgrading device FW.\n");
728 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
732 dev->caps.alloc_res_qp_mask = 0;
735 mlx4_enable_ignore_fcs(dev);
741 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
743 struct mlx4_priv *priv = mlx4_priv(dev);
748 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
752 mlx4_warn(dev, "%s: slave: %d is still active\n",
760 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
764 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
765 qpn < dev->phys_caps.base_proxy_sqpn)
768 if (qpn >= dev->phys_caps.base_tunnel_sqpn)
770 qk += qpn - dev->phys_caps.base_tunnel_sqpn;
772 qk += qpn - dev->phys_caps.base_proxy_sqpn;
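
The range checks above map a proxy/tunnel special QP number onto a paravirtualized qkey as base + offset from whichever region the QPN falls in. A hedged stand-alone sketch of the same logic (QKEY_BASE and MFUNC_MAX are placeholders for the driver's constants, which do not appear in this listing):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define QKEY_BASE 0x80000000u  /* placeholder for the reserved qkey base */
    #define MFUNC_MAX 128          /* placeholder for MLX4_MFUNC_MAX         */

    static int get_parav_qkey(uint32_t base_proxy_sqpn, uint32_t base_tunnel_sqpn,
                              uint32_t qpn, uint32_t *qkey)
    {
        uint32_t qk = QKEY_BASE;

        /* Valid QPNs live in [base_proxy_sqpn, base_tunnel_sqpn + 8 * MFUNC_MAX). */
        if (qpn >= base_tunnel_sqpn + 8 * MFUNC_MAX || qpn < base_proxy_sqpn)
            return -EINVAL;

        /* Tunnel QPs sit above proxy QPs; either way the qkey is base + offset. */
        if (qpn >= base_tunnel_sqpn)
            qk += qpn - base_tunnel_sqpn;
        else
            qk += qpn - base_proxy_sqpn;

        *qkey = qk;
        return 0;
    }

    int main(void)
    {
        uint32_t qkey;

        if (!get_parav_qkey(64, 1088, 70, &qkey))
            printf("qkey = 0x%x\n", qkey);
        return 0;
    }
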
778 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
780 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
782 if (!mlx4_is_master(dev))
789 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
791 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
793 if (!mlx4_is_master(dev))
800 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
802 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
804 if (!mlx4_is_master(dev))
811 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
813 struct mlx4_priv *priv = mlx4_priv(dev);
816 if (!mlx4_is_master(dev))
841 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
845 dev->caps.steering_mode = hca_param->steering_mode;
846 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
847 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
848 dev->caps.fs_log_max_ucast_qp_range_size =
851 dev->caps.num_qp_per_mgm =
854 mlx4_dbg(dev, "Steering mode is: %s\n",
855 mlx4_steering_mode_str(dev->caps.steering_mode));
858 static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
860 kfree(dev->caps.spec_qps);
861 dev->caps.spec_qps = NULL;
864 static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
867 struct mlx4_caps *caps = &dev->caps;
874 mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
880 err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
882 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
889 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
893 mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
901 mlx4_slave_destroy_special_qp_cap(dev);
906 static int mlx4_slave_cap(struct mlx4_dev *dev)
918 mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
923 err = mlx4_QUERY_HCA(dev, hca_param);
925 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
933 mlx4_err(dev, "Unknown hca global capabilities\n");
938 dev->caps.hca_core_clock = hca_param->hca_core_clock;
940 dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
941 err = mlx4_dev_cap(dev, dev_cap);
943 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
947 err = mlx4_QUERY_FW(dev);
949 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
951 page_size = ~dev->caps.page_size_cap + 1;
952 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
954 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
961 dev->uar_page_shift = hca_param->uar_page_sz + 12;
964 if (dev->uar_page_shift > PAGE_SHIFT) {
965 mlx4_err(dev,
972 mlx4_set_num_reserved_uars(dev, dev_cap);
978 dev->caps.uar_page_size = PAGE_SIZE;
980 err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
982 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
989 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
996 dev->caps.num_ports = func_cap->num_ports;
997 dev->quotas.qp = func_cap->qp_quota;
998 dev->quotas.srq = func_cap->srq_quota;
999 dev->quotas.cq = func_cap->cq_quota;
1000 dev->quotas.mpt = func_cap->mpt_quota;
1001 dev->quotas.mtt = func_cap->mtt_quota;
1002 dev->caps.num_qps = 1 << hca_param->log_num_qps;
1003 dev->caps.num_srqs = 1 << hca_param->log_num_srqs;
1004 dev->caps.num_cqs = 1 << hca_param->log_num_cqs;
1005 dev->caps.num_mpts = 1 << hca_param->log_mpt_sz;
1006 dev->caps.num_eqs = func_cap->max_eq;
1007 dev->caps.reserved_eqs = func_cap->reserved_eq;
1008 dev->caps.reserved_lkey = func_cap->reserved_lkey;
1009 dev->caps.num_pds = MLX4_NUM_PDS;
1010 dev->caps.num_mgms = 0;
1011 dev->caps.num_amgms = 0;
1013 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
1014 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
1015 dev->caps.num_ports, MLX4_MAX_PORTS);
1020 mlx4_replace_zero_macs(dev);
1022 err = mlx4_slave_special_qp_cap(dev);
1024 mlx4_err(dev, "Set special QP caps failed, aborting\n");
1028 if (dev->caps.uar_page_size * (dev->caps.num_uars -
1029 dev->caps.reserved_uars) >
1030 pci_resource_len(dev->persist->pdev,
1032 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
1033 dev->caps.uar_page_size * dev->caps.num_uars,
1035 pci_resource_len(dev->persist->pdev, 2));
1041 dev->caps.eqe_size = 64;
1042 dev->caps.eqe_factor = 1;
1044 dev->caps.eqe_size = 32;
1045 dev->caps.eqe_factor = 0;
1049 dev->caps.cqe_size = 64;
1050 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1052 dev->caps.cqe_size = 32;
1056 dev->caps.eqe_size = hca_param->eqe_size;
1057 dev->caps.eqe_factor = 0;
1061 dev->caps.cqe_size = hca_param->cqe_size;
1063 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1066 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1067 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
1069 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
1070 mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");
1072 slave_adjust_steering_mode(dev, dev_cap, hca_param);
1073 mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
1077 dev->caps.bf_reg_size)
1078 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
1081 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
1085 mlx4_slave_destroy_special_qp_cap(dev);
1097 int mlx4_change_port_types(struct mlx4_dev *dev,
1104 for (port = 0; port < dev->caps.num_ports; port++) {
1107 if (port_types[port] != dev->caps.port_type[port + 1])
1111 mlx4_unregister_device(dev);
1112 for (port = 1; port <= dev->caps.num_ports; port++) {
1113 mlx4_CLOSE_PORT(dev, port);
1114 dev->caps.port_type[port] = port_types[port - 1];
1115 err = mlx4_SET_PORT(dev, port, -1);
1117 mlx4_err(dev, "Failed to set port %d, aborting\n",
1122 mlx4_set_port_mask(dev);
1123 err = mlx4_register_device(dev);
1125 mlx4_err(dev, "Failed to register device\n");
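
Note the mixed indexing in mlx4_change_port_types() above: the caller's port_types[] array is 0-based while the caps tables are 1-based, so port n is port_types[n - 1] versus caps.port_type[n]. A small sketch of the change-detection loop under that convention (names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PORTS 2

    /* 1-based like dev->caps.port_type[]; slot 0 is unused. */
    static int cur_port_type[MAX_PORTS + 1] = { 0, 1, 2 };

    static bool port_types_changed(const int *new_types, int num_ports)
    {
        for (int port = 0; port < num_ports; port++)
            if (new_types[port] != cur_port_type[port + 1])
                return true;
        return false;
    }

    int main(void)
    {
        int wanted[MAX_PORTS] = { 1, 1 };

        printf("change needed: %d\n", port_types_changed(wanted, MAX_PORTS));
        return 0;
    }
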
1134 static ssize_t show_port_type(struct device *dev,
1140 struct mlx4_dev *mdev = info->dev;
1157 struct mlx4_dev *mdev = info->dev;
1220 static ssize_t set_port_type(struct device *dev,
1226 struct mlx4_dev *mdev = info->dev;
1285 static ssize_t show_port_ib_mtu(struct device *dev,
1291 struct mlx4_dev *mdev = info->dev;
1301 static ssize_t set_port_ib_mtu(struct device *dev,
1307 struct mlx4_dev *mdev = info->dev;
1348 static int mlx4_mf_bond(struct mlx4_dev *dev)
1355 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
1356 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
1360 dev->persist->num_vfs + 1) > 1) {
1361 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
1368 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1369 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1373 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1378 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1379 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
1383 err = mlx4_bond_mac_table(dev);
1386 err = mlx4_bond_vlan_table(dev);
1389 err = mlx4_bond_fs_rules(dev);
1395 (void)mlx4_unbond_vlan_table(dev);
1397 (void)mlx4_unbond_mac_table(dev);
1401 static int mlx4_mf_unbond(struct mlx4_dev *dev)
1405 ret = mlx4_unbond_fs_rules(dev);
1407 mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
1408 ret1 = mlx4_unbond_mac_table(dev);
1410 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
1413 ret1 = mlx4_unbond_vlan_table(dev);
1415 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
1421 static int mlx4_bond(struct mlx4_dev *dev)
1424 struct mlx4_priv *priv = mlx4_priv(dev);
1428 if (!mlx4_is_bonded(dev)) {
1429 ret = mlx4_do_bond(dev, true);
1431 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1432 if (!ret && mlx4_is_master(dev)) {
1433 ret = mlx4_mf_bond(dev);
1435 mlx4_err(dev, "bond for multifunction failed\n");
1436 mlx4_do_bond(dev, false);
1443 mlx4_dbg(dev, "Device is bonded\n");
1448 static int mlx4_unbond(struct mlx4_dev *dev)
1451 struct mlx4_priv *priv = mlx4_priv(dev);
1455 if (mlx4_is_bonded(dev)) {
1458 ret = mlx4_do_bond(dev, false);
1460 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1461 if (mlx4_is_master(dev))
1462 ret2 = mlx4_mf_unbond(dev);
1464 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
1471 mlx4_dbg(dev, "Device is unbonded\n");
1476 static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1480 struct mlx4_priv *priv = mlx4_priv(dev);
1483 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1504 err = mlx4_virt2phy_port_map(dev, port1, port2);
1506 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1511 mlx4_err(dev, "Failed to change port map: %d\n", err);
1521 struct mlx4_dev *dev;
1532 if (!mlx4_is_bonded(bond->dev)) {
1533 err = mlx4_bond(bond->dev);
1535 mlx4_err(bond->dev, "Failed to bond device\n");
1538 err = mlx4_port_map_set(bond->dev, &bond->port_map);
1540 mlx4_err(bond->dev,
1545 } else if (mlx4_is_bonded(bond->dev)) {
1546 err = mlx4_unbond(bond->dev);
1548 mlx4_err(bond->dev, "Failed to unbond device\n");
1550 put_device(&bond->dev->persist->pdev->dev);
1554 int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
1564 get_device(&dev->persist->pdev->dev);
1565 bond->dev = dev;
1574 static int mlx4_load_fw(struct mlx4_dev *dev)
1576 struct mlx4_priv *priv = mlx4_priv(dev);
1579 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1582 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1586 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1588 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1592 err = mlx4_RUN_FW(dev);
1594 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1601 mlx4_UNMAP_FA(dev);
1604 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1608 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1611 struct mlx4_priv *priv = mlx4_priv(dev);
1615 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1619 cmpt_entry_sz, dev->caps.num_qps,
1620 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1625 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1629 cmpt_entry_sz, dev->caps.num_srqs,
1630 dev->caps.reserved_srqs, 0, 0);
1634 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1638 cmpt_entry_sz, dev->caps.num_cqs,
1639 dev->caps.reserved_cqs, 0, 0);
1643 num_eqs = dev->phys_caps.num_phys_eqs;
1644 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1655 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1658 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1661 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1667 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1670 struct mlx4_priv *priv = mlx4_priv(dev);
1675 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1677 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1681 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1685 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1688 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1692 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1694 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1698 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1700 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1705 num_eqs = dev->phys_caps.num_phys_eqs;
1706 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1710 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1718 * dev->caps.mtt_entry_sz below is really the MTT segment
1721 dev->caps.reserved_mtts =
1722 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1723 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1725 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1727 dev->caps.mtt_entry_sz,
1728 dev->caps.num_mtts,
1729 dev->caps.reserved_mtts, 1, 0);
1731 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1735 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1738 dev->caps.num_mpts,
1739 dev->caps.reserved_mrws, 1, 1);
1741 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1745 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1748 dev->caps.num_qps,
1749 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1752 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1756 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1759 dev->caps.num_qps,
1760 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1763 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1767 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1770 dev->caps.num_qps,
1771 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1774 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1778 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1781 dev->caps.num_qps,
1782 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1785 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1789 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1792 dev->caps.num_cqs,
1793 dev->caps.reserved_cqs, 0, 0);
1795 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1799 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1802 dev->caps.num_srqs,
1803 dev->caps.reserved_srqs, 0, 0);
1805 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1816 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1818 mlx4_get_mgm_entry_size(dev),
1819 dev->caps.num_mgms + dev->caps.num_amgms,
1820 dev->caps.num_mgms + dev->caps.num_amgms,
1823 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1830 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1833 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1836 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1839 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1842 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1845 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1848 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1851 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1854 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1857 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1858 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1859 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1860 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1863 mlx4_UNMAP_ICM_AUX(dev);
1866 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
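
Within mlx4_init_icm() above, reserved_mtts is rounded up (lines 1721-1723) so the reserved MTT region ends on a DMA cache-line boundary: the count is converted to bytes, aligned, and converted back. The same arithmetic in isolation, with example values (the real entry size and alignment come from the device and dma_get_cache_alignment()):

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        unsigned int reserved_mtts = 100;
        unsigned int mtt_entry_sz  = 8;    /* bytes per MTT entry (example) */
        unsigned int cache_align   = 128;  /* dma_get_cache_alignment()     */

        /* Align the reserved region's byte size, then convert back to entries. */
        reserved_mtts = ALIGN_UP(reserved_mtts * mtt_entry_sz, cache_align)
                        / mtt_entry_sz;

        printf("reserved_mtts = %u\n", reserved_mtts);  /* 112 */
        return 0;
    }
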
1871 static void mlx4_free_icms(struct mlx4_dev *dev)
1873 struct mlx4_priv *priv = mlx4_priv(dev);
1875 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1876 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1877 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1878 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1879 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1880 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1881 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1882 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1883 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1884 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1885 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1886 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1887 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1888 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1890 mlx4_UNMAP_ICM_AUX(dev);
1891 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1894 static void mlx4_slave_exit(struct mlx4_dev *dev)
1896 struct mlx4_priv *priv = mlx4_priv(dev);
1899 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1901 mlx4_warn(dev, "Failed to close slave function\n");
1905 static int map_bf_area(struct mlx4_dev *dev)
1907 struct mlx4_priv *priv = mlx4_priv(dev);
1912 if (!dev->caps.bf_reg_size)
1915 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1916 (dev->caps.num_uars << PAGE_SHIFT);
1917 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1918 (dev->caps.num_uars << PAGE_SHIFT);
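
The arithmetic in map_bf_area() carves BAR 2 into a UAR region followed by the BlueFlame region: UAR pages occupy the front, BlueFlame takes whatever remains. A stand-alone sketch of that split (the values are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t bar2_start = 0xf0000000ull; /* pci_resource_start(pdev, 2) */
        uint64_t bar2_len   = 1ull << 23;    /* pci_resource_len(pdev, 2)   */
        uint64_t num_uars   = 1024;

        /* UAR pages fill the start of BAR 2; BlueFlame gets the rest. */
        uint64_t bf_start = bar2_start + (num_uars << PAGE_SHIFT);
        uint64_t bf_len   = bar2_len   - (num_uars << PAGE_SHIFT);

        printf("bf area: start=0x%llx len=0x%llx\n",
               (unsigned long long)bf_start, (unsigned long long)bf_len);
        return 0;
    }
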
1926 static void unmap_bf_area(struct mlx4_dev *dev)
1928 if (mlx4_priv(dev)->bf_mapping)
1929 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1932 u64 mlx4_read_clock(struct mlx4_dev *dev)
1937 struct mlx4_priv *priv = mlx4_priv(dev);
1954 static int map_internal_clock(struct mlx4_dev *dev)
1956 struct mlx4_priv *priv = mlx4_priv(dev);
1959 ioremap(pci_resource_start(dev->persist->pdev,
1969 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1972 struct mlx4_priv *priv = mlx4_priv(dev);
1974 if (mlx4_is_slave(dev))
1977 if (!dev->caps.map_clock_to_user) {
1978 mlx4_dbg(dev, "Map clock to user is not supported.\n");
1993 static void unmap_internal_clock(struct mlx4_dev *dev)
1995 struct mlx4_priv *priv = mlx4_priv(dev);
2001 static void mlx4_close_hca(struct mlx4_dev *dev)
2003 unmap_internal_clock(dev);
2004 unmap_bf_area(dev);
2005 if (mlx4_is_slave(dev))
2006 mlx4_slave_exit(dev);
2008 mlx4_CLOSE_HCA(dev, 0);
2009 mlx4_free_icms(dev);
2013 static void mlx4_close_fw(struct mlx4_dev *dev)
2015 if (!mlx4_is_slave(dev)) {
2016 mlx4_UNMAP_FA(dev);
2017 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
2021 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
2028 struct mlx4_priv *priv = mlx4_priv(dev);
2042 if (dev->persist->interface_state &
2053 mlx4_err(dev, "Communication channel is offline.\n");
2057 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
2061 struct mlx4_priv *priv = mlx4_priv(dev);
2070 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
2073 static int mlx4_init_slave(struct mlx4_dev *dev)
2075 struct mlx4_priv *priv = mlx4_priv(dev);
2082 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
2088 if (mlx4_comm_check_offline(dev)) {
2089 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
2093 mlx4_reset_vf_support(dev);
2094 mlx4_warn(dev, "Sending reset\n");
2095 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
2101 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
2115 mlx4_err(dev, "slave driver version is not supported by the master\n");
2119 mlx4_warn(dev, "Sending vhcr0\n");
2120 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2123 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2126 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2129 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2137 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2143 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2147 for (i = 1; i <= dev->caps.num_ports; i++) {
2148 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2149 dev->caps.gid_table_len[i] =
2150 mlx4_get_slave_num_gids(dev, 0, i);
2152 dev->caps.gid_table_len[i] = 1;
2153 dev->caps.pkey_table_len[i] =
2154 dev->phys_caps.pkey_phys_table_len[i] - 1;
2196 static void choose_steering_mode(struct mlx4_dev *dev,
2201 if (dev->caps.dmfs_high_steer_mode ==
2203 mlx4_err(dev, "DMFS high rate mode not supported\n");
2205 dev->caps.dmfs_high_steer_mode =
2212 (!mlx4_is_mfunc(dev) ||
2214 (dev->persist->num_vfs + 1))) &&
2217 dev->oper_log_mgm_entry_size =
2219 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2220 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2221 dev->caps.fs_log_max_ucast_qp_range_size =
2224 if (dev->caps.dmfs_high_steer_mode !=
2226 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2227 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2228 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2229 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2231 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2233 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2234 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2235 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2237 dev->oper_log_mgm_entry_size =
2241 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2243 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2244 mlx4_steering_mode_str(dev->caps.steering_mode),
2245 dev->oper_log_mgm_entry_size,
2249 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2252 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2254 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2256 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2258 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
2262 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2267 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2270 for (i = 1; i <= dev->caps.num_ports; i++) {
2271 if (mlx4_dev_port(dev, i, &port_cap)) {
2272 mlx4_err(dev,
2274 } else if ((dev->caps.dmfs_high_steer_mode !=
2277 !!(dev->caps.dmfs_high_steer_mode ==
2279 mlx4_err(dev,
2282 dev->caps.dmfs_high_steer_mode),
2291 static int mlx4_init_fw(struct mlx4_dev *dev)
2296 if (!mlx4_is_slave(dev)) {
2297 err = mlx4_QUERY_FW(dev);
2300 mlx4_info(dev, "non-primary physical function, skipping\n");
2302 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2306 err = mlx4_load_fw(dev);
2308 mlx4_err(dev, "Failed to start FW, aborting\n");
2314 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2316 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2322 static int mlx4_init_hca(struct mlx4_dev *dev)
2324 struct mlx4_priv *priv = mlx4_priv(dev);
2333 if (!mlx4_is_slave(dev)) {
2342 err = mlx4_dev_cap(dev, dev_cap);
2344 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2348 choose_steering_mode(dev, dev_cap);
2349 choose_tunnel_offload_mode(dev, dev_cap);
2351 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2352 mlx4_is_master(dev))
2353 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2355 err = mlx4_get_phys_port_id(dev);
2357 mlx4_err(dev, "Failed to get physical port id\n");
2359 if (mlx4_is_master(dev))
2360 mlx4_parav_master_pf_caps(dev);
2363 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2368 if (dev->caps.steering_mode ==
2372 icm_size = mlx4_make_profile(dev, &profile, dev_cap,
2379 if (enable_4k_uar || !dev->persist->num_vfs) {
2380 init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
2384 init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
2389 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2390 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2393 err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
2397 err = mlx4_INIT_HCA(dev, init_hca);
2399 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2404 err = mlx4_query_func(dev, dev_cap);
2406 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2409 dev->caps.num_eqs = dev_cap->max_eqs;
2410 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
2411 dev->caps.reserved_uars = dev_cap->reserved_uars;
2419 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2420 err = mlx4_QUERY_HCA(dev, init_hca);
2422 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2423 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2425 dev->caps.hca_core_clock =
2432 if (!dev->caps.hca_core_clock) {
2433 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2434 mlx4_err(dev,
2436 } else if (map_internal_clock(dev)) {
2441 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2442 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2446 if (dev->caps.dmfs_high_steer_mode !=
2448 if (mlx4_validate_optimized_steering(dev))
2449 mlx4_warn(dev, "Optimized steering validation failed\n");
2451 if (dev->caps.dmfs_high_steer_mode ==
2453 dev->caps.dmfs_high_rate_qpn_base =
2454 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2455 dev->caps.dmfs_high_rate_qpn_range =
2459 mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
2461 dev->caps.dmfs_high_steer_mode));
2464 err = mlx4_init_slave(dev);
2467 mlx4_err(dev, "Failed to initialize slave\n");
2471 err = mlx4_slave_cap(dev);
2473 mlx4_err(dev, "Failed to obtain slave caps\n");
2478 if (map_bf_area(dev))
2479 mlx4_dbg(dev, "Failed to map blue flame area\n");
2482 if (!mlx4_is_slave(dev))
2483 mlx4_set_port_mask(dev);
2485 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2487 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2492 err = mlx4_config_dev_retrieval(dev, &params);
2494 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2496 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2497 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2500 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2506 unmap_internal_clock(dev);
2507 unmap_bf_area(dev);
2509 if (mlx4_is_slave(dev))
2510 mlx4_slave_destroy_special_qp_cap(dev);
2513 if (mlx4_is_slave(dev))
2514 mlx4_slave_exit(dev);
2516 mlx4_CLOSE_HCA(dev, 0);
2519 if (!mlx4_is_slave(dev))
2520 mlx4_free_icms(dev);
2529 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2531 struct mlx4_priv *priv = mlx4_priv(dev);
2534 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2537 if (!dev->caps.max_counters)
2540 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2544 nent_pow2 - dev->caps.max_counters + 1);
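
Rounding max_counters up to a power of two leaves a tail of indices past the real capability, and one further slot appears to be held back on top (the listing elsewhere uses MLX4_SINK_COUNTER_INDEX), hence the reserved-top count of nent_pow2 - max_counters + 1. A quick check of that arithmetic:

    #include <stdio.h>

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int max_counters = 125;  /* example capability value */
        unsigned int nent_pow2 = roundup_pow2(max_counters);

        /* Indices past max_counters are unusable, plus one extra slot
         * reserved at the top of the bitmap. */
        printf("bitmap size %u, reserved at top %u\n",
               nent_pow2, nent_pow2 - max_counters + 1);  /* 128, 4 */
        return 0;
    }
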
2547 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2549 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2552 if (!dev->caps.max_counters)
2555 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2558 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2560 struct mlx4_priv *priv = mlx4_priv(dev);
2563 for (port = 0; port < dev->caps.num_ports; port++)
2565 mlx4_counter_free(dev, priv->def_counter[port]);
2568 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2570 struct mlx4_priv *priv = mlx4_priv(dev);
2574 for (port = 0; port < dev->caps.num_ports; port++)
2577 for (port = 0; port < dev->caps.num_ports; port++) {
2578 err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
2586 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2587 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2588 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2589 MLX4_SINK_COUNTER_INDEX(dev));
2592 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2594 mlx4_cleanup_default_counters(dev);
2598 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2605 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2607 struct mlx4_priv *priv = mlx4_priv(dev);
2609 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2614 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2621 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
2627 if (mlx4_is_mfunc(dev)) {
2628 err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
2637 return __mlx4_counter_alloc(dev, idx);
2641 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2648 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2652 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2656 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2660 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2662 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2665 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2668 __mlx4_clear_if_stat(dev, idx);
2670 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2674 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2678 if (mlx4_is_mfunc(dev)) {
2680 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2685 __mlx4_counter_free(dev, idx);
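
mlx4_counter_alloc()/mlx4_counter_free() above follow the driver's usual multi-function split: on a multi-function device the request goes over the command channel (mlx4_cmd_imm()/mlx4_cmd() with RES_COUNTER), otherwise the __mlx4_* helper manipulates the bitmap directly. A schematic user-space sketch of that dispatch; every name and type here is illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_mfunc;             /* stand-in for mlx4_is_mfunc(dev)   */
    static unsigned int next_free = 1;

    static int hw_counter_alloc(unsigned int *idx)    /* __mlx4_counter_alloc */
    {
        *idx = next_free++;           /* the real code allocates from a bitmap */
        return 0;
    }

    static int fw_cmd_counter_alloc(unsigned int *idx) /* mlx4_cmd_imm() path */
    {
        printf("RES_COUNTER alloc forwarded over the comm channel\n");
        return hw_counter_alloc(idx); /* the PF services it with the helper */
    }

    static int counter_alloc(unsigned int *idx)       /* mlx4_counter_alloc */
    {
        return is_mfunc ? fw_cmd_counter_alloc(idx) : hw_counter_alloc(idx);
    }

    int main(void)
    {
        unsigned int idx;

        if (!counter_alloc(&idx))
            printf("counter index %u\n", idx);
        return 0;
    }
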
2689 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2691 struct mlx4_priv *priv = mlx4_priv(dev);
2697 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2699 struct mlx4_priv *priv = mlx4_priv(dev);
2705 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2713 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2715 struct mlx4_priv *priv = mlx4_priv(dev);
2728 static int mlx4_setup_hca(struct mlx4_dev *dev)
2730 struct mlx4_priv *priv = mlx4_priv(dev);
2735 err = mlx4_init_uar_table(dev);
2737 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2741 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2743 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2749 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2754 err = mlx4_init_pd_table(dev);
2756 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2760 err = mlx4_init_xrcd_table(dev);
2762 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2766 err = mlx4_init_mr_table(dev);
2768 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2772 if (!mlx4_is_slave(dev)) {
2773 err = mlx4_init_mcg_table(dev);
2775 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2778 err = mlx4_config_mad_demux(dev);
2780 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2785 err = mlx4_init_eq_table(dev);
2787 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2791 err = mlx4_cmd_use_events(dev);
2793 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2797 err = mlx4_NOP(dev);
2799 if (dev->flags & MLX4_FLAG_MSI_X) {
2800 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
2802 mlx4_warn(dev, "Trying again without MSI-X\n");
2804 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2806 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2812 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2814 err = mlx4_init_cq_table(dev);
2816 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2820 err = mlx4_init_srq_table(dev);
2822 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2826 err = mlx4_init_qp_table(dev);
2828 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2832 if (!mlx4_is_slave(dev)) {
2833 err = mlx4_init_counters_table(dev);
2835 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2840 err = mlx4_allocate_default_counters(dev);
2842 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2846 if (!mlx4_is_slave(dev)) {
2847 for (port = 1; port <= dev->caps.num_ports; port++) {
2849 err = mlx4_get_port_ib_caps(dev, port,
2852 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2854 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2857 if (mlx4_is_master(dev)) {
2859 for (i = 0; i < dev->num_slaves; i++) {
2860 if (i == mlx4_master_func_num(dev))
2867 if (mlx4_is_mfunc(dev))
2868 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2870 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2872 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2873 dev->caps.pkey_table_len[port] : -1);
2875 mlx4_err(dev, "Failed to set port %d, aborting\n",
2885 mlx4_cleanup_default_counters(dev);
2888 if (!mlx4_is_slave(dev))
2889 mlx4_cleanup_counters_table(dev);
2892 mlx4_cleanup_qp_table(dev);
2895 mlx4_cleanup_srq_table(dev);
2898 mlx4_cleanup_cq_table(dev);
2901 mlx4_cmd_use_polling(dev);
2904 mlx4_cleanup_eq_table(dev);
2907 if (!mlx4_is_slave(dev))
2908 mlx4_cleanup_mcg_table(dev);
2911 mlx4_cleanup_mr_table(dev);
2914 mlx4_cleanup_xrcd_table(dev);
2917 mlx4_cleanup_pd_table(dev);
2923 mlx4_uar_free(dev, &priv->driver_uar);
2926 mlx4_cleanup_uar_table(dev);
2930 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2933 struct mlx4_priv *priv = mlx4_priv(dev);
2938 if (eqn > dev->caps.num_comp_vectors)
2942 off += mlx4_get_eqs_per_port(dev, i);
2960 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2962 struct mlx4_priv *priv = mlx4_priv(dev);
2968 int nreq = min3(dev->caps.num_ports *
2970 dev->caps.num_eqs - dev->caps.reserved_eqs,
2983 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2991 dev->caps.num_comp_vectors = nreq - 1;
2995 dev->caps.num_ports);
2997 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
3004 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
3006 dev->caps.num_ports);
3013 if (mlx4_init_affinity_hint(dev, port + 1, i))
3014 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
3018 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
3026 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
3028 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
3030 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
3036 dev->flags |= MLX4_FLAG_MSI_X;
3043 dev->caps.num_comp_vectors = 1;
3047 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
3050 dev->caps.num_ports);
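
mlx4_enable_msi_x() sizes its MSI-X request as min3(ports times a per-port demand, EQs left after the reserved ones, a hard cap) and keeps one vector back for async events, so num_comp_vectors = nreq - 1 as the listing shows. A toy version of the sizing (the per-port term and the cap are elided from this listing, so both are assumptions):

    #include <stdio.h>

    #define MAX_MSIX 128  /* assumed hard cap */

    static int min3i(int a, int b, int c)
    {
        int m = a < b ? a : b;

        return m < c ? m : c;
    }

    int main(void)
    {
        int num_ports = 2, vecs_per_port = 8;  /* assumed per-port demand */
        int num_eqs = 64, reserved_eqs = 4;

        int nreq = min3i(num_ports * vecs_per_port,
                         num_eqs - reserved_eqs, MAX_MSIX);

        /* One vector is kept back for the async event queue. */
        printf("completion vectors: %d\n", nreq - 1);  /* 15 */
        return 0;
    }
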
3084 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
3086 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
3087 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
3100 dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
3103 dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
3106 info->dev = dev;
3108 if (!mlx4_is_slave(dev)) {
3109 mlx4_init_mac_table(dev, &info->mac_table);
3110 mlx4_init_vlan_table(dev, &info->vlan_table);
3111 mlx4_init_roce_gid_table(dev, &info->gid_table);
3112 info->base_qpn = mlx4_get_base_qpn(dev, port);
3117 if (mlx4_is_mfunc(dev)) {
3126 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
3128 mlx4_err(dev, "Failed to create file for port %d\n", port);
3137 if (mlx4_is_mfunc(dev)) {
3146 err = device_create_file(&dev->persist->pdev->dev,
3149 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
3150 device_remove_file(&info->dev->persist->pdev->dev,
3166 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3167 device_remove_file(&info->dev->persist->pdev->dev,
3178 static int mlx4_init_steering(struct mlx4_dev *dev)
3180 struct mlx4_priv *priv = mlx4_priv(dev);
3181 int num_entries = dev->caps.num_ports;
3197 static void mlx4_clear_steering(struct mlx4_dev *dev)
3199 struct mlx4_priv *priv = mlx4_priv(dev);
3202 int num_entries = dev->caps.num_ports;
3238 static int mlx4_get_ownership(struct mlx4_dev *dev)
3243 if (pci_channel_offline(dev->persist->pdev))
3246 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3250 mlx4_err(dev, "Failed to obtain ownership bit\n");
3259 static void mlx4_free_ownership(struct mlx4_dev *dev)
3263 if (pci_channel_offline(dev->persist->pdev))
3266 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3270 mlx4_err(dev, "Failed to obtain ownership bit\n");
3281 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3284 u64 dev_flags = dev->flags;
3290 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3292 if (!dev->dev_vfs)
3298 if (dev->flags & MLX4_FLAG_SRIOV) {
3300 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3306 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3307 if (NULL == dev->dev_vfs) {
3308 mlx4_err(dev, "Failed to allocate memory for VFs\n");
3312 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
3314 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR-IOV\n",
3319 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3323 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3327 mlx4_warn(dev, "Running in master mode\n");
3331 dev->persist->num_vfs = total_vfs;
3338 dev->persist->num_vfs = 0;
3339 kfree(dev->dev_vfs);
3340 dev->dev_vfs = NULL;
3348 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3355 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3362 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3364 struct pci_dev *pdev = dev->persist->pdev;
3367 mutex_lock(&dev->persist->pci_status_mutex);
3368 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3371 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3373 mutex_unlock(&dev->persist->pci_status_mutex);
3378 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3380 struct pci_dev *pdev = dev->persist->pdev;
3382 mutex_lock(&dev->persist->pci_status_mutex);
3383 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3385 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3387 mutex_unlock(&dev->persist->pci_status_mutex);
3395 struct mlx4_dev *dev;
3404 dev = &priv->dev;
3406 err = mlx4_adev_init(dev);
3422 dev->rev_id = pdev->revision;
3423 dev->numa_node = dev_to_node(&pdev->dev);
3427 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3428 dev->flags |= MLX4_FLAG_SLAVE;
3433 err = mlx4_get_ownership(dev);
3438 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3452 err = mlx4_reset(dev);
3454 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3459 dev->flags = MLX4_FLAG_MASTER;
3462 dev->flags |= MLX4_FLAG_SRIOV;
3463 dev->persist->num_vfs = total_vfs;
3470 dev->persist->state = MLX4_DEVICE_STATE_UP;
3473 err = mlx4_cmd_init(dev);
3475 mlx4_err(dev, "Failed to init command interface, aborting\n");
3482 if (mlx4_is_mfunc(dev)) {
3483 if (mlx4_is_master(dev)) {
3484 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3487 dev->num_slaves = 0;
3488 err = mlx4_multi_func_init(dev);
3490 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3496 err = mlx4_init_fw(dev);
3498 mlx4_err(dev, "Failed to init fw, aborting.\n");
3502 if (mlx4_is_master(dev)) {
3512 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3514 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3518 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3522 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3527 mlx4_close_fw(dev);
3528 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3529 dev->flags = dev_flags;
3530 if (!SRIOV_VALID_STATE(dev->flags)) {
3531 mlx4_err(dev, "Invalid SRIOV state\n");
3534 err = mlx4_reset(dev);
3536 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3547 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3549 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3553 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3558 err = mlx4_init_hca(dev);
3563 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3565 if (dev->flags & MLX4_FLAG_SRIOV) {
3568 if (mlx4_is_master(dev) && !reset_flow)
3570 dev->flags &= ~MLX4_FLAG_SRIOV;
3572 if (!mlx4_is_slave(dev))
3573 mlx4_free_ownership(dev);
3574 dev->flags |= MLX4_FLAG_SLAVE;
3575 dev->flags &= ~MLX4_FLAG_MASTER;
3581 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3582 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3585 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3586 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3587 dev->flags = dev_flags;
3588 err = mlx4_cmd_init(dev);
3593 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3597 dev->flags = dev_flags;
3600 if (!SRIOV_VALID_STATE(dev->flags)) {
3601 mlx4_err(dev, "Invalid SRIOV state\n");
3611 if (!mlx4_is_slave(dev))
3612 pcie_print_link_status(dev->persist->pdev);
3616 if (mlx4_is_master(dev)) {
3617 if (dev->caps.num_ports < 2 &&
3620 mlx4_err(dev,
3622 dev->caps.num_ports);
3625 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3628 i < sizeof(dev->persist->nvfs)/
3629 sizeof(dev->persist->nvfs[0]); i++) {
3632 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3633 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3634 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3635 dev->caps.num_ports;
3642 err = mlx4_multi_func_init(dev);
3644 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3649 err = mlx4_alloc_eq_table(dev);
3656 mlx4_enable_msi_x(dev);
3657 if ((mlx4_is_mfunc(dev)) &&
3658 !(dev->flags & MLX4_FLAG_MSI_X)) {
3660 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3664 if (!mlx4_is_slave(dev)) {
3665 err = mlx4_init_steering(dev);
3670 mlx4_init_quotas(dev);
3672 err = mlx4_setup_hca(dev);
3673 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3674 !mlx4_is_mfunc(dev)) {
3675 dev->flags &= ~MLX4_FLAG_MSI_X;
3676 dev->caps.num_comp_vectors = 1;
3678 err = mlx4_setup_hca(dev);
3687 if (mlx4_is_master(dev)) {
3688 err = mlx4_ARM_COMM_CHANNEL(dev);
3690 mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
3696 for (port = 1; port <= dev->caps.num_ports; port++) {
3697 err = mlx4_init_port_info(dev, port);
3705 err = mlx4_register_device(dev);
3709 mlx4_sense_init(dev);
3710 mlx4_start_sense(dev);
3714 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3724 mlx4_cleanup_default_counters(dev);
3725 if (!mlx4_is_slave(dev))
3726 mlx4_cleanup_counters_table(dev);
3727 mlx4_cleanup_qp_table(dev);
3728 mlx4_cleanup_srq_table(dev);
3729 mlx4_cleanup_cq_table(dev);
3730 mlx4_cmd_use_polling(dev);
3731 mlx4_cleanup_eq_table(dev);
3732 mlx4_cleanup_mcg_table(dev);
3733 mlx4_cleanup_mr_table(dev);
3734 mlx4_cleanup_xrcd_table(dev);
3735 mlx4_cleanup_pd_table(dev);
3736 mlx4_cleanup_uar_table(dev);
3739 if (!mlx4_is_slave(dev))
3740 mlx4_clear_steering(dev);
3743 if (dev->flags & MLX4_FLAG_MSI_X)
3747 mlx4_free_eq_table(dev);
3750 if (mlx4_is_master(dev)) {
3751 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3752 mlx4_multi_func_cleanup(dev);
3755 if (mlx4_is_slave(dev))
3756 mlx4_slave_destroy_special_qp_cap(dev);
3759 mlx4_close_hca(dev);
3762 mlx4_close_fw(dev);
3765 if (mlx4_is_slave(dev))
3766 mlx4_multi_func_cleanup(dev);
3769 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3772 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3774 dev->flags &= ~MLX4_FLAG_SRIOV;
3777 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3780 kfree(priv->dev.dev_vfs);
3782 if (!mlx4_is_slave(dev))
3783 mlx4_free_ownership(dev);
3788 mlx4_adev_cleanup(dev);
3805 err = mlx4_pci_enable_device(&priv->dev);
3807 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3819 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3828 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3834 dev_err(&pdev->dev,
3843 dev_err(&pdev->dev,
3855 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3861 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3868 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3874 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3876 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3877 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3879 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3885 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3904 dev_warn(&pdev->dev, "Skipping virtual function: %d\n",
3912 err = mlx4_crdump_init(&priv->dev);
3916 err = mlx4_catas_init(&priv->dev);
3927 mlx4_catas_end(&priv->dev);
3930 mlx4_crdump_end(&priv->dev);
3936 mlx4_pci_disable_device(&priv->dev);
3943 struct mlx4_dev *dev = &priv->dev;
3944 struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
3992 struct mlx4_dev *dev = &priv->dev;
3993 struct mlx4_dev_persistent *persist = dev->persist;
4000 mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
4010 struct mlx4_dev *dev = &priv->dev;
4011 struct mlx4_dev_persistent *persist = dev->persist;
4017 mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
4033 struct mlx4_dev *dev;
4038 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
4044 dev = &priv->dev;
4045 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
4046 if (!dev->persist) {
4050 dev->persist->pdev = pdev;
4051 dev->persist->dev = dev;
4052 pci_set_drvdata(pdev, dev->persist);
4054 mutex_init(&dev->persist->device_state_mutex);
4055 mutex_init(&dev->persist->interface_state_mutex);
4056 mutex_init(&dev->persist->pci_status_mutex);
4076 kfree(dev->persist);
4083 static void mlx4_clean_dev(struct mlx4_dev *dev)
4085 struct mlx4_dev_persistent *persist = dev->persist;
4086 struct mlx4_priv *priv = mlx4_priv(dev);
4087 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
4090 priv->dev.persist = persist;
4091 priv->dev.flags = flags;
4097 struct mlx4_dev *dev = persist->dev;
4098 struct mlx4_priv *priv = mlx4_priv(dev);
4109 for (i = 0; i < dev->caps.num_ports; i++) {
4110 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
4111 dev->persist->curr_port_poss_type[i] = dev->caps.
4117 mlx4_stop_sense(dev);
4118 mlx4_unregister_device(dev);
4120 for (p = 1; p <= dev->caps.num_ports; p++) {
4122 mlx4_CLOSE_PORT(dev, p);
4125 if (mlx4_is_master(dev))
4126 mlx4_free_resource_tracker(dev,
4129 mlx4_cleanup_default_counters(dev);
4130 if (!mlx4_is_slave(dev))
4131 mlx4_cleanup_counters_table(dev);
4132 mlx4_cleanup_qp_table(dev);
4133 mlx4_cleanup_srq_table(dev);
4134 mlx4_cleanup_cq_table(dev);
4135 mlx4_cmd_use_polling(dev);
4136 mlx4_cleanup_eq_table(dev);
4137 mlx4_cleanup_mcg_table(dev);
4138 mlx4_cleanup_mr_table(dev);
4139 mlx4_cleanup_xrcd_table(dev);
4140 mlx4_cleanup_pd_table(dev);
4142 if (mlx4_is_master(dev))
4143 mlx4_free_resource_tracker(dev,
4147 mlx4_uar_free(dev, &priv->driver_uar);
4148 mlx4_cleanup_uar_table(dev);
4149 if (!mlx4_is_slave(dev))
4150 mlx4_clear_steering(dev);
4151 mlx4_free_eq_table(dev);
4152 if (mlx4_is_master(dev))
4153 mlx4_multi_func_cleanup(dev);
4154 mlx4_close_hca(dev);
4155 mlx4_close_fw(dev);
4156 if (mlx4_is_slave(dev))
4157 mlx4_multi_func_cleanup(dev);
4158 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
4160 if (dev->flags & MLX4_FLAG_MSI_X)
4163 if (!mlx4_is_slave(dev))
4164 mlx4_free_ownership(dev);
4166 mlx4_slave_destroy_special_qp_cap(dev);
4167 kfree(dev->dev_vfs);
4169 mlx4_adev_cleanup(dev);
4171 mlx4_clean_dev(dev);
4179 struct mlx4_dev *dev = persist->dev;
4180 struct mlx4_priv *priv = mlx4_priv(dev);
4187 if (mlx4_is_slave(dev))
4195 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
4196 active_vfs = mlx4_how_many_lives_vf(dev);
4209 mlx4_info(dev, "%s: interface is down\n", __func__);
4210 mlx4_catas_end(dev);
4211 mlx4_crdump_end(dev);
4212 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
4213 mlx4_warn(dev, "Disabling SR-IOV\n");
4218 mlx4_pci_disable_device(dev);
4221 kfree(dev->persist);
4226 static int restore_current_port_types(struct mlx4_dev *dev,
4230 struct mlx4_priv *priv = mlx4_priv(dev);
4233 mlx4_stop_sense(dev);
4236 for (i = 0; i < dev->caps.num_ports; i++)
4237 dev->caps.possible_type[i + 1] = poss_types[i];
4238 err = mlx4_change_port_types(dev, types);
4239 mlx4_start_sense(dev);
4254 struct mlx4_dev *dev = persist->dev;
4255 struct mlx4_priv *priv = mlx4_priv(dev);
4260 total_vfs = dev->persist->num_vfs;
4261 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4267 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
4272 err = restore_current_port_types(dev, dev->persist->curr_port_type,
4273 dev->persist->curr_port_poss_type);
4275 mlx4_err(dev, "could not restore original port types (%d)\n",
4343 struct mlx4_dev *dev = persist->dev;
4346 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4349 devlink = priv_to_devlink(mlx4_priv(dev));
4360 mlx4_pci_disable_device(persist->dev);
4367 struct mlx4_dev *dev = persist->dev;
4370 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4371 err = mlx4_pci_enable_device(dev);
4373 mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
4386 struct mlx4_dev *dev = persist->dev;
4387 struct mlx4_priv *priv = mlx4_priv(dev);
4393 mlx4_err(dev, "%s was called\n", __func__);
4394 total_vfs = dev->persist->num_vfs;
4395 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4404 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4409 err = restore_current_port_types(dev, dev->persist->
4410 curr_port_type, dev->persist->
4413 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4423 struct mlx4_dev *dev = persist->dev;
4426 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4427 devlink = priv_to_devlink(mlx4_priv(dev));
4434 mlx4_pci_disable_device(dev);
4447 struct mlx4_dev *dev = persist->dev;
4450 mlx4_err(dev, "suspend was called\n");
4451 devlink = priv_to_devlink(mlx4_priv(dev));
4466 struct mlx4_dev *dev = persist->dev;
4467 struct mlx4_priv *priv = mlx4_priv(dev);
4473 mlx4_err(dev, "resume was called\n");
4474 total_vfs = dev->persist->num_vfs;
4475 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4484 ret = restore_current_port_types(dev,
4485 dev->persist->curr_port_type,
4486 dev->persist->curr_port_poss_type);
4488 mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);