Lines matching defs:dev — definition and use sites of dev in the mlx4 firmware command code (each entry: source line number, then the matched line):

84 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
121 mlx4_dbg(dev, "DEV_CAP flags:\n");
124 mlx4_dbg(dev, " %s\n", fname[i]);
127 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
175 mlx4_dbg(dev, " %s\n", fname[i]);
178 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
189 mailbox = mlx4_alloc_cmd_mailbox(dev);
197 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
200 mlx4_free_cmd_mailbox(dev, mailbox);
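The three matches above (alloc, execute, free) are the entire command skeleton. A minimal reconstruction of the function around them, assuming the standard mlx4 mailbox API; the MOD_STAT_CFG_*_OFFSET names are fw.c-local defines and used here as assumptions:

    int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
    {
            struct mlx4_cmd_mailbox *mailbox;
            u32 *inbox;
            int err;

            mailbox = mlx4_alloc_cmd_mailbox(dev);   /* DMA-coherent command buffer */
            if (IS_ERR(mailbox))
                    return PTR_ERR(mailbox);
            inbox = mailbox->buf;

            MLX4_PUT(inbox, cfg->log_pg_sz,   MOD_STAT_CFG_PG_SZ_OFFSET);   /* assumed offset */
            MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); /* assumed offset */

            err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

            mlx4_free_cmd_mailbox(dev, mailbox);     /* freed on success and error alike */
            return err;
    }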
204 int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
221 mailbox = mlx4_alloc_cmd_mailbox(dev);
228 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
250 mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
255 mlx4_free_cmd_mailbox(dev, mailbox);
269 err = __mlx4_register_vlan(&priv->dev, port,
274 mlx4_warn(&priv->dev,
279 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
316 mlx4_warn(&priv->dev,
325 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
331 struct mlx4_priv *priv = mlx4_priv(dev);
396 mlx4_get_active_ports(dev, slave);
398 dev, slave, vhcr->in_modifier);
407 find_first_bit(actv_ports.ports, dev->caps.num_ports);
411 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
416 if (mlx4_vf_smi_enabled(dev, slave, port) &&
417 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
425 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
435 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
444 if (dev->caps.phv_bit[port])
452 mlx4_get_active_ports(dev, slave);
465 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
466 dev->caps.num_ports);
469 size = dev->caps.function_caps; /* set PF behaviours */
477 size = dev->caps.num_qps;
482 size = dev->caps.num_srqs;
487 size = dev->caps.num_cqs;
490 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
491 mlx4_QUERY_FUNC(dev, &func, slave)) {
494 dev->caps.num_eqs :
495 rounddown_pow_of_two(dev->caps.num_eqs);
497 size = dev->caps.reserved_eqs;
511 size = dev->caps.num_mpts;
516 size = dev->caps.num_mtts;
519 size = dev->caps.num_mgms + dev->caps.num_amgms;
527 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
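Each "size = dev->caps..." match in the wrapper above is paired, on the next (unmatched) line, with an MLX4_PUT that stores the value into the slave's reply mailbox. A representative pair, with the quota offset names assumed from fw.c's local defines:

    size = dev->caps.num_qps;
    MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);   /* assumed name */

    size = dev->caps.num_cqs;
    MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);   /* assumed name */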
539 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
555 mailbox = mlx4_alloc_cmd_mailbox(dev);
559 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
570 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
651 if (gen_or_port > dev->caps.num_ports) {
657 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
659 mlx4_err(dev, "VLAN is enforced on this port\n");
665 mlx4_err(dev, "Force mac is enabled on this port\n");
669 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
672 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
718 mlx4_free_cmd_mailbox(dev, mailbox);
725 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
833 mailbox = mlx4_alloc_cmd_mailbox(dev);
838 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
843 if (mlx4_is_mfunc(dev))
1109 err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
1126 mlx4_free_cmd_mailbox(dev, mailbox);
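Past the cmd_box call, the body of mlx4_QUERY_DEV_CAP is a long run of MLX4_GET decodes from the reply mailbox into dev_cap. Two representative decodes, sketched with assumed offset names; field is a local u8, and the FW reports most limits log2-encoded:

    u8 field;

    MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);      /* assumed name */
    dev_cap->max_qps = 1 << (field & 0x1f);

    MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);   /* assumed name */
    dev_cap->max_cq_sz = 1 << field;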
1130 void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
1133 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
1136 mlx4_dbg(dev, "BlueFlame not available\n");
1138 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
1140 mlx4_dbg(dev, "Max ICM size %lld MB\n",
1142 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1144 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1146 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1148 mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
1151 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1153 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1155 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1157 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1159 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
1162 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
1164 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
1166 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
1167 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
1168 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
1169 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
1171 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
1177 mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
1182 dump_dev_cap_flags(dev, dev_cap->flags);
1183 dump_dev_cap_flags2(dev, dev_cap->flags2);
1186 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
1194 mailbox = mlx4_alloc_cmd_mailbox(dev);
1199 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1200 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1229 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
1263 mlx4_free_cmd_mailbox(dev, mailbox);
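The two cmd_box matches above are the two sides of one dispatch: on old firmware (MLX4_FLAG_OLD_PORT_CMDS) the per-port data still lives in the DEV_CAP layout, otherwise a dedicated QUERY_PORT is issued with the port as in_modifier. A sketch of that dispatch, decode steps elided:

    if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
            err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
                               MLX4_CMD_QUERY_DEV_CAP,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
            /* ... pick this port's fields out of the dev-cap reply ... */
    } else {
            err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
                               MLX4_CMD_QUERY_PORT,
                               MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
            /* ... decode the dedicated QUERY_PORT reply ... */
    }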
1272 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1288 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1300 actv_ports = mlx4_get_active_ports(dev, slave);
1301 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
1304 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
1311 for (; slave_port < dev->caps.num_ports; ++slave_port)
1320 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
1350 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1406 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1412 struct mlx4_priv *priv = mlx4_priv(dev);
1418 int port = mlx4_slave_convert_port(dev, slave,
1437 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
1441 if (!err && dev->caps.function != slave) {
1452 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
1459 else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
1463 err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
1472 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
1473 short_field = mlx4_get_slave_num_gids(dev, slave, port);
1479 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
1487 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1495 mailbox = mlx4_alloc_cmd_mailbox(dev);
1499 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1514 mlx4_free_cmd_mailbox(dev, mailbox);
1519 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1530 mailbox = mlx4_alloc_cmd_mailbox(dev);
1545 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1566 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1577 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1584 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1587 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1590 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1596 mlx4_free_cmd_mailbox(dev, mailbox);
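mlx4_map_cmd batches ICM page entries into the mailbox and flushes with mlx4_cmd whenever it fills, which is why the same mlx4_cmd call appears twice above (mid-loop flush, then a final partial batch). A condensed sketch of the inner batching with the chunk-iteration context elided; each entry is two big-endian 64-bit words, virtual address then physical address with the log2 page count in the low bits:

    __be64 *pages = mailbox->buf;
    int nent = 0;

    if (virt != -1) {
            pages[nent * 2] = cpu_to_be64(virt);   /* MAP_ICM carries a target VA */
            virt += 1ULL << lg;
    }
    pages[nent * 2 + 1] =
            cpu_to_be64((mlx4_icm_addr(&iter) + ((u64)i << lg)) |
                        (lg - MLX4_ICM_PAGE_SHIFT));

    if (++nent == MLX4_MAILBOX_SIZE / 16) {        /* mailbox full: flush */
            err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
                           MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
            nent = 0;
    }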
1600 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1602 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1605 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1607 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1612 int mlx4_RUN_FW(struct mlx4_dev *dev)
1614 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1618 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1620 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
1621 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1648 mailbox = mlx4_alloc_cmd_mailbox(dev);
1653 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1663 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1668 dev->caps.function = lg;
1670 if (mlx4_is_slave(dev))
1677 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1679 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1680 (int) (dev->caps.fw_ver >> 32),
1681 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1682 (int) dev->caps.fw_ver & 0xffff);
1683 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1690 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1695 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1696 (int) (dev->caps.fw_ver >> 32),
1697 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1698 (int) dev->caps.fw_ver & 0xffff,
1706 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1717 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1719 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1724 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1735 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1739 mlx4_free_cmd_mailbox(dev, mailbox);
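The caps.fw_ver match above is only the first line of a three-line expression: the FW returns the 48-bit version with the minor and subminor 16-bit fields swapped, so the driver repacks them into major.minor.subminor order:

    dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
            ((fw_ver & 0xffff0000ull) >> 16) |
            ((fw_ver & 0x0000ffffull) << 16);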
1743 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1753 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1803 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1813 mailbox = mlx4_alloc_cmd_mailbox(dev);
1818 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1829 mlx4_free_cmd_mailbox(dev, mailbox);
1833 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1896 mailbox = mlx4_alloc_cmd_mailbox(dev);
1917 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1921 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1925 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1929 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1933 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1935 dev->caps.eqe_size = 64;
1936 dev->caps.eqe_factor = 1;
1938 dev->caps.eqe_size = 32;
1939 dev->caps.eqe_factor = 0;
1942 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1944 dev->caps.cqe_size = 64;
1945 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1947 dev->caps.cqe_size = 32;
1951 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1952 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1953 dev->caps.eqe_size = cache_line_size();
1954 dev->caps.cqe_size = cache_line_size();
1955 dev->caps.eqe_factor = 0;
1956 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1957 (ilog2(dev->caps.eqe_size) - 5)),
1961 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1964 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1967 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
1971 mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
1991 if (dev->caps.steering_mode ==
2005 if (dev->caps.dmfs_high_steer_mode !=
2020 if (dev->caps.dmfs_high_steer_mode !=
2023 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
2034 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
2053 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
2058 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
2062 mlx4_err(dev, "INIT_HCA returns %d\n", err);
2064 mlx4_free_cmd_mailbox(dev, mailbox);
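The eqe_size/cqe_size matches come from INIT_HCA choosing the event/completion queue entry layout it requests from the FW and mirroring that choice into dev->caps for the rest of the driver. A sketch of the 64-byte-EQE branch; the inbox offset name and bit position are assumptions:

    if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
            *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
            dev->caps.eqe_size   = 64;
            dev->caps.eqe_factor = 1;   /* driver-side stride multiplier */
    } else {
            dev->caps.eqe_size   = 32;
            dev->caps.eqe_factor = 0;
    }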
2068 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2088 mailbox = mlx4_alloc_cmd_mailbox(dev);
2093 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2096 !mlx4_is_slave(dev));
2205 mlx4_free_cmd_mailbox(dev, mailbox);
2210 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
2216 mailbox = mlx4_alloc_cmd_mailbox(dev);
2218 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
2223 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2226 !mlx4_is_slave(dev));
2228 mlx4_warn(dev, "hca_core_clock update failed\n");
2232 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
2235 mlx4_free_cmd_mailbox(dev, mailbox);
2243 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
2245 struct mlx4_priv *priv = mlx4_priv(dev);
2253 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
2259 struct mlx4_priv *priv = mlx4_priv(dev);
2260 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2269 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2272 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2279 if (slave == mlx4_master_func_num(dev)) {
2280 if (check_qp0_state(dev, slave, port) &&
2282 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2296 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
2304 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
2319 mailbox = mlx4_alloc_cmd_mailbox(dev);
2325 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
2326 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
2329 field = 128 << dev->caps.ib_mtu_cap[port];
2331 field = dev->caps.gid_table_len[port];
2333 field = dev->caps.pkey_table_len[port];
2336 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
2339 mlx4_free_cmd_mailbox(dev, mailbox);
2341 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2345 mlx4_hca_core_clock_update(dev);
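On the MLX4_FLAG_OLD_PORT_CMDS path the matches above show mlx4_INIT_PORT packing the port parameters into the mailbox by hand; the newer path is the bare one-line mlx4_cmd. A sketch of the packing, where only the two *_OFFSET names are assumptions (the shift names appear verbatim in the matches):

    flags  = 0;
    flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
    flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
    MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);   /* assumed name */

    field = 128 << dev->caps.ib_mtu_cap[port];        /* cap is log2(MTU/128) */
    MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);     /* assumed name */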
2351 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2357 struct mlx4_priv *priv = mlx4_priv(dev);
2358 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2368 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2370 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2378 if (slave == mlx4_master_func_num(dev)) {
2381 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2395 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2397 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2402 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2404 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2426 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2431 mailbox = mlx4_alloc_cmd_mailbox(dev);
2437 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2440 mlx4_free_cmd_mailbox(dev, mailbox);
2444 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2449 mailbox = mlx4_alloc_cmd_mailbox(dev);
2453 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2459 mlx4_free_cmd_mailbox(dev, mailbox);
2477 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2488 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2491 err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2515 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2523 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2528 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2538 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2541 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port)
2549 return mlx4_CONFIG_DEV_set(dev, &config_dev);
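The three CONFIG_DEV setters above (VXLAN port, rx-port check, RoCE v2 port) all share one shape: zero a struct mlx4_config_dev, set a single update flag plus its payload, and hand it to mlx4_CONFIG_DEV_set. The VXLAN one reconstructed as a sketch; the update-flag macro name is an assumption:

    int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
    {
            struct mlx4_config_dev config_dev;

            memset(&config_dev, 0, sizeof(config_dev));
            config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
            config_dev.vxlan_udp_dport = udp_port;

            return mlx4_CONFIG_DEV_set(dev, &config_dev);
    }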
2553 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2562 mailbox = mlx4_alloc_cmd_mailbox(dev);
2570 err = mlx4_cmd(dev, mailbox->dma, 0,
2574 mlx4_free_cmd_mailbox(dev, mailbox);
2579 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2581 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2597 int mlx4_NOP(struct mlx4_dev *dev)
2600 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2604 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
2613 mailbox = mlx4_alloc_cmd_mailbox(dev);
2619 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
2635 mlx4_free_cmd_mailbox(dev, mailbox);
2640 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2652 mailbox = mlx4_alloc_cmd_mailbox(dev);
2657 for (port = 1; port <= dev->caps.num_ports; port++) {
2659 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2663 mlx4_err(dev, "Fail to get port %d uplink guid\n",
2669 dev->caps.phys_port_id[port] = (u64)guid_lo |
2673 mlx4_free_cmd_mailbox(dev, mailbox);
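Inside the per-port loop, the uplink GUID comes back as two 32-bit halves that are stitched into the 64-bit phys_port_id. A sketch; the two offset names are hypothetical placeholders for fw.c's local defines:

    u32 guid_hi, guid_lo;

    MLX4_GET(guid_hi, mailbox->buf, QUERY_PORT_GUID_HI_OFFSET); /* hypothetical */
    MLX4_GET(guid_lo, mailbox->buf, QUERY_PORT_GUID_LO_OFFSET); /* hypothetical */
    dev->caps.phys_port_id[port] = (u64)guid_lo | ((u64)guid_hi << 32);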
2678 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2682 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2688 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2692 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2706 struct mlx4_dev *dev = &priv->dev;
2726 mailbox = mlx4_alloc_cmd_mailbox(dev);
2728 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2734 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2738 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2749 if (dev->caps.steering_mode ==
2751 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2765 err = mlx4_multicast_detach(dev, &qp,
2769 err = mlx4_multicast_attach(dev, &qp,
2779 mlx4_warn(dev, "Bad type for required operation\n");
2783 err = mlx4_cmd(dev, 0, ((u32) err |
2788 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2797 mlx4_free_cmd_mailbox(dev, mailbox);
2800 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2813 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2818 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2823 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2828 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
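Each of the four dbg prints above follows an MLX4_GET of one MAD_DEMUX attribute mask; the function reports the SMP firewall as active only when all four masks are non-zero. A condensed sketch (offset name assumed, the remaining three reads elided):

    u32 set_attr_mask, getresp_attr_mask, trap_attr_mask, traprepress_attr_mask;

    MLX4_GET(set_attr_mask, mailbox->buf,
             MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);              /* assumed name */
    mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", set_attr_mask);
    /* ... same pattern for getresp, trap and traprepress ... */

    if (set_attr_mask && getresp_attr_mask &&
        trap_attr_mask && traprepress_attr_mask)
            return 1;   /* secure host: firewall active */
    return 0;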
2838 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2844 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2847 mailbox = mlx4_alloc_cmd_mailbox(dev);
2849 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2854 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2858 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2863 if (mlx4_check_smp_firewall_active(dev, mailbox))
2864 dev->flags |= MLX4_FLAG_SECURE_HOST;
2867 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2871 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2875 if (dev->flags & MLX4_FLAG_SECURE_HOST)
2876 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2878 mlx4_free_cmd_mailbox(dev, mailbox);
2905 * @dev: mlx4_dev.
2915 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2923 inbox = mlx4_alloc_cmd_mailbox(dev);
2927 outbox = mlx4_alloc_cmd_mailbox(dev);
2929 mlx4_free_cmd_mailbox(dev, inbox);
2947 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2955 mlx4_err(dev,
2963 mlx4_free_cmd_mailbox(dev, inbox);
2964 mlx4_free_cmd_mailbox(dev, outbox);
2976 * @dev: mlx4_dev.
2984 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2988 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2993 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
3003 if (slave != mlx4_master_func_num(dev) &&
3012 mlx4_slave_convert_port(dev, slave,
3016 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
3021 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
3031 mailbox = mlx4_alloc_cmd_mailbox(dev);
3041 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
3045 mlx4_free_cmd_mailbox(dev, mailbox);
3049 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
3055 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3062 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
3066 if (mlx4_is_slave(dev))
3069 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3070 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3071 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
3073 dev->caps.phv_bit[port] = new_val;
3081 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
3088 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3097 void mlx4_replace_zero_macs(struct mlx4_dev *dev)
3102 dev->port_random_macs = 0;
3103 for (i = 1; i <= dev->caps.num_ports; ++i)
3104 if (!dev->caps.def_mac[i] &&
3105 dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
3107 dev->port_random_macs |= 1 << i;
3108 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
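The unmatched lines inside the loop above declare the buffer and generate the address; filled in here as a sketch, using eth_random_addr from <linux/etherdevice.h>:

    void mlx4_replace_zero_macs(struct mlx4_dev *dev)
    {
            u8 mac_addr[ETH_ALEN];
            int i;

            dev->port_random_macs = 0;
            for (i = 1; i <= dev->caps.num_ports; ++i)
                    if (!dev->caps.def_mac[i] &&
                        dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
                            eth_random_addr(mac_addr);   /* random, locally administered */
                            dev->port_random_macs |= 1 << i;
                            dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
                    }
    }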