/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */


#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>

#define VXLAN_UDP_PORT 8472

struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];
	int offset;
};
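
/*
 * The tables below pair an ethtool string with the byte offset of a u64
 * counter inside the corresponding stats structure.
 * vmxnet3_get_ethtool_stats() walks them and emits
 * *(u64 *)(base + offset) for each entry, so the table order defines the
 * "ethtool -S" output order.
 */
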
/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
	/* description,		offset */
	{ "Tx Queue#",		0 },
	{ "  TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
	{ "  TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
	{ "  ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
	{ "  ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
	{ "  mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
	{ "  mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
	{ "  bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
	{ "  bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
	{ "  pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
	{ "  pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};

/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
	/* description,		offset */
	{ "  drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
						 drop_total) },
	{ "     too many frags",	offsetof(struct vmxnet3_tq_driver_stats,
						 drop_too_many_frags) },
	{ "     giant hdr",		offsetof(struct vmxnet3_tq_driver_stats,
						 drop_oversized_hdr) },
	{ "     hdr err",		offsetof(struct vmxnet3_tq_driver_stats,
						 drop_hdr_inspect_err) },
	{ "     tso",			offsetof(struct vmxnet3_tq_driver_stats,
						 drop_tso) },
	{ "  ring full",		offsetof(struct vmxnet3_tq_driver_stats,
						 tx_ring_full) },
	{ "  pkts linearized",		offsetof(struct vmxnet3_tq_driver_stats,
						 linearized) },
	{ "  hdr cloned",		offsetof(struct vmxnet3_tq_driver_stats,
						 copy_skb_header) },
	{ "  giant hdr",		offsetof(struct vmxnet3_tq_driver_stats,
						 oversized_hdr) },
};

/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
	{ "Rx Queue#",		0 },
	{ "  LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
	{ "  LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
	{ "  ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
	{ "  ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
	{ "  mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
	{ "  mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
	{ "  bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
	{ "  bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
	{ "  pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
	{ "  pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
};

/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
	/* description,		offset */
	{ "  drv dropped rx total",	offsetof(struct vmxnet3_rq_driver_stats,
						 drop_total) },
	{ "     err",			offsetof(struct vmxnet3_rq_driver_stats,
						 drop_err) },
	{ "     fcs",			offsetof(struct vmxnet3_rq_driver_stats,
						 drop_fcs) },
	{ "  rx buf alloc fail",	offsetof(struct vmxnet3_rq_driver_stats,
						 rx_buf_alloc_failure) },
};

/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
	/* description,		offset */
	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
					 tx_timeout_count) }
};


void
vmxnet3_get_stats64(struct net_device *netdev,
		    struct rtnl_link_stats64 *stats)
{
	struct vmxnet3_adapter *adapter;
	struct vmxnet3_tq_driver_stats *drvTxStats;
	struct vmxnet3_rq_driver_stats *drvRxStats;
	struct UPT1_TxStats *devTxStats;
	struct UPT1_RxStats *devRxStats;
	unsigned long flags;
	int i;

	adapter = netdev_priv(netdev);

	/* Collect the dev stats into the shared area */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		devTxStats = &adapter->tqd_start[i].stats;
		drvTxStats = &adapter->tx_queue[i].stats;
		stats->tx_packets += devTxStats->ucastPktsTxOK +
				     devTxStats->mcastPktsTxOK +
				     devTxStats->bcastPktsTxOK;
		stats->tx_bytes += devTxStats->ucastBytesTxOK +
				   devTxStats->mcastBytesTxOK +
				   devTxStats->bcastBytesTxOK;
		stats->tx_errors += devTxStats->pktsTxError;
		stats->tx_dropped += drvTxStats->drop_total;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		devRxStats = &adapter->rqd_start[i].stats;
		drvRxStats = &adapter->rx_queue[i].stats;
		stats->rx_packets += devRxStats->ucastPktsRxOK +
				     devRxStats->mcastPktsRxOK +
				     devRxStats->bcastPktsRxOK;

		stats->rx_bytes += devRxStats->ucastBytesRxOK +
				   devRxStats->mcastBytesRxOK +
				   devRxStats->bcastBytesRxOK;

		stats->rx_errors += devRxStats->pktsRxError;
		stats->rx_dropped += drvRxStats->drop_total;
		stats->multicast += devRxStats->mcastPktsRxOK;
	}
}

static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
		       adapter->num_tx_queues +
		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
		       adapter->num_rx_queues +
		       ARRAY_SIZE(vmxnet3_global_stats);
	default:
		return -EOPNOTSUPP;
	}
}
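
/*
 * Worked example: a hypothetical 4-Tx/4-Rx queue device reports
 * (11 + 9) * 4 + (11 + 4) * 4 + 1 = 141 counters above, matching the
 * string table laid out by vmxnet3_get_strings() below.
 */
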
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	return ((9 /* BAR1 registers */ +
		(1 + adapter->intr.num_intrs) +
		(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
		(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
		sizeof(u32));
}


static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));

	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}


static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (stringset == ETH_SS_STATS) {
		int i, j;

		for (j = 0; j < adapter->num_tx_queues; j++) {
			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
			     i++) {
				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
		}

		for (j = 0; j < adapter->num_rx_queues; j++) {
			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
			     i++) {
				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
			memcpy(buf, vmxnet3_global_stats[i].desc,
			       ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}
	}
}

netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
				       netdev_features_t features)
{
	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}
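
/*
 * Tunnel ports accepted for offload in vmxnet3_features_check() below:
 * GENEVE_UDP_PORT (6081), IANA_VXLAN_UDP_PORT (4789) and the legacy
 * Linux default VXLAN_UDP_PORT (8472) defined at the top of this file.
 * Encapsulated packets addressed to any other UDP destination port fall
 * back to software checksumming and GSO.
 */
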
netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
					 struct net_device *netdev,
					 netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/* Validate if the tunneled packet is being offloaded by the device */
	if (VMXNET3_VERSION_GE_4(adapter) &&
	    skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_proto = 0;
		u16 port;
		struct udphdr *udph;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}

		switch (l4_proto) {
		case IPPROTO_UDP:
			udph = udp_hdr(skb);
			port = be16_to_cpu(udph->dest);
			/* Check if offloaded port is supported */
			if (port != GENEVE_UDP_PORT &&
			    port != IANA_VXLAN_UDP_PORT &&
			    port != VXLAN_UDP_PORT) {
				return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
			}
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}
	}
	return features;
}

static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
			NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}
}

static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
			NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}
}

int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	netdev_features_t changed = features ^ netdev->features;
	netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
					     NETIF_F_GSO_UDP_TUNNEL_CSUM;
	u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;

	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
		       NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
		if (features & NETIF_F_RXCSUM)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXCSUM;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXCSUM;

		/* update hardware LRO capability accordingly */
		if (features & NETIF_F_LRO)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_LRO;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_LRO;

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXVLAN;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXVLAN;

		if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) {
			vmxnet3_enable_encap_offloads(netdev);
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXINNEROFLD;
		} else if ((features & tun_offload_mask) == 0 &&
			   udp_tun_enabled) {
			vmxnet3_disable_encap_offloads(netdev);
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXINNEROFLD;
		}

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
	return 0;
}
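
/*
 * "ethtool -S" handler: counters are emitted in exactly the order of the
 * string tables above - device stats then driver stats for each Tx queue,
 * the same for each Rx queue, then the global stats - and every counter
 * is assumed to be 64 bits wide (see the comment in the loop below).
 */
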
static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u8 *base;
	int i;
	int j = 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	/* this does assume each counter is 64-bit wide */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		base = (u8 *)&adapter->tqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_dev_stats[i].offset);

		base = (u8 *)&adapter->tx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_driver_stats[i].offset);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		base = (u8 *)&adapter->rqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_dev_stats[i].offset);

		base = (u8 *)&adapter->rx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_driver_stats[i].offset);
	}

	base = (u8 *)adapter;
	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}
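
/*
 * Size example for the register dump below: with 2 interrupt vectors and
 * one Tx/Rx queue pair, vmxnet3_get_regs_len() yields
 * (9 + (1 + 2) + (1 + 17) + (1 + 23)) * sizeof(u32) = 216 bytes, which is
 * what "ethtool -d" fetches through vmxnet3_get_regs().
 */
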
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *buf = p;
	int i = 0, j = 0;

	memset(p, 0, vmxnet3_get_regs_len(netdev));

	regs->version = 2;

	/* Update vmxnet3_get_regs_len if we want to dump more registers */

	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);

	buf[j++] = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++) {
		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
						 + i * VMXNET3_REG_ALIGN);
	}

	buf[j++] = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
						 i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
		buf[j++] = tq->tx_ring.size;
		buf[j++] = tq->tx_ring.next2fill;
		buf[j++] = tq->tx_ring.next2comp;
		buf[j++] = tq->tx_ring.gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
		buf[j++] = tq->data_ring.size;
		buf[j++] = tq->txdata_desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
		buf[j++] = tq->comp_ring.size;
		buf[j++] = tq->comp_ring.next2proc;
		buf[j++] = tq->comp_ring.gen;

		buf[j++] = tq->stopped;
	}

	buf[j++] = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
						 i * VMXNET3_REG_ALIGN);
		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
						 i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->rx_ring[0].next2fill;
		buf[j++] = rq->rx_ring[0].next2comp;
		buf[j++] = rq->rx_ring[0].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
		buf[j++] = rq->rx_ring[1].size;
		buf[j++] = rq->rx_ring[1].next2fill;
		buf[j++] = rq->rx_ring[1].next2comp;
		buf[j++] = rq->rx_ring[1].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
		/* the rx data ring has one descriptor per ring0 entry, so
		 * ring0's size is dumped in the data ring's size slot
		 */
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->data_ring.desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
		buf[j++] = rq->comp_ring.size;
		buf[j++] = rq->comp_ring.next2proc;
		buf[j++] = rq->comp_ring.gen;
	}
}


static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
	wol->wolopts = adapter->wol;
}


static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
			    WAKE_MAGICSECURE)) {
		return -EOPNOTSUPP;
	}

	adapter->wol = wol->wolopts;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}


static int
vmxnet3_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
	ecmd->base.port = PORT_TP;

	if (adapter->link_speed) {
		ecmd->base.speed = adapter->link_speed;
		ecmd->base.duplex = DUPLEX_FULL;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}


static void
vmxnet3_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
	param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
		VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
	param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;

	param->rx_pending = adapter->rx_ring_size;
	param->tx_pending = adapter->tx_ring_size;
	param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
		adapter->rxdata_desc_size : 0;
	param->rx_jumbo_pending = adapter->rx_ring2_size;
}
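
/*
 * "ethtool -g"/"ethtool -G" field mapping: "rx"/"tx" are the primary
 * rings, "rx-jumbo" is rx ring2, and "rx-mini" is reused to expose the
 * rx data ring descriptor size on version 3 and newer devices.
 */
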
static int
vmxnet3_set_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
	u16 new_rxdata_desc_size;
	u32 sz;
	int err = 0;

	if (param->tx_pending == 0 || param->tx_pending >
	    VMXNET3_TX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_pending == 0 || param->rx_pending >
	    VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_jumbo_pending == 0 ||
	    param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
		return -EINVAL;

	/* if adapter not yet initialized, do nothing */
	if (adapter->rx_buf_per_pkt == 0) {
		netdev_err(netdev, "adapter not completely initialized, "
			   "ring size cannot be changed yet\n");
		return -EOPNOTSUPP;
	}

	if (VMXNET3_VERSION_GE_3(adapter)) {
		if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
			return -EINVAL;
	} else if (param->rx_mini_pending != 0) {
		return -EINVAL;
	}

	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
			   ~VMXNET3_RING_SIZE_MASK;
	new_tx_ring_size = min_t(u32, new_tx_ring_size,
				 VMXNET3_TX_RING_MAX_SIZE);
	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
	    VMXNET3_RING_SIZE_ALIGN) != 0)
		return -EINVAL;

	/* ring0 has to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
	new_rx_ring_size = min_t(u32, new_rx_ring_size,
				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
	    sz) != 0)
		return -EINVAL;

	/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
			    ~VMXNET3_RING_SIZE_MASK;
	new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
				  VMXNET3_RX_RING2_MAX_SIZE);

	/* rx data ring buffer size has to be a multiple of
	 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
	 */
	new_rxdata_desc_size =
		(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
		~VMXNET3_RXDATA_DESC_SIZE_MASK;
	new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
				     VMXNET3_RXDATA_DESC_MAX_SIZE);

	if (new_tx_ring_size == adapter->tx_ring_size &&
	    new_rx_ring_size == adapter->rx_ring_size &&
	    new_rx_ring2_size == adapter->rx_ring2_size &&
	    new_rxdata_desc_size == adapter->rxdata_desc_size) {
		return 0;
	}

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* recreate the rx queue and the tx queue based on the
		 * new sizes */
		vmxnet3_tq_destroy_all(adapter);
		vmxnet3_rq_destroy_all(adapter);

		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
					    new_rx_ring_size, new_rx_ring2_size,
					    adapter->txdata_desc_size,
					    new_rxdata_desc_size);
		if (err) {
			/* failed, most likely because of OOM, try default
			 * size */
			netdev_err(netdev, "failed to apply new sizes, "
				   "try the default ones\n");
			new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
			new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
			new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
			new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
				VMXNET3_DEF_RXDATA_DESC_SIZE : 0;

			err = vmxnet3_create_queues(adapter,
						    new_tx_ring_size,
						    new_rx_ring_size,
						    new_rx_ring2_size,
						    adapter->txdata_desc_size,
						    new_rxdata_desc_size);
			if (err) {
				netdev_err(netdev, "failed to create queues "
					   "with default sizes. Closing it\n");
				goto out;
			}
		}

		err = vmxnet3_activate_dev(adapter);
		if (err)
			netdev_err(netdev, "failed to re-activate, error %d."
				   " Closing it\n", err);
	}
	adapter->tx_ring_size = new_tx_ring_size;
	adapter->rx_ring_size = new_rx_ring_size;
	adapter->rx_ring2_size = new_rx_ring2_size;
	adapter->rxdata_desc_size = new_rxdata_desc_size;

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
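
/*
 * "ethtool -n <dev> rx-flow-hash <type>": reads the live RSS field
 * selection from the device while the interface is up, otherwise the
 * value cached in adapter->rss_fields.
 */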
static int
vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
			  struct ethtool_rxnfc *info)
{
	enum Vmxnet3_RSSField rss_fields;

	if (netif_running(adapter->netdev)) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->cmd_lock, flags);

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_RSS_FIELDS);
		rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	} else {
		rss_fields = adapter->rss_fields;
	}

	info->data = 0;

	/* Report default options for RSS on vmxnet3 */
	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			      RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
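
/*
 * Example: "ethtool -N <dev> rx-flow-hash udp4 sdfn" requests hashing on
 * IP src/dst plus the UDP ports (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3) and sets VMXNET3_RSS_FIELDS_UDPIP4 below;
 * "... rx-flow-hash udp4 sd" clears it again.
 */
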
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
			 struct vmxnet3_adapter *adapter,
			 struct ethtool_rxnfc *nfc)
{
	enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V4_FLOW:
	case AH_V4_FLOW:
	case AH_ESP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V6_FLOW:
	case AH_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (rss_fields != adapter->rss_fields) {
		adapter->default_rss_fields = false;
		if (netif_running(netdev)) {
			struct Vmxnet3_DriverShared *shared = adapter->shared;
			union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
			unsigned long flags;

			spin_lock_irqsave(&adapter->cmd_lock, flags);
			cmdInfo->setRssFields = rss_fields;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_SET_RSS_FIELDS);

			/* Not all requested RSS may get applied, so get and
			 * cache what was actually applied.
			 */
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_GET_RSS_FIELDS);
			adapter->rss_fields =
				VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		} else {
			/* When the device is activated, we will try to apply
			 * these rules and cache the applied value later.
			 */
			adapter->rss_fields = rss_fields;
		}
	}
	return 0;
}
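
/*
 * ETHTOOL_GRXRINGS reports the Rx queue count so userspace can size RSS
 * indirection entries; the flow-hash commands are only honored on
 * version 4+ devices with RSS enabled.
 */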
static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
		  u32 *rules)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		break;
	case ETHTOOL_GRXFH:
		if (!VMXNET3_VERSION_GE_4(adapter)) {
			err = -EOPNOTSUPP;
			break;
		}
#ifdef VMXNET3_RSS
		if (!adapter->rss) {
			err = -EOPNOTSUPP;
			break;
		}
#endif
		err = vmxnet3_get_rss_hash_opts(adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int
vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (!VMXNET3_VERSION_GE_4(adapter)) {
		err = -EOPNOTSUPP;
		goto done;
	}
#ifdef VMXNET3_RSS
	if (!adapter->rss) {
		err = -EOPNOTSUPP;
		goto done;
	}
#endif

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

done:
	return err;
}

#ifdef VMXNET3_RSS
static u32
vmxnet3_get_rss_indir_size(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	return rssConf->indTableSize;
}

static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
	unsigned int n = rssConf->indTableSize;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
		return 0;
	while (n--)
		p[n] = rssConf->indTable[n];
	return 0;
}

static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
		const u8 hfunc)
{
	unsigned int i;
	unsigned long flags;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;
	for (i = 0; i < rssConf->indTableSize; i++)
		rssConf->indTable[i] = p[i];

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_RSSIDT);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return 0;
}
#endif
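
/*
 * Interrupt moderation (version 3+ devices): exactly one mode is active
 * at a time - disabled, adaptive (use_adaptive_rx_coalesce), static
 * packet depth (*_max_coalesced_frames) or rate-based "rbc" (derived
 * from rx_coalesce_usecs). The get/set handlers below translate between
 * these modes and the ethtool_coalesce fields.
 */
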
static int
vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	switch (adapter->coal_conf->coalMode) {
	case VMXNET3_COALESCE_DISABLED:
		/* struct ethtool_coalesce is already initialized to 0 */
		break;
	case VMXNET3_COALESCE_ADAPT:
		ec->use_adaptive_rx_coalesce = true;
		break;
	case VMXNET3_COALESCE_STATIC:
		ec->tx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
		ec->rx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.rx_depth;
		break;
	case VMXNET3_COALESCE_RBC: {
		u32 rbc_rate;

		rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
		ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
	}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
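
/*
 * Example: "ethtool -C <dev> adaptive-rx on" selects
 * VMXNET3_COALESCE_ADAPT, while "ethtool -C <dev> rx-usecs <n>" selects
 * the rate-based mode; the parameter groups are mutually exclusive, so
 * mixing them is rejected with -EINVAL below.
 */
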
static int
vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
	unsigned long flags;

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->use_adaptive_rx_coalesce == 0) &&
	    (ec->tx_max_coalesced_frames == 0) &&
	    (ec->rx_max_coalesced_frames == 0)) {
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
		goto done;
	}

	if (ec->rx_coalesce_usecs != 0) {
		u32 rbc_rate;

		if ((ec->use_adaptive_rx_coalesce != 0) ||
		    (ec->tx_max_coalesced_frames != 0) ||
		    (ec->rx_max_coalesced_frames != 0)) {
			return -EINVAL;
		}

		rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
		if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
		    rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
		adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
		goto done;
	}

	if (ec->use_adaptive_rx_coalesce != 0) {
		if ((ec->rx_coalesce_usecs != 0) ||
		    (ec->tx_max_coalesced_frames != 0) ||
		    (ec->rx_max_coalesced_frames != 0)) {
			return -EINVAL;
		}
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
		goto done;
	}

	if ((ec->tx_max_coalesced_frames != 0) ||
	    (ec->rx_max_coalesced_frames != 0)) {
		if ((ec->rx_coalesce_usecs != 0) ||
		    (ec->use_adaptive_rx_coalesce != 0)) {
			return -EINVAL;
		}

		if ((ec->tx_max_coalesced_frames >
		     VMXNET3_COAL_STATIC_MAX_DEPTH) ||
		    (ec->rx_max_coalesced_frames >
		     VMXNET3_COAL_STATIC_MAX_DEPTH)) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;

		adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
			(ec->tx_max_coalesced_frames ?
			 ec->tx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.rx_depth =
			(ec->rx_max_coalesced_frames ?
			 ec->rx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.tx_depth =
			VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
		goto done;
	}

done:
	adapter->default_coal_mode = false;
	if (netif_running(netdev)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		cmdInfo->varConf.confVer = 1;
		cmdInfo->varConf.confLen =
			cpu_to_le32(sizeof(*adapter->coal_conf));
		cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_SET_COALESCE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	return 0;
}

static const struct ethtool_ops vmxnet3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo       = vmxnet3_get_drvinfo,
	.get_regs_len      = vmxnet3_get_regs_len,
	.get_regs          = vmxnet3_get_regs,
	.get_wol           = vmxnet3_get_wol,
	.set_wol           = vmxnet3_set_wol,
	.get_link          = ethtool_op_get_link,
	.get_coalesce      = vmxnet3_get_coalesce,
	.set_coalesce      = vmxnet3_set_coalesce,
	.get_strings       = vmxnet3_get_strings,
	.get_sset_count    = vmxnet3_get_sset_count,
	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
	.get_ringparam     = vmxnet3_get_ringparam,
	.set_ringparam     = vmxnet3_set_ringparam,
	.get_rxnfc         = vmxnet3_get_rxnfc,
	.set_rxnfc         = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
	.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
	.get_rxfh          = vmxnet3_get_rss,
	.set_rxfh          = vmxnet3_set_rss,
#endif
	.get_link_ksettings = vmxnet3_get_link_ksettings,
};

void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}