Lines Matching defs:adapter
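All fragments below are verbatim matches from the ibmvnic driver (apparently drivers/net/ethernet/ibm/ibmvnic.c); the leading number on each line is the position of the match in that source file.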

100 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
101 static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
102 static void send_query_map(struct ibmvnic_adapter *adapter);
105 static int send_login(struct ibmvnic_adapter *adapter);
106 static void send_query_cap(struct ibmvnic_adapter *adapter);
108 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
112 static int init_crq_queue(struct ibmvnic_adapter *adapter);
113 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
114 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
116 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
118 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
119 static void flush_reset_queue(struct ibmvnic_adapter *adapter);
155 static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
163 return ibmvnic_send_crq(adapter, &crq);
166 static int send_version_xchg(struct ibmvnic_adapter *adapter)
175 return ibmvnic_send_crq(adapter, &crq);
178 static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
187 netdev_warn(adapter->netdev,
192 static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
199 rxqs = adapter->rx_scrq;
200 txqs = adapter->tx_scrq;
201 num_txqs = adapter->num_active_tx_scrqs;
202 num_rxqs = adapter->num_active_rx_scrqs;
204 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
207 ibmvnic_clean_queue_affinity(adapter, txqs[i]);
211 ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
250 static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
252 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
253 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
255 int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
256 int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
262 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
263 if (!(adapter->rx_scrq && adapter->tx_scrq)) {
264 netdev_warn(adapter->netdev,
299 rc = __netif_set_xps_queue(adapter->netdev,
303 netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
309 netdev_warn(adapter->netdev,
312 ibmvnic_clean_affinity(adapter);
318 struct ibmvnic_adapter *adapter;
320 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
321 ibmvnic_set_affinity(adapter);
327 struct ibmvnic_adapter *adapter;
329 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
330 ibmvnic_set_affinity(adapter);
336 struct ibmvnic_adapter *adapter;
338 adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
339 ibmvnic_clean_affinity(adapter);
345 static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
349 ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
353 &adapter->node_dead);
356 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
360 static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
362 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
364 &adapter->node_dead);
383 * @adapter: private device data
390 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
398 netdev = adapter->netdev;
402 if (!adapter->crq.active) {
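
The fragments at 390-402 come from ibmvnic_wait_for_completion(), which checks adapter->crq.active while it waits so a dead CRQ aborts the wait early instead of sleeping out the whole timeout. A minimal kernel-style sketch of that idea, assuming the wait is split into a handful of slices (the slice count here is illustrative, not the driver's exact code):

    static int wait_sliced(struct ibmvnic_adapter *adapter,
                           struct completion *comp, unsigned long timeout_ms)
    {
            unsigned long slice = msecs_to_jiffies(timeout_ms) / 5;
            int tries = 5;

            while (tries--) {
                    if (!adapter->crq.active)
                            return -ENODEV;   /* CRQ went down: fail fast */
                    if (wait_for_completion_timeout(comp, slice))
                            return 0;         /* firmware responded */
            }
            return -ETIMEDOUT;                /* budget exhausted */
    }
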
432 * @adapter: ibmvnic adapter associated to the LTB
445 * with the VIOS and reuse it on next open. Free LTB when adapter is closed.
450 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
453 struct device *dev = &adapter->vdev->dev;
462 free_long_term_buff(adapter, ltb);
477 ltb->map_id = find_first_zero_bit(adapter->map_ids,
479 bitmap_set(adapter->map_ids, ltb->map_id, 1);
489 mutex_lock(&adapter->fw_lock);
490 adapter->fw_done_rc = 0;
491 reinit_completion(&adapter->fw_done);
493 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
499 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
506 if (adapter->fw_done_rc) {
508 adapter->fw_done_rc);
515 mutex_unlock(&adapter->fw_lock);
519 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
522 struct device *dev = &adapter->vdev->dev;
531 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
532 adapter->reset_reason != VNIC_RESET_MOBILITY &&
533 adapter->reset_reason != VNIC_RESET_TIMEOUT)
534 send_request_unmap(adapter, ltb->map_id);
540 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
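
Lines 477-479 and 540 show the LTB map-id bookkeeping: allocation takes the first free bit in adapter->map_ids, free clears it again (while 531-534 skip the unmap CRQ for failover/mobility/timeout resets). A self-contained user-space model of the id bookkeeping; MAX_MAP_ID is a stand-in bound, not the driver's constant:

    #include <stdbool.h>

    #define MAX_MAP_ID 256                  /* illustrative bound */

    static bool map_ids[MAX_MAP_ID];

    static int alloc_map_id(void)
    {
            for (int i = 0; i < MAX_MAP_ID; i++) {
                    if (!map_ids[i]) {
                            map_ids[i] = true;  /* bitmap_set(map_ids, i, 1) */
                            return i;
                    }
            }
            return -1;                          /* ids exhausted */
    }

    static void free_map_id(int id)
    {
            if (id >= 0 && id < MAX_MAP_ID)
                    map_ids[id] = false;        /* bitmap_clear(map_ids, id, 1) */
    }
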
546 * @adapter: The ibmvnic adapter containing this ltb set
552 static void free_ltb_set(struct ibmvnic_adapter *adapter,
558 free_long_term_buff(adapter, &ltb_set->ltbs[i]);
568 * @adapter: ibmvnic adapter associated to the LTB
582 static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
586 struct device *dev = &adapter->vdev->dev;
625 free_long_term_buff(adapter, &old_set.ltbs[i]);
662 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
746 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
750 for (i = 0; i < adapter->num_active_rx_pools; i++)
751 adapter->rx_pool[i].active = 0;
754 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
758 u64 handle = adapter->rx_scrq[pool->index]->handle;
759 struct device *dev = &adapter->vdev->dev;
777 rx_scrq = adapter->rx_scrq[pool->index];
796 skb = netdev_alloc_skb(adapter->netdev,
800 adapter->replenish_no_mem++;
844 send_subcrq_indirect(adapter, handle,
850 adapter->replenish_add_buff_success += ind_bufp->index;
873 adapter->replenish_add_buff_failure += ind_bufp->index;
876 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
882 deactivate_rx_pools(adapter);
883 netif_carrier_off(adapter->netdev);
887 static void replenish_pools(struct ibmvnic_adapter *adapter)
891 adapter->replenish_task_cycles++;
892 for (i = 0; i < adapter->num_active_rx_pools; i++) {
893 if (adapter->rx_pool[i].active)
894 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
897 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
900 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
902 kfree(adapter->tx_stats_buffers);
903 kfree(adapter->rx_stats_buffers);
904 adapter->tx_stats_buffers = NULL;
905 adapter->rx_stats_buffers = NULL;
908 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
910 adapter->tx_stats_buffers =
914 if (!adapter->tx_stats_buffers)
917 adapter->rx_stats_buffers =
921 if (!adapter->rx_stats_buffers)
927 static void release_stats_token(struct ibmvnic_adapter *adapter)
929 struct device *dev = &adapter->vdev->dev;
931 if (!adapter->stats_token)
934 dma_unmap_single(dev, adapter->stats_token,
937 adapter->stats_token = 0;
940 static int init_stats_token(struct ibmvnic_adapter *adapter)
942 struct device *dev = &adapter->vdev->dev;
946 stok = dma_map_single(dev, &adapter->stats,
955 adapter->stats_token = stok;
956 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
961 * release_rx_pools() - Release any rx pools attached to @adapter.
962 * @adapter: ibmvnic adapter
966 static void release_rx_pools(struct ibmvnic_adapter *adapter)
971 if (!adapter->rx_pool)
974 for (i = 0; i < adapter->num_active_rx_pools; i++) {
975 rx_pool = &adapter->rx_pool[i];
977 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
981 free_ltb_set(adapter, &rx_pool->ltb_set);
996 kfree(adapter->rx_pool);
997 adapter->rx_pool = NULL;
998 adapter->num_active_rx_pools = 0;
999 adapter->prev_rx_pool_size = 0;
1004 * @adapter: ibmvnic adapter
1006 * Check if the existing rx pools in the adapter can be reused. The
1016 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
1022 if (!adapter->rx_pool)
1025 old_num_pools = adapter->num_active_rx_pools;
1026 new_num_pools = adapter->req_rx_queues;
1028 old_pool_size = adapter->prev_rx_pool_size;
1029 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
1031 old_buff_size = adapter->prev_rx_buf_sz;
1032 new_buff_size = adapter->cur_rx_buf_sz;
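
reuse_rx_pools() (1016-1032) compares the previous pool geometry against the newly negotiated one and reuses the existing pools only when everything matches; reuse_tx_pools() (1259-1273) applies the same idea with the MTU in place of the buffer size. The test reduces to a predicate like this sketch (names are illustrative):

    #include <stdbool.h>

    struct pool_geom {
            unsigned int num_pools;
            unsigned int pool_size;
            unsigned long buff_size;    /* MTU for the tx variant */
    };

    static bool geom_unchanged(const struct pool_geom *prev,
                               const struct pool_geom *next)
    {
            return prev->num_pools == next->num_pools &&
                   prev->pool_size == next->pool_size &&
                   prev->buff_size == next->buff_size;
    }
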
1043 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
1046 * Initialize the set of receiver pools in the ibmvnic adapter associated
1055 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1056 struct device *dev = &adapter->vdev->dev;
1063 pool_size = adapter->req_rx_add_entries_per_subcrq;
1064 num_pools = adapter->req_rx_queues;
1065 buff_size = adapter->cur_rx_buf_sz;
1067 if (reuse_rx_pools(adapter)) {
1073 release_rx_pools(adapter);
1075 adapter->rx_pool = kcalloc(num_pools,
1078 if (!adapter->rx_pool) {
1086 adapter->num_active_rx_pools = num_pools;
1089 rx_pool = &adapter->rx_pool[i];
1091 netdev_dbg(adapter->netdev,
1117 adapter->prev_rx_pool_size = pool_size;
1118 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
1122 rx_pool = &adapter->rx_pool[i];
1126 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
1160 release_rx_pools(adapter);
1168 static void release_vpd_data(struct ibmvnic_adapter *adapter)
1170 if (!adapter->vpd)
1173 kfree(adapter->vpd->buff);
1174 kfree(adapter->vpd);
1176 adapter->vpd = NULL;
1179 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
1184 free_ltb_set(adapter, &tx_pool->ltb_set);
1188 * release_tx_pools() - Release any tx pools attached to @adapter.
1189 * @adapter: ibmvnic adapter
1193 static void release_tx_pools(struct ibmvnic_adapter *adapter)
1200 if (!adapter->tx_pool)
1203 for (i = 0; i < adapter->num_active_tx_pools; i++) {
1204 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1205 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1208 kfree(adapter->tx_pool);
1209 adapter->tx_pool = NULL;
1210 kfree(adapter->tso_pool);
1211 adapter->tso_pool = NULL;
1212 adapter->num_active_tx_pools = 0;
1213 adapter->prev_tx_pool_size = 0;
1248 * @adapter: ibmvnic adapter
1250 * Check if the existing tx pools in the adapter can be reused. The
1259 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1265 if (!adapter->tx_pool)
1268 old_num_pools = adapter->num_active_tx_pools;
1269 new_num_pools = adapter->num_active_tx_scrqs;
1270 old_pool_size = adapter->prev_tx_pool_size;
1271 new_pool_size = adapter->req_tx_entries_per_subcrq;
1272 old_mtu = adapter->prev_mtu;
1273 new_mtu = adapter->req_mtu;
1284 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1287 * Initialize the set of transmit pools in the ibmvnic adapter associated
1296 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1297 struct device *dev = &adapter->vdev->dev;
1303 num_pools = adapter->req_tx_queues;
1309 if (reuse_tx_pools(adapter)) {
1315 release_tx_pools(adapter);
1317 pool_size = adapter->req_tx_entries_per_subcrq;
1318 num_pools = adapter->num_active_tx_scrqs;
1320 adapter->tx_pool = kcalloc(num_pools,
1322 if (!adapter->tx_pool)
1325 adapter->tso_pool = kcalloc(num_pools,
1330 if (!adapter->tso_pool) {
1331 kfree(adapter->tx_pool);
1332 adapter->tx_pool = NULL;
1339 adapter->num_active_tx_pools = num_pools;
1341 buff_size = adapter->req_mtu + VLAN_HLEN;
1346 i, adapter->req_tx_entries_per_subcrq, buff_size);
1348 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1353 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1360 adapter->prev_tx_pool_size = pool_size;
1361 adapter->prev_mtu = adapter->req_mtu;
1375 tx_pool = &adapter->tx_pool[i];
1380 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1391 tso_pool = &adapter->tso_pool[i];
1396 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1410 release_tx_pools(adapter);
1418 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1422 if (adapter->napi_enabled)
1425 for (i = 0; i < adapter->req_rx_queues; i++)
1426 napi_enable(&adapter->napi[i]);
1428 adapter->napi_enabled = true;
1431 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1435 if (!adapter->napi_enabled)
1438 for (i = 0; i < adapter->req_rx_queues; i++) {
1439 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1440 napi_disable(&adapter->napi[i]);
1443 adapter->napi_enabled = false;
1446 static int init_napi(struct ibmvnic_adapter *adapter)
1450 adapter->napi = kcalloc(adapter->req_rx_queues,
1452 if (!adapter->napi)
1455 for (i = 0; i < adapter->req_rx_queues; i++) {
1456 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1457 netif_napi_add(adapter->netdev, &adapter->napi[i],
1461 adapter->num_active_rx_napi = adapter->req_rx_queues;
1465 static void release_napi(struct ibmvnic_adapter *adapter)
1469 if (!adapter->napi)
1472 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1473 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1474 netif_napi_del(&adapter->napi[i]);
1477 kfree(adapter->napi);
1478 adapter->napi = NULL;
1479 adapter->num_active_rx_napi = 0;
1480 adapter->napi_enabled = false;
1511 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1524 adapter->init_done_rc = 0;
1525 reinit_completion(&adapter->init_done);
1526 rc = send_login(adapter);
1530 if (!wait_for_completion_timeout(&adapter->init_done,
1533 adapter->login_pending = false;
1537 if (adapter->init_done_rc == ABORTED) {
1540 adapter->init_done_rc = 0;
1546 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1548 release_sub_crqs(adapter, 1);
1553 adapter->init_done_rc = 0;
1554 reinit_completion(&adapter->init_done);
1555 send_query_cap(adapter);
1556 if (!wait_for_completion_timeout(&adapter->init_done,
1563 rc = init_sub_crqs(adapter);
1570 rc = init_sub_crq_irqs(adapter);
1577 } else if (adapter->init_done_rc) {
1579 adapter->init_done_rc);
1582 /* adapter login failed, so free any CRQs or sub-CRQs
1591 adapter->init_done_rc = 0;
1592 release_sub_crqs(adapter, true);
1600 reinit_init_done(adapter);
1604 adapter->failover_pending = false;
1605 release_crq_queue(adapter);
1616 spin_lock_irqsave(&adapter->rwi_lock, flags);
1617 flush_reset_queue(adapter);
1618 spin_unlock_irqrestore(&adapter->rwi_lock,
1621 rc = init_crq_queue(adapter);
1628 rc = ibmvnic_reset_init(adapter, false);
1642 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1644 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1648 static void release_login_buffer(struct ibmvnic_adapter *adapter)
1650 if (!adapter->login_buf)
1653 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
1654 adapter->login_buf_sz, DMA_TO_DEVICE);
1655 kfree(adapter->login_buf);
1656 adapter->login_buf = NULL;
1659 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1661 if (!adapter->login_rsp_buf)
1664 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
1665 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
1666 kfree(adapter->login_rsp_buf);
1667 adapter->login_rsp_buf = NULL;
1670 static void release_resources(struct ibmvnic_adapter *adapter)
1672 release_vpd_data(adapter);
1674 release_napi(adapter);
1675 release_login_buffer(adapter);
1676 release_login_rsp_buffer(adapter);
1679 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1681 struct net_device *netdev = adapter->netdev;
1697 reinit_completion(&adapter->init_done);
1698 rc = ibmvnic_send_crq(adapter, &crq);
1704 if (!wait_for_completion_timeout(&adapter->init_done,
1710 if (adapter->init_done_rc == PARTIALSUCCESS) {
1714 } else if (adapter->init_done_rc) {
1716 adapter->init_done_rc);
1717 return adapter->init_done_rc;
1726 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1730 adapter->req_tx_queues, adapter->req_rx_queues);
1732 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1738 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1745 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1747 struct device *dev = &adapter->vdev->dev;
1752 if (adapter->vpd->buff)
1753 len = adapter->vpd->len;
1755 mutex_lock(&adapter->fw_lock);
1756 adapter->fw_done_rc = 0;
1757 reinit_completion(&adapter->fw_done);
1761 rc = ibmvnic_send_crq(adapter, &crq);
1763 mutex_unlock(&adapter->fw_lock);
1767 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1770 mutex_unlock(&adapter->fw_lock);
1773 mutex_unlock(&adapter->fw_lock);
1775 if (!adapter->vpd->len)
1778 if (!adapter->vpd->buff)
1779 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1780 else if (adapter->vpd->len != len)
1781 adapter->vpd->buff =
1782 krealloc(adapter->vpd->buff,
1783 adapter->vpd->len, GFP_KERNEL);
1785 if (!adapter->vpd->buff) {
1790 adapter->vpd->dma_addr =
1791 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1793 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1795 kfree(adapter->vpd->buff);
1796 adapter->vpd->buff = NULL;
1800 mutex_lock(&adapter->fw_lock);
1801 adapter->fw_done_rc = 0;
1802 reinit_completion(&adapter->fw_done);
1806 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1807 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1808 rc = ibmvnic_send_crq(adapter, &crq);
1810 kfree(adapter->vpd->buff);
1811 adapter->vpd->buff = NULL;
1812 mutex_unlock(&adapter->fw_lock);
1816 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1819 kfree(adapter->vpd->buff);
1820 adapter->vpd->buff = NULL;
1821 mutex_unlock(&adapter->fw_lock);
1825 mutex_unlock(&adapter->fw_lock);
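
ibmvnic_get_vpd() (1745-1825) first asks the device for the VPD length, then allocates with kzalloc() on first use and krealloc() only when the reported length changed. A user-space analog of that buffer policy (error handling elided):

    #include <stdlib.h>

    static void *ensure_vpd_buf(void *buf, size_t old_len, size_t new_len)
    {
            if (!buf)
                    return calloc(1, new_len);    /* kzalloc() on first use */
            if (new_len != old_len)
                    return realloc(buf, new_len); /* krealloc() on size change */
            return buf;                           /* same length: keep buffer */
    }
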
1829 static int init_resources(struct ibmvnic_adapter *adapter)
1831 struct net_device *netdev = adapter->netdev;
1838 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1839 if (!adapter->vpd)
1843 rc = ibmvnic_get_vpd(adapter);
1849 rc = init_napi(adapter);
1853 send_query_map(adapter);
1865 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1866 enum vnic_state prev_state = adapter->state;
1869 adapter->state = VNIC_OPENING;
1870 replenish_pools(adapter);
1871 ibmvnic_napi_enable(adapter);
1876 for (i = 0; i < adapter->req_rx_queues; i++) {
1879 enable_irq(adapter->rx_scrq[i]->irq);
1880 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1883 for (i = 0; i < adapter->req_tx_queues; i++) {
1886 enable_irq(adapter->tx_scrq[i]->irq);
1887 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1894 if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
1898 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1900 ibmvnic_napi_disable(adapter);
1901 ibmvnic_disable_irqs(adapter);
1905 adapter->tx_queues_active = true;
1917 for (i = 0; i < adapter->req_rx_queues; i++)
1918 napi_schedule(&adapter->napi[i]);
1921 adapter->state = VNIC_OPEN;
1927 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1936 * It should be safe to overwrite the adapter->state here. Since
1943 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1945 adapter_state_to_string(adapter->state),
1946 adapter->failover_pending);
1947 adapter->state = VNIC_OPEN;
1952 if (adapter->state != VNIC_CLOSED) {
1957 rc = init_resources(adapter);
1972 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1973 adapter->state = VNIC_OPEN;
1978 release_resources(adapter);
1979 release_rx_pools(adapter);
1980 release_tx_pools(adapter);
1986 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1994 if (!adapter->rx_pool)
1997 rx_scrqs = adapter->num_active_rx_pools;
1998 rx_entries = adapter->req_rx_add_entries_per_subcrq;
2002 rx_pool = &adapter->rx_pool[i];
2006 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
2017 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
2038 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
2043 if (!adapter->tx_pool || !adapter->tso_pool)
2046 tx_scrqs = adapter->num_active_tx_pools;
2050 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
2051 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
2052 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
2056 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
2058 struct net_device *netdev = adapter->netdev;
2061 if (adapter->tx_scrq) {
2062 for (i = 0; i < adapter->req_tx_queues; i++)
2063 if (adapter->tx_scrq[i]->irq) {
2066 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
2067 disable_irq(adapter->tx_scrq[i]->irq);
2071 if (adapter->rx_scrq) {
2072 for (i = 0; i < adapter->req_rx_queues; i++) {
2073 if (adapter->rx_scrq[i]->irq) {
2076 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
2077 disable_irq(adapter->rx_scrq[i]->irq);
2085 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2089 adapter->tx_queues_active = false;
2096 if (test_bit(0, &adapter->resetting))
2101 ibmvnic_napi_disable(adapter);
2102 ibmvnic_disable_irqs(adapter);
2107 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2110 adapter->state = VNIC_CLOSING;
2111 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2112 adapter->state = VNIC_CLOSED;
2118 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2122 adapter_state_to_string(adapter->state),
2123 adapter->failover_pending,
2124 adapter->force_reset_recovery);
2129 if (adapter->failover_pending) {
2130 adapter->state = VNIC_CLOSED;
2136 clean_rx_pools(adapter);
2137 clean_tx_pools(adapter);
2294 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
2316 tx_pool = &adapter->tso_pool[queue_num];
2319 tx_pool = &adapter->tx_pool[queue_num];
2326 adapter->netdev->stats.tx_packets--;
2327 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
2328 adapter->tx_stats_buffers[queue_num].packets--;
2329 adapter->tx_stats_buffers[queue_num].bytes -=
2333 adapter->netdev->stats.tx_dropped++;
2339 (adapter->req_tx_entries_per_subcrq / 2) &&
2340 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
2343 if (adapter->tx_queues_active) {
2344 netif_wake_subqueue(adapter->netdev, queue_num);
2345 netdev_dbg(adapter->netdev, "Started queue %d\n",
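
Lines 2339-2345 (and again 4203-4210 in ibmvnic_complete_tx()) restart a stopped tx subqueue once ring occupancy has fallen to half of req_tx_entries_per_subcrq. The heuristic, pulled out as a predicate:

    #include <stdbool.h>

    static bool should_wake_subqueue(unsigned int entries_in_use,
                                     unsigned int ring_entries,
                                     bool queue_stopped)
    {
            /* half-full hysteresis keeps the queue from flapping */
            return queue_stopped && entries_in_use <= ring_entries / 2;
    }
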
2353 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
2369 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
2371 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
2379 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2381 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2382 struct device *dev = &adapter->vdev->dev;
2410 if (!adapter->tx_queues_active) {
2419 tx_scrq = adapter->tx_scrq[queue_num];
2427 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2432 tx_pool = &adapter->tso_pool[queue_num];
2434 tx_pool = &adapter->tx_pool[queue_num];
2442 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2501 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2536 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2548 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2554 >= adapter->req_tx_entries_per_subcrq) {
2576 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2590 adapter->tx_send_failed += tx_send_failed;
2591 adapter->tx_map_failed += tx_map_failed;
2592 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2593 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2594 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
2601 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2610 if (!adapter->promisc_supported)
2619 ibmvnic_send_crq(adapter, &crq);
2626 ibmvnic_send_crq(adapter, &crq);
2636 ibmvnic_send_crq(adapter, &crq);
2644 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2658 mutex_lock(&adapter->fw_lock);
2659 adapter->fw_done_rc = 0;
2660 reinit_completion(&adapter->fw_done);
2662 rc = ibmvnic_send_crq(adapter, &crq);
2665 mutex_unlock(&adapter->fw_lock);
2669 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2671 if (rc || adapter->fw_done_rc) {
2673 mutex_unlock(&adapter->fw_lock);
2676 mutex_unlock(&adapter->fw_lock);
2679 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2685 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2693 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2694 if (adapter->state != VNIC_PROBED)
2728 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2730 reinit_completion(&adapter->init_done);
2731 adapter->init_done_rc = 0;
2738 static int do_reset(struct ibmvnic_adapter *adapter,
2741 struct net_device *netdev = adapter->netdev;
2746 netdev_dbg(adapter->netdev,
2748 adapter_state_to_string(adapter->state),
2749 adapter->failover_pending,
2753 adapter->reset_reason = rwi->reset_reason;
2755 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2763 adapter->failover_pending = false;
2766 reset_state = adapter->state;
2775 old_num_rx_queues = adapter->req_rx_queues;
2776 old_num_tx_queues = adapter->req_tx_queues;
2777 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2778 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2783 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2784 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2785 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2790 adapter->state = VNIC_CLOSING;
2798 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2803 if (adapter->state == VNIC_OPEN) {
2806 * set the adapter state to OPEN. Update our
2814 adapter->state = VNIC_CLOSING;
2817 if (adapter->state != VNIC_CLOSING) {
2818 /* If someone else changed the adapter state
2824 adapter->state = VNIC_CLOSED;
2828 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2829 release_resources(adapter);
2830 release_sub_crqs(adapter, 1);
2831 release_crq_queue(adapter);
2834 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2838 adapter->state = VNIC_PROBED;
2840 reinit_init_done(adapter);
2842 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2843 rc = init_crq_queue(adapter);
2844 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2845 rc = ibmvnic_reenable_crq_queue(adapter);
2846 release_sub_crqs(adapter, 1);
2848 rc = ibmvnic_reset_crq(adapter);
2850 rc = vio_enable_interrupts(adapter->vdev);
2852 netdev_err(adapter->netdev,
2859 netdev_err(adapter->netdev,
2864 rc = ibmvnic_reset_init(adapter, true);
2868 /* If the adapter was in PROBE or DOWN state prior to the reset,
2880 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2881 rc = init_resources(adapter);
2884 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2885 adapter->req_tx_queues != old_num_tx_queues ||
2886 adapter->req_rx_add_entries_per_subcrq !=
2888 adapter->req_tx_entries_per_subcrq !=
2890 !adapter->rx_pool ||
2891 !adapter->tso_pool ||
2892 !adapter->tx_pool) {
2893 release_napi(adapter);
2894 release_vpd_data(adapter);
2896 rc = init_resources(adapter);
2917 ibmvnic_disable_irqs(adapter);
2919 adapter->state = VNIC_CLOSED;
2935 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2936 adapter->reset_reason == VNIC_RESET_MOBILITY)
2942 /* restore the adapter state if reset failed */
2944 adapter->state = reset_state;
2946 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2949 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2950 adapter_state_to_string(adapter->state),
2951 adapter->failover_pending, rc);
2955 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2958 struct net_device *netdev = adapter->netdev;
2961 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2965 reset_state = adapter->state;
2973 adapter->reset_reason = rwi->reset_reason;
2976 release_resources(adapter);
2977 release_sub_crqs(adapter, 0);
2978 release_crq_queue(adapter);
2983 adapter->state = VNIC_PROBED;
2985 reinit_init_done(adapter);
2987 rc = init_crq_queue(adapter);
2989 netdev_err(adapter->netdev,
2994 rc = ibmvnic_reset_init(adapter, false);
2998 /* If the adapter was in PROBE or DOWN state prior to the reset,
3008 rc = init_resources(adapter);
3012 ibmvnic_disable_irqs(adapter);
3013 adapter->state = VNIC_CLOSED;
3026 /* restore adapter state if reset failed */
3028 adapter->state = reset_state;
3029 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
3030 adapter_state_to_string(adapter->state),
3031 adapter->failover_pending, rc);
3035 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
3040 spin_lock_irqsave(&adapter->rwi_lock, flags);
3042 if (!list_empty(&adapter->rwi_list)) {
3043 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
3050 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3056 * @adapter: ibmvnic_adapter struct
3068 static int do_passive_init(struct ibmvnic_adapter *adapter)
3071 struct net_device *netdev = adapter->netdev;
3072 struct device *dev = &adapter->vdev->dev;
3077 adapter->state = VNIC_PROBING;
3078 reinit_completion(&adapter->init_done);
3079 adapter->init_done_rc = 0;
3080 adapter->crq.active = true;
3082 rc = send_crq_init_complete(adapter);
3086 rc = send_version_xchg(adapter);
3088 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
3090 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3096 rc = init_sub_crqs(adapter);
3102 rc = init_sub_crq_irqs(adapter);
3108 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3109 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3110 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3112 adapter->state = VNIC_PROBED;
3118 release_sub_crqs(adapter, 1);
3120 adapter->state = VNIC_DOWN;
3126 struct ibmvnic_adapter *adapter;
3138 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
3139 dev = &adapter->vdev->dev;
3150 if (adapter->state == VNIC_PROBING &&
3151 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
3154 &adapter->ibmvnic_delayed_reset,
3159 /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
3160 if (adapter->state == VNIC_REMOVING)
3188 spin_lock(&adapter->rwi_lock);
3189 if (!list_empty(&adapter->rwi_list)) {
3190 if (test_and_set_bit_lock(0, &adapter->resetting)) {
3192 &adapter->ibmvnic_delayed_reset,
3198 spin_unlock(&adapter->rwi_lock);
3203 rwi = get_next_rwi(adapter);
3205 spin_lock_irqsave(&adapter->state_lock, flags);
3207 if (adapter->state == VNIC_REMOVING ||
3208 adapter->state == VNIC_REMOVED) {
3209 spin_unlock_irqrestore(&adapter->state_lock, flags);
3216 reset_state = adapter->state;
3219 spin_unlock_irqrestore(&adapter->state_lock, flags);
3223 rc = do_passive_init(adapter);
3226 netif_carrier_on(adapter->netdev);
3227 } else if (adapter->force_reset_recovery) {
3232 adapter->failover_pending = false;
3235 if (adapter->wait_for_reset) {
3237 adapter->force_reset_recovery = false;
3238 rc = do_hard_reset(adapter, rwi, reset_state);
3241 adapter->force_reset_recovery = false;
3242 rc = do_hard_reset(adapter, rwi, reset_state);
3255 * adapter some time to settle down before retrying.
3258 netdev_dbg(adapter->netdev,
3260 adapter_state_to_string(adapter->state),
3266 rc = do_reset(adapter, rwi, reset_state);
3269 adapter->last_reset_time = jiffies;
3272 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3274 rwi = get_next_rwi(adapter);
3278 * the adapter would be in an undefined state. So retry the
3293 adapter->force_reset_recovery = true;
3296 if (adapter->wait_for_reset) {
3297 adapter->reset_done_rc = rc;
3298 complete(&adapter->reset_done);
3301 clear_bit_unlock(0, &adapter->resetting);
3303 netdev_dbg(adapter->netdev,
3305 adapter_state_to_string(adapter->state),
3306 adapter->force_reset_recovery,
3307 adapter->wait_for_reset);
3312 struct ibmvnic_adapter *adapter;
3314 adapter = container_of(work, struct ibmvnic_adapter,
3316 __ibmvnic_reset(&adapter->ibmvnic_reset);
3319 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
3323 if (!list_empty(&adapter->rwi_list)) {
3324 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
3331 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
3334 struct net_device *netdev = adapter->netdev;
3339 spin_lock_irqsave(&adapter->rwi_lock, flags);
3346 if (adapter->state == VNIC_REMOVING ||
3347 adapter->state == VNIC_REMOVED ||
3348 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
3354 list_for_each_entry(tmp, &adapter->rwi_list, list) {
3371 if (adapter->force_reset_recovery)
3372 flush_reset_queue(adapter);
3375 list_add_tail(&rwi->list, &adapter->rwi_list);
3376 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
3378 queue_work(system_long_wq, &adapter->ibmvnic_reset);
3383 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
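
ibmvnic_reset() (3331-3383) queues reset work items (rwi) under rwi_lock, walking rwi_list at 3354 so a reason that is already pending is not queued twice, and flushing the whole queue when a forced recovery supersedes it (3371-3372). A user-space stand-in for the duplicate check:

    #include <stdbool.h>
    #include <stddef.h>

    struct rwi {
            int reset_reason;
            struct rwi *next;
    };

    static bool reason_already_queued(const struct rwi *head, int reason)
    {
            for (const struct rwi *p = head; p; p = p->next)
                    if (p->reset_reason == reason)
                            return true;    /* duplicate: drop the new request */
            return false;
    }
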
3393 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3395 if (test_bit(0, &adapter->resetting)) {
3396 netdev_err(adapter->netdev,
3403 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3407 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3410 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3413 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3426 struct ibmvnic_adapter *adapter;
3432 adapter = netdev_priv(netdev);
3433 scrq_num = (int)(napi - adapter->napi);
3435 rx_scrq = adapter->rx_scrq[scrq_num];
3446 if (unlikely(test_bit(0, &adapter->resetting) &&
3447 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
3448 enable_scrq_irq(adapter, rx_scrq);
3453 if (!pending_scrq(adapter, rx_scrq))
3455 next = ibmvnic_next_scrq(adapter, rx_scrq);
3465 remove_buff_from_pool(adapter, rx_buff);
3470 remove_buff_from_pool(adapter, rx_buff);
3486 if (adapter->rx_vlan_header_insertion &&
3493 remove_buff_from_pool(adapter, rx_buff);
3508 adapter->rx_stats_buffers[scrq_num].packets++;
3509 adapter->rx_stats_buffers[scrq_num].bytes += length;
3513 if (adapter->state != VNIC_CLOSING &&
3514 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
3515 adapter->req_rx_add_entries_per_subcrq / 2) ||
3517 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3520 enable_scrq_irq(adapter, rx_scrq);
3521 if (pending_scrq(adapter, rx_scrq)) {
3523 disable_scrq_irq(adapter, rx_scrq);
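
The tail of ibmvnic_poll() (3520-3523) uses the usual NAPI re-arm race check: re-enable the sub-CRQ interrupt, then look once more for work that slipped in before the enable took effect. A kernel-style sketch of that tail, built from the driver's own helpers shown above; the reschedule call sits on a line the match elided, and frames_processed is an illustrative local:

    if (napi_complete_done(napi, frames_processed)) {
            enable_scrq_irq(adapter, rx_scrq);        /* re-arm the queue irq */
            if (pending_scrq(adapter, rx_scrq)) {
                    /* work raced in after the re-arm: mask the irq again
                     * and let NAPI poll it instead */
                    disable_scrq_irq(adapter, rx_scrq);
                    napi_schedule(napi);
            }
    }
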
3532 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3536 adapter->fallback.mtu = adapter->req_mtu;
3537 adapter->fallback.rx_queues = adapter->req_rx_queues;
3538 adapter->fallback.tx_queues = adapter->req_tx_queues;
3539 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3540 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3542 reinit_completion(&adapter->reset_done);
3543 adapter->wait_for_reset = true;
3544 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3550 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3557 if (adapter->reset_done_rc) {
3559 adapter->desired.mtu = adapter->fallback.mtu;
3560 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3561 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3562 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3563 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3565 reinit_completion(&adapter->reset_done);
3566 adapter->wait_for_reset = true;
3567 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3572 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3580 adapter->wait_for_reset = false;
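
wait_for_reset() (3532-3580) snapshots the live ring and queue settings into adapter->fallback before requesting a CHANGE_PARAM reset, and on failure copies the snapshot back into adapter->desired and resets once more. The shape of that fallback dance, with hypothetical names:

    struct ring_params {
            int mtu, rx_queues, tx_queues, rx_entries, tx_entries;
    };

    static int change_params(struct ring_params *desired,
                             const struct ring_params *live,
                             int (*reset_fn)(const struct ring_params *))
    {
            struct ring_params fallback = *live;  /* snapshot current settings */
            int rc = reset_fn(desired);

            if (rc) {
                    *desired = fallback;          /* revert the request... */
                    rc = reset_fn(desired);       /* ...and reset once more */
            }
            return rc;
    }
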
3587 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3589 adapter->desired.mtu = new_mtu + ETH_HLEN;
3591 return wait_for_reset(adapter);
3628 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3631 rc = send_query_phys_parms(adapter);
3633 adapter->speed = SPEED_UNKNOWN;
3634 adapter->duplex = DUPLEX_UNKNOWN;
3636 cmd->base.speed = adapter->speed;
3637 cmd->base.duplex = adapter->duplex;
3648 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3652 strscpy(info->fw_version, adapter->fw_version,
3658 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3660 return adapter->msg_enable;
3665 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3667 adapter->msg_enable = data;
3672 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3677 return adapter->logical_link_state;
3685 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3687 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3688 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3691 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3692 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3702 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3704 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
3705 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3708 adapter->max_rx_add_entries_per_subcrq);
3710 adapter->max_tx_entries_per_subcrq);
3714 adapter->desired.rx_entries = ring->rx_pending;
3715 adapter->desired.tx_entries = ring->tx_pending;
3717 return wait_for_reset(adapter);
3723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3725 channels->max_rx = adapter->max_rx_queues;
3726 channels->max_tx = adapter->max_tx_queues;
3729 channels->rx_count = adapter->req_rx_queues;
3730 channels->tx_count = adapter->req_tx_queues;
3738 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3740 adapter->desired.rx_queues = channels->rx_count;
3741 adapter->desired.tx_queues = channels->tx_count;
3743 return wait_for_reset(adapter);
3748 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3757 for (i = 0; i < adapter->req_tx_queues; i++) {
3768 for (i = 0; i < adapter->req_rx_queues; i++) {
3782 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3787 adapter->req_tx_queues * NUM_TX_STATS +
3788 adapter->req_rx_queues * NUM_RX_STATS;
3797 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3805 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3810 reinit_completion(&adapter->stats_done);
3811 rc = ibmvnic_send_crq(adapter, &crq);
3814 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3820 (adapter, ibmvnic_stats[i].offset));
3822 for (j = 0; j < adapter->req_tx_queues; j++) {
3823 data[i] = adapter->tx_stats_buffers[j].packets;
3825 data[i] = adapter->tx_stats_buffers[j].bytes;
3827 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3831 for (j = 0; j < adapter->req_rx_queues; j++) {
3832 data[i] = adapter->rx_stats_buffers[j].packets;
3834 data[i] = adapter->rx_stats_buffers[j].bytes;
3836 data[i] = adapter->rx_stats_buffers[j].interrupts;
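
ibmvnic_get_ethtool_stats() (3797-3836) fills data[] as: the fixed ibmvnic_stats counters first, then three counters per tx queue (packets, bytes, dropped_packets), then three per rx queue (packets, bytes, interrupts), matching the count computed at 3787-3788. The index math as a helper; the per-queue counts mirror the driver's NUM_TX_STATS/NUM_RX_STATS:

    #define NUM_TX_STATS 3  /* packets, bytes, dropped_packets */
    #define NUM_RX_STATS 3  /* packets, bytes, interrupts */

    static unsigned int stats_array_len(unsigned int n_fixed_stats,
                                        unsigned int tx_queues,
                                        unsigned int rx_queues)
    {
            return n_fixed_stats +
                   tx_queues * NUM_TX_STATS +
                   rx_queues * NUM_RX_STATS;
    }
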
3858 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3864 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3880 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3884 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3889 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3893 if (!adapter->tx_scrq || !adapter->rx_scrq)
3896 ibmvnic_clean_affinity(adapter);
3898 for (i = 0; i < adapter->req_tx_queues; i++) {
3899 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3900 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3905 for (i = 0; i < adapter->req_rx_queues; i++) {
3906 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3907 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3915 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3919 struct device *dev = &adapter->vdev->dev;
3922 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3928 adapter->vdev->unit_address,
3933 netdev_err(adapter->netdev,
3952 *adapter)   (continuation of init_sub_crq_queue())
3954 struct device *dev = &adapter->vdev->dev;
3978 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3982 rc = ibmvnic_reset_crq(adapter);
3985 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3991 scrq->adapter = adapter;
4006 netdev_dbg(adapter->netdev,
4015 adapter->vdev->unit_address,
4031 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
4035 ibmvnic_clean_affinity(adapter);
4036 if (adapter->tx_scrq) {
4037 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
4038 if (!adapter->tx_scrq[i])
4041 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
4043 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
4044 if (adapter->tx_scrq[i]->irq) {
4045 free_irq(adapter->tx_scrq[i]->irq,
4046 adapter->tx_scrq[i]);
4047 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
4048 adapter->tx_scrq[i]->irq = 0;
4051 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
4055 kfree(adapter->tx_scrq);
4056 adapter->tx_scrq = NULL;
4057 adapter->num_active_tx_scrqs = 0;
4060 if (adapter->rx_scrq) {
4061 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
4062 if (!adapter->rx_scrq[i])
4065 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
4067 if (adapter->rx_scrq[i]->irq) {
4068 free_irq(adapter->rx_scrq[i]->irq,
4069 adapter->rx_scrq[i]);
4070 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
4071 adapter->rx_scrq[i]->irq = 0;
4074 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
4078 kfree(adapter->rx_scrq);
4079 adapter->rx_scrq = NULL;
4080 adapter->num_active_rx_scrqs = 0;
4084 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4087 struct device *dev = &adapter->vdev->dev;
4090 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4122 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4125 struct device *dev = &adapter->vdev->dev;
4133 if (test_bit(0, &adapter->resetting) &&
4134 adapter->reset_reason == VNIC_RESET_MOBILITY) {
4138 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4146 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
4149 struct device *dev = &adapter->vdev->dev;
4158 while (pending_scrq(adapter, scrq)) {
4164 next = ibmvnic_next_scrq(adapter, scrq);
4168 tx_pool = &adapter->tso_pool[pool];
4171 tx_pool = &adapter->tx_pool[pool];
4188 netdev_warn(adapter->netdev,
4199 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
4203 (adapter->req_tx_entries_per_subcrq / 2) &&
4204 __netif_subqueue_stopped(adapter->netdev,
4207 if (adapter->tx_queues_active) {
4208 netif_wake_subqueue(adapter->netdev,
4210 netdev_dbg(adapter->netdev,
4218 enable_scrq_irq(adapter, scrq);
4220 if (pending_scrq(adapter, scrq)) {
4221 disable_scrq_irq(adapter, scrq);
4231 struct ibmvnic_adapter *adapter = scrq->adapter;
4233 disable_scrq_irq(adapter, scrq);
4234 ibmvnic_complete_tx(adapter, scrq);
4242 struct ibmvnic_adapter *adapter = scrq->adapter;
4247 if (unlikely(adapter->state != VNIC_OPEN))
4250 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
4252 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4253 disable_scrq_irq(adapter, scrq);
4254 __napi_schedule(&adapter->napi[scrq->scrq_num]);
4260 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
4262 struct device *dev = &adapter->vdev->dev;
4267 for (i = 0; i < adapter->req_tx_queues; i++) {
4268 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
4270 scrq = adapter->tx_scrq[i];
4280 adapter->vdev->unit_address, i);
4292 for (i = 0; i < adapter->req_rx_queues; i++) {
4293 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4295 scrq = adapter->rx_scrq[i];
4303 adapter->vdev->unit_address, i);
4315 ibmvnic_set_affinity(adapter);
4322 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4323 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4325 i = adapter->req_tx_queues;
4328 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4329 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4331 release_sub_crqs(adapter, 1);
4335 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4337 struct device *dev = &adapter->vdev->dev;
4344 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4351 allqueues[i] = init_sub_crq_queue(adapter);
4361 adapter->min_tx_queues + adapter->min_rx_queues) {
4368 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4371 if (adapter->req_rx_queues > adapter->min_rx_queues)
4372 adapter->req_rx_queues--;
4377 if (adapter->req_tx_queues > adapter->min_tx_queues)
4378 adapter->req_tx_queues--;
4385 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4386 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4387 if (!adapter->tx_scrq)
4390 for (i = 0; i < adapter->req_tx_queues; i++) {
4391 adapter->tx_scrq[i] = allqueues[i];
4392 adapter->tx_scrq[i]->pool_index = i;
4393 adapter->num_active_tx_scrqs++;
4396 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4397 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4398 if (!adapter->rx_scrq)
4401 for (i = 0; i < adapter->req_rx_queues; i++) {
4402 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4403 adapter->rx_scrq[i]->scrq_num = i;
4404 adapter->num_active_rx_scrqs++;
4411 kfree(adapter->tx_scrq);
4412 adapter->tx_scrq = NULL;
4415 release_sub_crq_queue(adapter, allqueues[i], 1);
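
init_sub_crqs() (4335-4415) optimistically allocates req_tx_queues + req_rx_queues sub-CRQs and, when the hypervisor grants fewer (4361), trims the rx and then the tx request toward the negotiated minimums before retrying (4368-4378). A simplified model of that trim, not the driver's exact loop:

    static void trim_queue_request(unsigned int granted,
                                   unsigned int *req_rx, unsigned int min_rx,
                                   unsigned int *req_tx, unsigned int min_tx)
    {
            while (*req_rx + *req_tx > granted) {
                    if (*req_rx > min_rx)         /* shed an rx queue first */
                            (*req_rx)--;
                    else if (*req_tx > min_tx)    /* then a tx queue */
                            (*req_tx)--;
                    else
                            break;                /* already at the minimums */
            }
    }
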
4420 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4422 struct device *dev = &adapter->vdev->dev;
4432 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4433 adapter->promisc_supported)
4442 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4444 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4445 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4450 if (adapter->desired.mtu)
4451 adapter->req_mtu = adapter->desired.mtu;
4453 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4455 if (!adapter->desired.tx_entries)
4456 adapter->desired.tx_entries =
4457 adapter->max_tx_entries_per_subcrq;
4458 if (!adapter->desired.rx_entries)
4459 adapter->desired.rx_entries =
4460 adapter->max_rx_add_entries_per_subcrq;
4463 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4465 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4466 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4467 adapter->desired.tx_entries = max_entries;
4470 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4471 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4472 adapter->desired.rx_entries = max_entries;
4475 if (adapter->desired.tx_entries)
4476 adapter->req_tx_entries_per_subcrq =
4477 adapter->desired.tx_entries;
4479 adapter->req_tx_entries_per_subcrq =
4480 adapter->max_tx_entries_per_subcrq;
4482 if (adapter->desired.rx_entries)
4483 adapter->req_rx_add_entries_per_subcrq =
4484 adapter->desired.rx_entries;
4486 adapter->req_rx_add_entries_per_subcrq =
4487 adapter->max_rx_add_entries_per_subcrq;
4489 if (adapter->desired.tx_queues)
4490 adapter->req_tx_queues =
4491 adapter->desired.tx_queues;
4493 adapter->req_tx_queues =
4494 adapter->opt_tx_comp_sub_queues;
4496 if (adapter->desired.rx_queues)
4497 adapter->req_rx_queues =
4498 adapter->desired.rx_queues;
4500 adapter->req_rx_queues =
4501 adapter->opt_rx_comp_queues;
4503 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4505 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4512 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4514 ibmvnic_send_crq(adapter, &crq);
4517 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4519 ibmvnic_send_crq(adapter, &crq);
4522 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4524 ibmvnic_send_crq(adapter, &crq);
4529 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4531 ibmvnic_send_crq(adapter, &crq);
4536 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4538 ibmvnic_send_crq(adapter, &crq);
4541 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4543 ibmvnic_send_crq(adapter, &crq);
4545 if (adapter->netdev->flags & IFF_PROMISC) {
4546 if (adapter->promisc_supported) {
4551 ibmvnic_send_crq(adapter, &crq);
4558 ibmvnic_send_crq(adapter, &crq);
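
send_request_cap() (4420-4558) clamps the desired ring entries so that one buffer of req_mtu + IBMVNIC_BUFFER_HLEN per entry still fits inside an LTB set (4463-4472) before firing the REQUEST_CAPABILITY CRQs. The clamp in isolation, with the constants passed in as parameters:

    static unsigned long clamp_ring_entries(unsigned long desired,
                                            unsigned long mtu,
                                            unsigned long buffer_hlen,
                                            unsigned long ltb_set_size)
    {
            unsigned long max_entries = ltb_set_size / (mtu + buffer_hlen);

            return desired > max_entries ? max_entries : desired;
    }
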
4567 static int pending_scrq(struct ibmvnic_adapter *adapter,
4583 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4607 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4609 struct ibmvnic_crq_queue *queue = &adapter->crq;
4628 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4642 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4645 unsigned int ua = adapter->vdev->unit_address;
4646 struct device *dev = &adapter->vdev->dev;
4661 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4664 unsigned int ua = adapter->vdev->unit_address;
4665 struct device *dev = &adapter->vdev->dev;
4669 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4673 if (!adapter->crq.active &&
4698 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4700 struct device *dev = &adapter->vdev->dev;
4708 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4711 rc = ibmvnic_send_crq(adapter, &crq);
4733 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4744 len += strlen(adapter->netdev->name) + 1;
4749 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4771 len = strlen(adapter->netdev->name) + 1;
4773 strscpy(vlcd->name, adapter->netdev->name, len);
4776 static int send_login(struct ibmvnic_adapter *adapter)
4780 struct device *dev = &adapter->vdev->dev;
4793 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4794 netdev_err(adapter->netdev,
4799 release_login_buffer(adapter);
4800 release_login_rsp_buffer(adapter);
4802 client_data_len = vnic_client_data_len(adapter);
4806 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4821 sizeof(u64) * adapter->req_tx_queues +
4822 sizeof(u64) * adapter->req_rx_queues +
4823 sizeof(u64) * adapter->req_rx_queues +
4837 adapter->login_buf = login_buffer;
4838 adapter->login_buf_token = buffer_token;
4839 adapter->login_buf_sz = buffer_size;
4840 adapter->login_rsp_buf = login_rsp_buffer;
4841 adapter->login_rsp_buf_token = rsp_buffer_token;
4842 adapter->login_rsp_buf_sz = rsp_buffer_size;
4846 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4849 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4852 sizeof(u64) * adapter->req_tx_queues);
4860 sizeof(u64) * adapter->req_tx_queues);
4862 for (i = 0; i < adapter->req_tx_queues; i++) {
4863 if (adapter->tx_scrq[i]) {
4865 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4869 for (i = 0; i < adapter->req_rx_queues; i++) {
4870 if (adapter->rx_scrq[i]) {
4872 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4878 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4883 vnic_add_client_data(adapter, vlcd);
4885 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4886 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4887 netdev_dbg(adapter->netdev, "%016lx\n",
4888 ((unsigned long *)(adapter->login_buf))[i]);
4897 adapter->login_pending = true;
4898 rc = ibmvnic_send_crq(adapter, &crq);
4900 adapter->login_pending = false;
4901 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4912 adapter->login_rsp_buf = NULL;
4917 adapter->login_buf = NULL;
4922 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4933 return ibmvnic_send_crq(adapter, &crq);
4936 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4944 return ibmvnic_send_crq(adapter, &crq);
4947 static void send_query_map(struct ibmvnic_adapter *adapter)
4954 ibmvnic_send_crq(adapter, &crq);
4958 static void send_query_cap(struct ibmvnic_adapter *adapter)
4969 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4976 ibmvnic_send_crq(adapter, &crq);
4980 ibmvnic_send_crq(adapter, &crq);
4984 ibmvnic_send_crq(adapter, &crq);
4988 ibmvnic_send_crq(adapter, &crq);
4992 ibmvnic_send_crq(adapter, &crq);
4996 ibmvnic_send_crq(adapter, &crq);
5001 ibmvnic_send_crq(adapter, &crq);
5006 ibmvnic_send_crq(adapter, &crq);
5011 ibmvnic_send_crq(adapter, &crq);
5016 ibmvnic_send_crq(adapter, &crq);
5020 ibmvnic_send_crq(adapter, &crq);
5024 ibmvnic_send_crq(adapter, &crq);
5028 ibmvnic_send_crq(adapter, &crq);
5032 ibmvnic_send_crq(adapter, &crq);
5036 ibmvnic_send_crq(adapter, &crq);
5040 ibmvnic_send_crq(adapter, &crq);
5044 ibmvnic_send_crq(adapter, &crq);
5048 ibmvnic_send_crq(adapter, &crq);
5052 ibmvnic_send_crq(adapter, &crq);
5056 ibmvnic_send_crq(adapter, &crq);
5060 ibmvnic_send_crq(adapter, &crq);
5065 ibmvnic_send_crq(adapter, &crq);
5070 ibmvnic_send_crq(adapter, &crq);
5075 ibmvnic_send_crq(adapter, &crq);
5080 ibmvnic_send_crq(adapter, &crq);
5089 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
5092 struct device *dev = &adapter->vdev->dev;
5095 adapter->ip_offload_tok =
5097 &adapter->ip_offload_buf,
5101 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
5112 cpu_to_be32(adapter->ip_offload_tok);
5114 ibmvnic_send_crq(adapter, &crq);
5117 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
5119 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
5120 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5121 struct device *dev = &adapter->vdev->dev;
5125 adapter->ip_offload_ctrl_tok =
5128 sizeof(adapter->ip_offload_ctrl),
5131 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
5136 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5151 if (adapter->state != VNIC_PROBING) {
5152 old_hw_features = adapter->netdev->hw_features;
5153 adapter->netdev->hw_features = 0;
5156 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
5159 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
5162 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
5164 if ((adapter->netdev->features &
5166 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
5169 adapter->netdev->hw_features |= NETIF_F_TSO;
5171 adapter->netdev->hw_features |= NETIF_F_TSO6;
5173 if (adapter->state == VNIC_PROBING) {
5174 adapter->netdev->features |= adapter->netdev->hw_features;
5175 } else if (old_hw_features != adapter->netdev->hw_features) {
5179 adapter->netdev->features &= adapter->netdev->hw_features;
5181 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
5182 adapter->netdev->hw_features;
5183 adapter->netdev->features |=
5184 tmp & adapter->netdev->wanted_features;
5191 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5192 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
5193 ibmvnic_send_crq(adapter, &crq);
5197 struct ibmvnic_adapter *adapter)   (continuation of handle_vpd_size_rsp())
5199 struct device *dev = &adapter->vdev->dev;
5204 complete(&adapter->fw_done);
5208 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
5209 complete(&adapter->fw_done);
5213 struct ibmvnic_adapter *adapter)   (continuation of handle_vpd_rsp())
5215 struct device *dev = &adapter->vdev->dev;
5219 memset(adapter->fw_version, 0, 32);
5221 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
5233 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
5240 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
5247 /* copy firmware version string from vpd into adapter */
5249 (adapter->vpd->buff + adapter->vpd->len)) {
5250 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
5256 if (adapter->fw_version[0] == '\0')
5257 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
5258 complete(&adapter->fw_done);
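
handle_vpd_rsp() (5213-5258) locates the "RM" keyword in the VPD image, reads the length byte that follows it, copies that many characters as the firmware level, and falls back to "N/A". A user-space model of the parse (memmem() is a GNU/BSD extension):

    #define _GNU_SOURCE
    #include <string.h>
    #include <stdio.h>

    static void parse_fw_level(const char *vpd, size_t len,
                               char *out, size_t outsz)
    {
            const char *rm = memmem(vpd, len, "RM", 2);   /* keyword header */

            if (rm && rm + 3 <= vpd + len) {
                    size_t fw_len = (unsigned char)rm[2]; /* length byte */

                    if (rm + 3 + fw_len <= vpd + len && fw_len < outsz) {
                            memcpy(out, rm + 3, fw_len);
                            out[fw_len] = '\0';
                            return;
                    }
            }
            snprintf(out, outsz, "N/A");                  /* driver's fallback */
    }
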
5261 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5263 struct device *dev = &adapter->vdev->dev;
5264 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5267 dma_unmap_single(dev, adapter->ip_offload_tok,
5268 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5270 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5271 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
5272 netdev_dbg(adapter->netdev, "%016lx\n",
5275 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5276 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5277 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5279 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5281 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5283 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5285 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5287 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5289 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5291 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5293 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5295 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5297 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5299 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5301 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5303 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5305 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5307 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5309 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5311 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5314 send_control_ip_offload(adapter);
5321 return "adapter problem";
5340 struct ibmvnic_adapter *adapter)   (continuation of handle_error_indication())
5342 struct device *dev = &adapter->vdev->dev;
5354 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5356 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5360 struct ibmvnic_adapter *adapter)   (continuation of handle_change_mac_rsp())
5362 struct net_device *netdev = adapter->netdev;
5363 struct device *dev = &adapter->vdev->dev;
5375 ether_addr_copy(adapter->mac_addr,
5378 complete(&adapter->fw_done);
5383 struct ibmvnic_adapter *adapter)   (continuation of handle_request_cap_rsp())
5385 struct device *dev = &adapter->vdev->dev;
5389 atomic_dec(&adapter->running_cap_crqs);
5390 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5391 atomic_read(&adapter->running_cap_crqs));
5394 req_value = &adapter->req_tx_queues;
5398 req_value = &adapter->req_rx_queues;
5402 req_value = &adapter->req_rx_add_queues;
5406 req_value = &adapter->req_tx_entries_per_subcrq;
5410 req_value = &adapter->req_rx_add_entries_per_subcrq;
5414 req_value = &adapter->req_mtu;
5418 req_value = &adapter->promisc;
5440 *req_value = adapter->fallback.mtu;
5446 send_request_cap(adapter, 1);
5455 if (atomic_read(&adapter->running_cap_crqs) == 0)
5456 send_query_ip_offload(adapter);
5460 struct ibmvnic_adapter *adapter)   (continuation of handle_login_rsp())
5462 struct device *dev = &adapter->vdev->dev;
5463 struct net_device *netdev = adapter->netdev;
5464 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5465 struct ibmvnic_login_buffer *login = adapter->login_buf;
5477 if (!adapter->login_pending) {
5481 adapter->login_pending = false;
5488 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5489 complete(&adapter->init_done);
5493 if (adapter->failover_pending) {
5494 adapter->init_done_rc = -EAGAIN;
5496 complete(&adapter->init_done);
5501 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5503 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5504 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5505 netdev_dbg(adapter->netdev, "%016lx\n",
5506 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5512 adapter->req_rx_add_queues !=
5515 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5531 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5535 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5536 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5540 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5542 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5543 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5545 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5546 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5547 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5548 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5551 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5554 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5556 adapter->num_active_tx_scrqs = num_tx_pools;
5557 adapter->num_active_rx_scrqs = num_rx_pools;
5558 release_login_rsp_buffer(adapter);
5559 release_login_buffer(adapter);
5560 complete(&adapter->init_done);
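
handle_login_rsp() above never assumes a fixed layout: each array in the response is located as the buffer base plus a big-endian off_* field, after which the sub-CRQ handles are copied out and both login buffers are released. A small endian-aware parse in the same style (the two-field header and memcpy-based reads are sketch simplifications; the driver itself keeps the handles big-endian):

        #include <arpa/inet.h>          /* ntohl() as a be32_to_cpu stand-in */
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        struct login_rsp_hdr {          /* sketch layout, not the VNIC one */
                uint32_t num_txsubm_subcrqs;    /* big-endian count */
                uint32_t off_txsubm_subcrqs;    /* big-endian byte offset */
        };

        int main(void)
        {
                uint8_t buf[64] = { 0 };
                struct login_rsp_hdr hdr = {
                        .num_txsubm_subcrqs = htonl(2),
                        .off_txsubm_subcrqs = htonl(16),
                };
                uint64_t handles[2] = { 0x1111, 0x2222 };  /* host order here */

                /* build a fake response: header, then the array at offset 16 */
                memcpy(buf, &hdr, sizeof(hdr));
                memcpy(buf + 16, handles, sizeof(handles));

                /* parse: locate the array as base + be32_to_cpu(offset) */
                struct login_rsp_hdr rsp;
                memcpy(&rsp, buf, sizeof(rsp));

                uint32_t n = ntohl(rsp.num_txsubm_subcrqs);
                uint32_t off = ntohl(rsp.off_txsubm_subcrqs);

                for (uint32_t i = 0; i < n; i++) {
                        uint64_t handle;

                        /* memcpy sidesteps alignment instead of casting
                         * the offset pointer to uint64_t * */
                        memcpy(&handle, buf + off + i * sizeof(handle),
                               sizeof(handle));
                        printf("tx handle %u = %#llx\n", i,
                               (unsigned long long)handle);
                }
                return 0;
        }
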
5566 struct ibmvnic_adapter *adapter)
5568 struct device *dev = &adapter->vdev->dev;
5577 struct ibmvnic_adapter *adapter)
5579 struct net_device *netdev = adapter->netdev;
5580 struct device *dev = &adapter->vdev->dev;
5595 struct ibmvnic_adapter *adapter)
5597 struct net_device *netdev = adapter->netdev;
5598 struct device *dev = &adapter->vdev->dev;
5601 atomic_dec(&adapter->running_cap_crqs);
5603 atomic_read(&adapter->running_cap_crqs));
5612 adapter->min_tx_queues =
5615 adapter->min_tx_queues);
5618 adapter->min_rx_queues =
5621 adapter->min_rx_queues);
5624 adapter->min_rx_add_queues =
5627 adapter->min_rx_add_queues);
5630 adapter->max_tx_queues =
5633 adapter->max_tx_queues);
5636 adapter->max_rx_queues =
5639 adapter->max_rx_queues);
5642 adapter->max_rx_add_queues =
5645 adapter->max_rx_add_queues);
5648 adapter->min_tx_entries_per_subcrq =
5651 adapter->min_tx_entries_per_subcrq);
5654 adapter->min_rx_add_entries_per_subcrq =
5657 adapter->min_rx_add_entries_per_subcrq);
5660 adapter->max_tx_entries_per_subcrq =
5663 adapter->max_tx_entries_per_subcrq);
5666 adapter->max_rx_add_entries_per_subcrq =
5669 adapter->max_rx_add_entries_per_subcrq);
5672 adapter->tcp_ip_offload =
5675 adapter->tcp_ip_offload);
5678 adapter->promisc_supported =
5681 adapter->promisc_supported);
5684 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5685 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5686 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5689 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5690 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5691 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5694 adapter->max_multicast_filters =
5697 adapter->max_multicast_filters);
5700 adapter->vlan_header_insertion =
5702 if (adapter->vlan_header_insertion)
5705 adapter->vlan_header_insertion);
5708 adapter->rx_vlan_header_insertion =
5711 adapter->rx_vlan_header_insertion);
5714 adapter->max_tx_sg_entries =
5717 adapter->max_tx_sg_entries);
5720 adapter->rx_sg_supported =
5723 adapter->rx_sg_supported);
5726 adapter->opt_tx_comp_sub_queues =
5729 adapter->opt_tx_comp_sub_queues);
5732 adapter->opt_rx_comp_queues =
5735 adapter->opt_rx_comp_queues);
5738 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5741 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5744 adapter->opt_tx_entries_per_subcrq =
5747 adapter->opt_tx_entries_per_subcrq);
5750 adapter->opt_rxba_entries_per_subcrq =
5753 adapter->opt_rxba_entries_per_subcrq);
5756 adapter->tx_rx_desc_req = crq->query_capability.number;
5758 adapter->tx_rx_desc_req);
5767 if (atomic_read(&adapter->running_cap_crqs) == 0)
5768 send_request_cap(adapter, 0);
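
Each arm of handle_query_cap_rsp() above byte-swaps query_capability.number and stores it in the matching adapter field; the MTU capabilities also feed netdev->min_mtu/max_mtu after subtracting ETH_HLEN, because the device quotes MTU inclusive of the L2 header. A stand-in for the pattern (capability ids invented):

        #include <stdint.h>
        #include <stdio.h>

        #define ETH_HLEN 14     /* Ethernet header, as in the driver's math */

        enum cap_id { CAP_MIN_MTU, CAP_MAX_MTU, CAP_MAX_TX_QUEUES };

        struct caps {
                uint64_t min_mtu, max_mtu, max_tx_queues;
                unsigned int netdev_min_mtu, netdev_max_mtu;
        };

        static void store_cap(struct caps *c, enum cap_id id, uint64_t number)
        {
                switch (id) {
                case CAP_MIN_MTU:
                        c->min_mtu = number;
                        /* device MTU includes L2; netdev MTU does not */
                        c->netdev_min_mtu = number - ETH_HLEN;
                        break;
                case CAP_MAX_MTU:
                        c->max_mtu = number;
                        c->netdev_max_mtu = number - ETH_HLEN;
                        break;
                case CAP_MAX_TX_QUEUES:
                        c->max_tx_queues = number;
                        break;
                }
        }

        int main(void)
        {
                struct caps c = { 0 };

                store_cap(&c, CAP_MIN_MTU, 68);
                store_cap(&c, CAP_MAX_MTU, 9014);
                printf("netdev mtu range: %u..%u\n",
                       c.netdev_min_mtu, c.netdev_max_mtu);
                return 0;
        }
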
5771 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5780 mutex_lock(&adapter->fw_lock);
5781 adapter->fw_done_rc = 0;
5782 reinit_completion(&adapter->fw_done);
5784 rc = ibmvnic_send_crq(adapter, &crq);
5786 mutex_unlock(&adapter->fw_lock);
5790 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5792 mutex_unlock(&adapter->fw_lock);
5796 mutex_unlock(&adapter->fw_lock);
5797 return adapter->fw_done_rc ? -EIO : 0;
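
send_query_phys_parms() above is the driver's standard synchronous CRQ shape: take fw_lock, re-arm fw_done, send, then wait with a timeout so a dead partner cannot hang the caller, and finally report the status the response handler parked in fw_done_rc. A kernel-style condensation of just that shape (a sketch with the payload setup elided, not the verbatim function):

        static int send_and_wait(struct ibmvnic_adapter *adapter,
                                 union ibmvnic_crq *crq)
        {
                int rc;

                mutex_lock(&adapter->fw_lock);
                adapter->fw_done_rc = 0;
                reinit_completion(&adapter->fw_done);

                rc = ibmvnic_send_crq(adapter, crq);
                if (rc) {
                        mutex_unlock(&adapter->fw_lock);
                        return rc;
                }

                /* bounded wait; also fails fast if the CRQ goes inactive,
                 * per ibmvnic_wait_for_completion() near the top of this
                 * listing */
                rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done,
                                                 10000);
                if (rc) {
                        mutex_unlock(&adapter->fw_lock);
                        return rc;
                }

                mutex_unlock(&adapter->fw_lock);
                /* the response handler stored its status before completing */
                return adapter->fw_done_rc ? -EIO : 0;
        }
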
5801 struct ibmvnic_adapter *adapter)
5803 struct net_device *netdev = adapter->netdev;
5814 adapter->speed = SPEED_10;
5817 adapter->speed = SPEED_100;
5820 adapter->speed = SPEED_1000;
5823 adapter->speed = SPEED_10000;
5826 adapter->speed = SPEED_25000;
5829 adapter->speed = SPEED_40000;
5832 adapter->speed = SPEED_50000;
5835 adapter->speed = SPEED_100000;
5838 adapter->speed = SPEED_200000;
5843 adapter->speed = SPEED_UNKNOWN;
5846 adapter->duplex = DUPLEX_FULL;
5848 adapter->duplex = DUPLEX_HALF;
5850 adapter->duplex = DUPLEX_UNKNOWN;
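
handle_query_phys_parms_rsp() above maps the firmware's speed and duplex codes onto ethtool constants, defaulting to SPEED_UNKNOWN/DUPLEX_UNKNOWN rather than guessing. The shape, as a trivial userspace sketch (firmware codes and constants are stand-ins for the uapi values):

        #include <stdio.h>

        #define SPEED_1000      1000
        #define SPEED_10000     10000
        #define SPEED_UNKNOWN   -1

        enum fw_speed { FW_1G = 3, FW_10G = 4 };   /* invented codes */

        static int decode_speed(int fw_code)
        {
                switch (fw_code) {
                case FW_1G:
                        return SPEED_1000;
                case FW_10G:
                        return SPEED_10000;
                default:
                        /* an unrecognized code must not invent a speed */
                        return SPEED_UNKNOWN;
                }
        }

        int main(void)
        {
                printf("%d %d\n", decode_speed(FW_10G), decode_speed(99));
                return 0;
        }
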
5856 struct ibmvnic_adapter *adapter)
5859 struct net_device *netdev = adapter->netdev;
5860 struct device *dev = &adapter->vdev->dev;
5872 adapter->from_passive_init = true;
5876 adapter->login_pending = false;
5878 if (adapter->state == VNIC_DOWN)
5879 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5881 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5885 * reset either because the adapter was still
5890 * is already scheduled or the adapter is
5896 adapter->failover_pending = false;
5899 if (!completion_done(&adapter->init_done)) {
5900 if (!adapter->init_done_rc)
5901 adapter->init_done_rc = -EAGAIN;
5902 complete(&adapter->init_done);
5908 adapter->crq.active = true;
5909 send_version_xchg(adapter);
5917 adapter->crq.active = false;
5921 if (!completion_done(&adapter->fw_done)) {
5922 adapter->fw_done_rc = -EIO;
5923 complete(&adapter->fw_done);
5927 if (!completion_done(&adapter->init_done)) {
5928 adapter->init_done_rc = -EAGAIN;
5929 complete(&adapter->init_done);
5932 if (!completion_done(&adapter->stats_done))
5933 complete(&adapter->stats_done);
5934 if (test_bit(0, &adapter->resetting))
5935 adapter->force_reset_recovery = true;
5937 dev_info(dev, "Migrated, re-enabling adapter\n");
5938 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5941 adapter->failover_pending = true;
5943 /* The adapter lost the connection */
5946 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5968 send_query_cap(adapter);
5971 handle_query_cap_rsp(crq, adapter);
5974 handle_query_map_rsp(crq, adapter);
5977 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5978 complete(&adapter->fw_done);
5981 handle_request_unmap_rsp(crq, adapter);
5984 handle_request_cap_rsp(crq, adapter);
5988 handle_login_rsp(crq, adapter);
5995 adapter->logical_link_state =
5997 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5998 complete(&adapter->init_done);
6002 adapter->phys_link_state =
6004 adapter->logical_link_state =
6006 if (adapter->phys_link_state && adapter->logical_link_state)
6013 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6017 handle_error_indication(crq, adapter);
6021 complete(&adapter->stats_done);
6025 handle_query_ip_offload_rsp(adapter);
6032 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6033 sizeof(adapter->ip_offload_ctrl),
6035 complete(&adapter->init_done);
6039 complete(&adapter->fw_done);
6042 handle_vpd_size_rsp(crq, adapter);
6045 handle_vpd_rsp(crq, adapter);
6048 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6049 complete(&adapter->fw_done);
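
The fragments above are the tail of ibmvnic_handle_crq(): a switch over response opcodes in which each arm updates adapter state and completes whichever waiter is parked on init_done, fw_done, or stats_done. Two arms of that dispatch, condensed (a sketch; opcode names are assumed):

        static void handle_crq_sketch(union ibmvnic_crq *crq,
                                      struct ibmvnic_adapter *adapter)
        {
                switch (crq->generic.cmd) {
                case LOGIN_RSP:
                        handle_login_rsp(crq, adapter);
                        break;
                case QUERY_PHYS_PARMS_RSP:
                        /* synchronous callers sleep on fw_done; park the
                         * status in fw_done_rc before waking them */
                        adapter->fw_done_rc =
                                handle_query_phys_parms_rsp(crq, adapter);
                        complete(&adapter->fw_done);
                        break;
                default:
                        netdev_err(adapter->netdev,
                                   "Got an invalid cmd type 0x%02x\n",
                                   crq->generic.cmd);
                }
        }
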
6059 struct ibmvnic_adapter *adapter = instance;
6061 tasklet_schedule(&adapter->tasklet);
6067 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6068 struct ibmvnic_crq_queue *queue = &adapter->crq;
6075 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6082 ibmvnic_handle_crq(crq, adapter);
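
The interrupt and tasklet fragments above form the usual top/bottom-half split: the hard IRQ only schedules the tasklet, and the tasklet drains every queued CRQ through the dispatcher. A kernel-style sketch of the pair (consumed-descriptor handling simplified):

        static irqreturn_t ibmvnic_interrupt_sketch(int irq, void *instance)
        {
                struct ibmvnic_adapter *adapter = instance;

                tasklet_schedule(&adapter->tasklet);
                return IRQ_HANDLED;
        }

        static void ibmvnic_tasklet_sketch(struct tasklet_struct *t)
        {
                struct ibmvnic_adapter *adapter = from_tasklet(adapter, t,
                                                               tasklet);
                union ibmvnic_crq *crq;

                /* drain until the queue cursor finds no valid descriptor */
                while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
                        ibmvnic_handle_crq(crq, adapter);
                        crq->generic.first = 0; /* mark the slot consumed */
                }
        }
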
6089 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6091 struct vio_dev *vdev = adapter->vdev;
6099 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6104 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6106 struct ibmvnic_crq_queue *crq = &adapter->crq;
6107 struct device *dev = &adapter->vdev->dev;
6108 struct vio_dev *vdev = adapter->vdev;
6130 dev_warn(dev, "Partner adapter not ready\n");
6137 static void release_crq_queue(struct ibmvnic_adapter *adapter)
6139 struct ibmvnic_crq_queue *crq = &adapter->crq;
6140 struct vio_dev *vdev = adapter->vdev;
6146 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6147 free_irq(vdev->irq, adapter);
6148 tasklet_kill(&adapter->tasklet);
6160 static int init_crq_queue(struct ibmvnic_adapter *adapter)
6162 struct ibmvnic_crq_queue *crq = &adapter->crq;
6163 struct device *dev = &adapter->vdev->dev;
6164 struct vio_dev *vdev = adapter->vdev;
6187 rc = ibmvnic_reset_crq(adapter);
6191 dev_warn(dev, "Partner adapter not ready\n");
6193 dev_warn(dev, "Error %d opening adapter\n", rc);
6199 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6201 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6203 adapter->vdev->unit_address);
6204 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6221 tasklet_schedule(&adapter->tasklet);
6226 tasklet_kill(&adapter->tasklet);
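
init_crq_queue() above follows the classic setup-and-unwind shape: (re)register the queue, wire up the tasklet, name and request the IRQ, and on failure undo exactly the steps that succeeded; release_crq_queue() mirrors the same order at teardown. A condensed kernel-style sketch (first-time registration elided; label name assumed):

        static int init_crq_sketch(struct ibmvnic_adapter *adapter)
        {
                struct ibmvnic_crq_queue *crq = &adapter->crq;
                struct vio_dev *vdev = adapter->vdev;
                int rc;

                rc = ibmvnic_reset_crq(adapter);        /* re-register */
                if (rc)
                        return rc;

                tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

                /* IRQ name carries the unit address, per the fragment above */
                snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
                         adapter->vdev->unit_address);
                rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name,
                                 adapter);
                if (rc)
                        goto req_irq_failed;

                /* catch events that arrived before the IRQ went live */
                tasklet_schedule(&adapter->tasklet);
                return 0;

        req_irq_failed:
                tasklet_kill(&adapter->tasklet);
                return rc;
        }
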
6238 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
6240 struct device *dev = &adapter->vdev->dev;
6242 u64 old_num_rx_queues = adapter->req_rx_queues;
6243 u64 old_num_tx_queues = adapter->req_tx_queues;
6246 adapter->from_passive_init = false;
6248 rc = ibmvnic_send_crq_init(adapter);
6254 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
6259 if (adapter->init_done_rc) {
6260 release_crq_queue(adapter);
6261 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
6262 return adapter->init_done_rc;
6265 if (adapter->from_passive_init) {
6266 adapter->state = VNIC_OPEN;
6267 adapter->from_passive_init = false;
6273 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
6274 adapter->reset_reason != VNIC_RESET_MOBILITY) {
6275 if (adapter->req_rx_queues != old_num_rx_queues ||
6276 adapter->req_tx_queues != old_num_tx_queues) {
6277 release_sub_crqs(adapter, 0);
6278 rc = init_sub_crqs(adapter);
6287 clean_tx_pools(adapter);
6289 rc = reset_sub_crq_queues(adapter);
6292 rc = init_sub_crqs(adapter);
6297 release_crq_queue(adapter);
6301 rc = init_sub_crq_irqs(adapter);
6304 release_crq_queue(adapter);
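
ibmvnic_reset_init() above decides between two recovery depths: if the renegotiated queue counts differ from the old ones, the sub-CRQs are released and rebuilt; otherwise they are merely reset in place; either error path gives the CRQ back. The decision, condensed into a kernel-style sketch:

        static int reinit_sub_crqs_sketch(struct ibmvnic_adapter *adapter,
                                          u64 old_num_rx_queues,
                                          u64 old_num_tx_queues)
        {
                int rc;

                if (adapter->req_rx_queues != old_num_rx_queues ||
                    adapter->req_tx_queues != old_num_tx_queues) {
                        /* topology changed: rebuild from scratch */
                        release_sub_crqs(adapter, 0);
                        rc = init_sub_crqs(adapter);
                } else {
                        /* same shape: reset in place, drop stale tx work */
                        clean_tx_pools(adapter);
                        rc = reset_sub_crq_queues(adapter);
                }
                if (rc) {
                        release_crq_queue(adapter);
                        return rc;
                }

                rc = init_sub_crq_irqs(adapter);
                if (rc)
                        release_crq_queue(adapter);
                return rc;
        }
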
6314 struct ibmvnic_adapter *adapter;
6338 adapter = netdev_priv(netdev);
6339 adapter->state = VNIC_PROBING;
6341 adapter->vdev = dev;
6342 adapter->netdev = netdev;
6343 adapter->login_pending = false;
6344 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6346 bitmap_set(adapter->map_ids, 0, 1);
6348 ether_addr_copy(adapter->mac_addr, mac_addr_p);
6349 eth_hw_addr_set(netdev, adapter->mac_addr);
6355 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6356 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6358 INIT_LIST_HEAD(&adapter->rwi_list);
6359 spin_lock_init(&adapter->rwi_lock);
6360 spin_lock_init(&adapter->state_lock);
6361 mutex_init(&adapter->fw_lock);
6362 init_completion(&adapter->probe_done);
6363 init_completion(&adapter->init_done);
6364 init_completion(&adapter->fw_done);
6365 init_completion(&adapter->reset_done);
6366 init_completion(&adapter->stats_done);
6367 clear_bit(0, &adapter->resetting);
6368 adapter->prev_rx_buf_sz = 0;
6369 adapter->prev_mtu = 0;
6373 reinit_init_done(adapter);
6378 adapter->failover_pending = false;
6384 release_crq_queue(adapter);
6396 spin_lock_irqsave(&adapter->rwi_lock, flags);
6397 flush_reset_queue(adapter);
6398 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6400 rc = init_crq_queue(adapter);
6407 rc = ibmvnic_reset_init(adapter, false);
6418 rc = init_stats_buffers(adapter);
6422 rc = init_stats_token(adapter);
6433 adapter->state = VNIC_PROBED;
6434 netdev->mtu = adapter->req_mtu - ETH_HLEN;
6435 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6436 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6438 adapter->state = VNIC_DOWN;
6441 adapter->wait_for_reset = false;
6442 adapter->last_reset_time = jiffies;
6451 rc = ibmvnic_cpu_notif_add(adapter);
6457 complete(&adapter->probe_done);
6468 release_stats_token(adapter);
6471 release_stats_buffers(adapter);
6474 release_sub_crqs(adapter, 1);
6475 release_crq_queue(adapter);
6480 adapter->state = VNIC_REMOVING;
6481 complete(&adapter->probe_done);
6482 flush_work(&adapter->ibmvnic_reset);
6483 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6485 flush_reset_queue(adapter);
6487 mutex_destroy(&adapter->fw_lock);
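
ibmvnic_probe() above initializes the locks and completions once, then retries CRQ setup and ibmvnic_reset_init() for as long as the partner answers -EAGAIN (for example when a failover races the probe), flushing any reset work queued by stray CRQ events between attempts. A condensed kernel-style sketch of that loop (the real function also clears failover_pending and handles more failure cases):

        static int ibmvnic_probe_init_sketch(struct ibmvnic_adapter *adapter)
        {
                unsigned long flags;
                int rc;

                do {
                        reinit_init_done(adapter);

                        /* drop reset requests queued by CRQ events from a
                         * previous attempt */
                        spin_lock_irqsave(&adapter->rwi_lock, flags);
                        flush_reset_queue(adapter);
                        spin_unlock_irqrestore(&adapter->rwi_lock, flags);

                        rc = init_crq_queue(adapter);
                        if (rc)
                                return rc;

                        rc = ibmvnic_reset_init(adapter, false);
                } while (rc == -EAGAIN);

                return rc;
        }
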
6496 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6499 spin_lock_irqsave(&adapter->state_lock, flags);
6508 spin_lock(&adapter->rwi_lock);
6509 adapter->state = VNIC_REMOVING;
6510 spin_unlock(&adapter->rwi_lock);
6512 spin_unlock_irqrestore(&adapter->state_lock, flags);
6514 ibmvnic_cpu_notif_remove(adapter);
6516 flush_work(&adapter->ibmvnic_reset);
6517 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6522 release_resources(adapter);
6523 release_rx_pools(adapter);
6524 release_tx_pools(adapter);
6525 release_sub_crqs(adapter, 1);
6526 release_crq_queue(adapter);
6528 release_stats_token(adapter);
6529 release_stats_buffers(adapter);
6531 adapter->state = VNIC_REMOVED;
6534 mutex_destroy(&adapter->fw_lock);
6544 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6552 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6563 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6576 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6585 struct ibmvnic_adapter *adapter;
6596 adapter = netdev_priv(netdev);
6601 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6604 for (i = 0; i < adapter->num_active_rx_pools; i++)
6605 ret += adapter->rx_pool[i].size *
6606 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6614 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6616 if (adapter->state != VNIC_OPEN)
6619 tasklet_schedule(&adapter->tasklet);