Lines Matching refs:adapter
87 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
100 static void send_query_map(struct ibmvnic_adapter *adapter);
103 static int send_login(struct ibmvnic_adapter *adapter);
104 static void send_query_cap(struct ibmvnic_adapter *adapter);
106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
110 static int init_crq_queue(struct ibmvnic_adapter *adapter);
111 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
163 * @adapter: private device data
170 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
178 netdev = adapter->netdev;
182 if (!adapter->crq.active) {
195 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
198 struct device *dev = &adapter->vdev->dev;
209 ltb->map_id = adapter->map_id;
210 adapter->map_id++;
212 mutex_lock(&adapter->fw_lock);
213 adapter->fw_done_rc = 0;
214 reinit_completion(&adapter->fw_done);
216 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
222 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
230 if (adapter->fw_done_rc) {
232 adapter->fw_done_rc);
242 mutex_unlock(&adapter->fw_lock);
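The matches above (lines 195–242) trace the driver's standard firmware handshake: take fw_lock, clear fw_done_rc, re-arm the fw_done completion, send the request, wait with a timeout, then check the rc the response handler stored. A minimal sketch of that sequence as alloc_long_term_buff() uses it; fw_op_sketch is a hypothetical name and the -EIO mapping is our choice, only the ordering and the 10000 ms timeout come from the listing:

/* Hedged reconstruction of the fw_done handshake from the matched
 * lines above; not verbatim driver code.
 */
static int fw_op_sketch(struct ibmvnic_adapter *adapter,
			struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;		/* cleared before sending */
	reinit_completion(&adapter->fw_done);	/* re-arm the completion  */

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		goto out;

	/* completed by the CRQ response path; 10 s timeout per the listing */
	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (!rc && adapter->fw_done_rc)
		rc = -EIO;	/* firmware reported failure (mapping assumed) */
out:
	mutex_unlock(&adapter->fw_lock);
	return rc;
}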
246 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
249 struct device *dev = &adapter->vdev->dev;
258 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
259 adapter->reset_reason != VNIC_RESET_MOBILITY &&
260 adapter->reset_reason != VNIC_RESET_TIMEOUT)
261 send_request_unmap(adapter, ltb->map_id);
267 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
270 struct device *dev = &adapter->vdev->dev;
275 mutex_lock(&adapter->fw_lock);
276 adapter->fw_done_rc = 0;
278 reinit_completion(&adapter->fw_done);
279 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
281 mutex_unlock(&adapter->fw_lock);
285 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
289 mutex_unlock(&adapter->fw_lock);
293 if (adapter->fw_done_rc) {
296 free_long_term_buff(adapter, ltb);
297 mutex_unlock(&adapter->fw_lock);
298 return alloc_long_term_buff(adapter, ltb, ltb->size);
300 mutex_unlock(&adapter->fw_lock);
304 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
308 for (i = 0; i < adapter->num_active_rx_pools; i++)
309 adapter->rx_pool[i].active = 0;
312 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
316 u64 handle = adapter->rx_scrq[pool->index]->handle;
317 struct device *dev = &adapter->vdev->dev;
336 adapter->replenish_no_mem++;
375 lpar_rc = send_subcrq(adapter, handle, &sub_crq);
380 adapter->replenish_add_buff_success++;
393 adapter->replenish_add_buff_failure++;
396 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
402 deactivate_rx_pools(adapter);
403 netif_carrier_off(adapter->netdev);
407 static void replenish_pools(struct ibmvnic_adapter *adapter)
411 adapter->replenish_task_cycles++;
412 for (i = 0; i < adapter->num_active_rx_pools; i++) {
413 if (adapter->rx_pool[i].active)
414 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
417 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
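Lines 312–417 together show the replenish cycle: replenish_pools() bumps replenish_task_cycles and refills each still-active RX pool, while replenish_rx_pool() counts successes and failures and, on H_CLOSED or a pending failover (line 396), deactivates the pools and drops the carrier so the reset path can take over. The outer loop, reconstructed almost verbatim from the matched lines:

static void replenish_pools_sketch(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}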
420 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
422 kfree(adapter->tx_stats_buffers);
423 kfree(adapter->rx_stats_buffers);
424 adapter->tx_stats_buffers = NULL;
425 adapter->rx_stats_buffers = NULL;
428 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
430 adapter->tx_stats_buffers =
434 if (!adapter->tx_stats_buffers)
437 adapter->rx_stats_buffers =
441 if (!adapter->rx_stats_buffers)
447 static void release_stats_token(struct ibmvnic_adapter *adapter)
449 struct device *dev = &adapter->vdev->dev;
451 if (!adapter->stats_token)
454 dma_unmap_single(dev, adapter->stats_token,
457 adapter->stats_token = 0;
460 static int init_stats_token(struct ibmvnic_adapter *adapter)
462 struct device *dev = &adapter->vdev->dev;
465 stok = dma_map_single(dev, &adapter->stats,
473 adapter->stats_token = stok;
474 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
478 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
485 if (!adapter->rx_pool)
488 buff_size = adapter->cur_rx_buf_sz;
489 rx_scrqs = adapter->num_active_rx_pools;
491 rx_pool = &adapter->rx_pool[i];
493 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
496 free_long_term_buff(adapter, &rx_pool->long_term_buff);
498 rc = alloc_long_term_buff(adapter,
503 rc = reset_long_term_buff(adapter,
525 static void release_rx_pools(struct ibmvnic_adapter *adapter)
530 if (!adapter->rx_pool)
533 for (i = 0; i < adapter->num_active_rx_pools; i++) {
534 rx_pool = &adapter->rx_pool[i];
536 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
539 free_long_term_buff(adapter, &rx_pool->long_term_buff);
554 kfree(adapter->rx_pool);
555 adapter->rx_pool = NULL;
556 adapter->num_active_rx_pools = 0;
561 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
562 struct device *dev = &adapter->vdev->dev;
568 rxadd_subcrqs = adapter->num_active_rx_scrqs;
569 buff_size = adapter->cur_rx_buf_sz;
571 adapter->rx_pool = kcalloc(rxadd_subcrqs,
574 if (!adapter->rx_pool) {
579 adapter->num_active_rx_pools = rxadd_subcrqs;
582 rx_pool = &adapter->rx_pool[i];
584 netdev_dbg(adapter->netdev,
586 i, adapter->req_rx_add_entries_per_subcrq,
589 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
597 release_rx_pools(adapter);
606 release_rx_pools(adapter);
610 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
612 release_rx_pools(adapter);
627 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
632 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
649 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
654 if (!adapter->tx_pool)
657 tx_scrqs = adapter->num_active_tx_pools;
659 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
662 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
670 static void release_vpd_data(struct ibmvnic_adapter *adapter)
672 if (!adapter->vpd)
675 kfree(adapter->vpd->buff);
676 kfree(adapter->vpd);
678 adapter->vpd = NULL;
681 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
686 free_long_term_buff(adapter, &tx_pool->long_term_buff);
689 static void release_tx_pools(struct ibmvnic_adapter *adapter)
693 if (!adapter->tx_pool)
696 for (i = 0; i < adapter->num_active_tx_pools; i++) {
697 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
698 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
701 kfree(adapter->tx_pool);
702 adapter->tx_pool = NULL;
703 kfree(adapter->tso_pool);
704 adapter->tso_pool = NULL;
705 adapter->num_active_tx_pools = 0;
712 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
721 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
742 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
746 tx_subcrqs = adapter->num_active_tx_scrqs;
747 adapter->tx_pool = kcalloc(tx_subcrqs,
749 if (!adapter->tx_pool)
752 adapter->tso_pool = kcalloc(tx_subcrqs,
754 if (!adapter->tso_pool) {
755 kfree(adapter->tx_pool);
756 adapter->tx_pool = NULL;
760 adapter->num_active_tx_pools = tx_subcrqs;
763 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
764 adapter->req_tx_entries_per_subcrq,
765 adapter->req_mtu + VLAN_HLEN);
767 release_tx_pools(adapter);
771 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
775 release_tx_pools(adapter);
783 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
787 if (adapter->napi_enabled)
790 for (i = 0; i < adapter->req_rx_queues; i++)
791 napi_enable(&adapter->napi[i]);
793 adapter->napi_enabled = true;
796 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
800 if (!adapter->napi_enabled)
803 for (i = 0; i < adapter->req_rx_queues; i++) {
804 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
805 napi_disable(&adapter->napi[i]);
808 adapter->napi_enabled = false;
811 static int init_napi(struct ibmvnic_adapter *adapter)
815 adapter->napi = kcalloc(adapter->req_rx_queues,
817 if (!adapter->napi)
820 for (i = 0; i < adapter->req_rx_queues; i++) {
821 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
822 netif_napi_add(adapter->netdev, &adapter->napi[i],
826 adapter->num_active_rx_napi = adapter->req_rx_queues;
830 static void release_napi(struct ibmvnic_adapter *adapter)
834 if (!adapter->napi)
837 for (i = 0; i < adapter->num_active_rx_napi; i++) {
838 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
839 netif_napi_del(&adapter->napi[i]);
842 kfree(adapter->napi);
843 adapter->napi = NULL;
844 adapter->num_active_rx_napi = 0;
845 adapter->napi_enabled = false;
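Lines 783–845 are the NAPI lifecycle: init_napi() allocates one napi_struct per requested RX queue and registers it via netif_napi_add() (line 822), release_napi() tears them down, and the enable/disable pair is made idempotent by the napi_enabled flag so reset paths can call them unconditionally. The enable side, close to the matched lines:

static void napi_enable_sketch(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)	/* idempotence guard for resets */
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}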
850 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
864 adapter->init_done_rc = 0;
865 reinit_completion(&adapter->init_done);
866 rc = send_login(adapter);
870 if (!wait_for_completion_timeout(&adapter->init_done,
874 adapter->init_done_rc = 0;
879 if (adapter->init_done_rc == ABORTED) {
882 adapter->init_done_rc = 0;
888 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
890 release_sub_crqs(adapter, 1);
895 adapter->init_done_rc = 0;
896 reinit_completion(&adapter->init_done);
897 send_query_cap(adapter);
898 if (!wait_for_completion_timeout(&adapter->init_done,
905 rc = init_sub_crqs(adapter);
912 rc = init_sub_crq_irqs(adapter);
918 } else if (adapter->init_done_rc) {
924 __ibmvnic_set_mac(netdev, adapter->mac_addr);
926 netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
930 static void release_login_buffer(struct ibmvnic_adapter *adapter)
932 if (!adapter->login_buf)
935 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
936 adapter->login_buf_sz, DMA_TO_DEVICE);
937 kfree(adapter->login_buf);
938 adapter->login_buf = NULL;
941 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
943 if (!adapter->login_rsp_buf)
946 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
947 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
948 kfree(adapter->login_rsp_buf);
949 adapter->login_rsp_buf = NULL;
952 static void release_resources(struct ibmvnic_adapter *adapter)
954 release_vpd_data(adapter);
956 release_tx_pools(adapter);
957 release_rx_pools(adapter);
959 release_napi(adapter);
960 release_login_buffer(adapter);
961 release_login_rsp_buffer(adapter);
964 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
966 struct net_device *netdev = adapter->netdev;
982 reinit_completion(&adapter->init_done);
983 rc = ibmvnic_send_crq(adapter, &crq);
989 if (!wait_for_completion_timeout(&adapter->init_done,
995 if (adapter->init_done_rc == PARTIALSUCCESS) {
999 } else if (adapter->init_done_rc) {
1001 adapter->init_done_rc);
1002 return adapter->init_done_rc;
1011 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1015 adapter->req_tx_queues, adapter->req_rx_queues);
1017 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1023 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1030 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1032 struct device *dev = &adapter->vdev->dev;
1037 if (adapter->vpd->buff)
1038 len = adapter->vpd->len;
1040 mutex_lock(&adapter->fw_lock);
1041 adapter->fw_done_rc = 0;
1042 reinit_completion(&adapter->fw_done);
1046 rc = ibmvnic_send_crq(adapter, &crq);
1048 mutex_unlock(&adapter->fw_lock);
1052 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1055 mutex_unlock(&adapter->fw_lock);
1058 mutex_unlock(&adapter->fw_lock);
1060 if (!adapter->vpd->len)
1063 if (!adapter->vpd->buff)
1064 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1065 else if (adapter->vpd->len != len)
1066 adapter->vpd->buff =
1067 krealloc(adapter->vpd->buff,
1068 adapter->vpd->len, GFP_KERNEL);
1070 if (!adapter->vpd->buff) {
1075 adapter->vpd->dma_addr =
1076 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1078 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1080 kfree(adapter->vpd->buff);
1081 adapter->vpd->buff = NULL;
1085 mutex_lock(&adapter->fw_lock);
1086 adapter->fw_done_rc = 0;
1087 reinit_completion(&adapter->fw_done);
1091 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1092 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1093 rc = ibmvnic_send_crq(adapter, &crq);
1095 kfree(adapter->vpd->buff);
1096 adapter->vpd->buff = NULL;
1097 mutex_unlock(&adapter->fw_lock);
1101 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1104 kfree(adapter->vpd->buff);
1105 adapter->vpd->buff = NULL;
1106 mutex_unlock(&adapter->fw_lock);
1110 mutex_unlock(&adapter->fw_lock);
1114 static int init_resources(struct ibmvnic_adapter *adapter)
1116 struct net_device *netdev = adapter->netdev;
1123 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1124 if (!adapter->vpd)
1128 rc = ibmvnic_get_vpd(adapter);
1134 adapter->map_id = 1;
1136 rc = init_napi(adapter);
1140 send_query_map(adapter);
1152 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1153 enum vnic_state prev_state = adapter->state;
1156 adapter->state = VNIC_OPENING;
1157 replenish_pools(adapter);
1158 ibmvnic_napi_enable(adapter);
1163 for (i = 0; i < adapter->req_rx_queues; i++) {
1166 enable_irq(adapter->rx_scrq[i]->irq);
1167 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1170 for (i = 0; i < adapter->req_tx_queues; i++) {
1173 enable_irq(adapter->tx_scrq[i]->irq);
1174 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1177 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1179 ibmvnic_napi_disable(adapter);
1180 release_resources(adapter);
1187 for (i = 0; i < adapter->req_rx_queues; i++)
1188 napi_schedule(&adapter->napi[i]);
1191 adapter->state = VNIC_OPEN;
1197 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1203 if (adapter->failover_pending) {
1204 adapter->state = VNIC_OPEN;
1208 if (adapter->state != VNIC_CLOSED) {
1213 rc = init_resources(adapter);
1216 release_resources(adapter);
1228 if (rc && adapter->failover_pending) {
1229 adapter->state = VNIC_OPEN;
1235 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1243 if (!adapter->rx_pool)
1246 rx_scrqs = adapter->num_active_rx_pools;
1247 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1251 rx_pool = &adapter->rx_pool[i];
1255 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1266 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1287 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1292 if (!adapter->tx_pool || !adapter->tso_pool)
1295 tx_scrqs = adapter->num_active_tx_pools;
1299 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1300 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1301 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1305 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1307 struct net_device *netdev = adapter->netdev;
1310 if (adapter->tx_scrq) {
1311 for (i = 0; i < adapter->req_tx_queues; i++)
1312 if (adapter->tx_scrq[i]->irq) {
1315 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1316 disable_irq(adapter->tx_scrq[i]->irq);
1320 if (adapter->rx_scrq) {
1321 for (i = 0; i < adapter->req_rx_queues; i++) {
1322 if (adapter->rx_scrq[i]->irq) {
1325 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1326 disable_irq(adapter->rx_scrq[i]->irq);
1334 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1337 if (test_bit(0, &adapter->resetting))
1342 ibmvnic_napi_disable(adapter);
1343 ibmvnic_disable_irqs(adapter);
1345 clean_rx_pools(adapter);
1346 clean_tx_pools(adapter);
1351 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1354 adapter->state = VNIC_CLOSING;
1355 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1356 adapter->state = VNIC_CLOSED;
1362 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1366 adapter->state, adapter->failover_pending,
1367 adapter->force_reset_recovery);
1372 if (adapter->failover_pending) {
1373 adapter->state = VNIC_CLOSED;
1536 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1538 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1539 struct device *dev = &adapter->vdev->dev;
1560 if (test_bit(0, &adapter->resetting)) {
1576 tx_pool = &adapter->tso_pool[queue_num];
1578 tx_pool = &adapter->tx_pool[queue_num];
1580 tx_scrq = adapter->tx_scrq[queue_num];
1651 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1697 lpar_rc = send_subcrq_indirect(adapter, handle,
1704 lpar_rc = send_subcrq(adapter, handle,
1713 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1730 >= adapter->req_tx_entries_per_subcrq) {
1753 adapter->tx_send_failed += tx_send_failed;
1754 adapter->tx_map_failed += tx_map_failed;
1755 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1756 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1757 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1764 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1773 if (!adapter->promisc_supported)
1782 ibmvnic_send_crq(adapter, &crq);
1789 ibmvnic_send_crq(adapter, &crq);
1799 ibmvnic_send_crq(adapter, &crq);
1807 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1821 mutex_lock(&adapter->fw_lock);
1822 adapter->fw_done_rc = 0;
1823 reinit_completion(&adapter->fw_done);
1825 rc = ibmvnic_send_crq(adapter, &crq);
1828 mutex_unlock(&adapter->fw_lock);
1832 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1834 if (rc || adapter->fw_done_rc) {
1836 mutex_unlock(&adapter->fw_lock);
1839 mutex_unlock(&adapter->fw_lock);
1842 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1848 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1856 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1857 if (adapter->state != VNIC_PROBED)
1867 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1871 struct net_device *netdev = adapter->netdev;
1874 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1878 adapter->reset_reason = rwi->reset_reason;
1888 release_resources(adapter);
1889 release_sub_crqs(adapter, 1);
1890 release_crq_queue(adapter);
1892 adapter->state = VNIC_PROBED;
1894 rc = init_crq_queue(adapter);
1897 netdev_err(adapter->netdev,
1902 rc = ibmvnic_reset_init(adapter, true);
1908 /* If the adapter was in PROBE state prior to the reset,
1919 rc = init_resources(adapter);
1923 ibmvnic_disable_irqs(adapter);
1925 adapter->state = VNIC_CLOSED;
1940 for (i = 0; i < adapter->req_rx_queues; i++)
1941 napi_schedule(&adapter->napi[i]);
1945 adapter->state = reset_state;
1953 static int do_reset(struct ibmvnic_adapter *adapter,
1958 struct net_device *netdev = adapter->netdev;
1961 netdev_dbg(adapter->netdev,
1963 adapter->state, adapter->failover_pending,
1973 adapter->failover_pending = false;
1976 adapter->reset_reason = rwi->reset_reason;
1978 old_num_rx_queues = adapter->req_rx_queues;
1979 old_num_tx_queues = adapter->req_tx_queues;
1980 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1981 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1986 adapter->reset_reason != VNIC_RESET_MOBILITY &&
1987 adapter->reset_reason != VNIC_RESET_FAILOVER) {
1988 adapter->state = VNIC_CLOSING;
1996 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2001 if (adapter->state != VNIC_CLOSING) {
2006 adapter->state = VNIC_CLOSED;
2009 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2013 adapter->state = VNIC_PROBED;
2015 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2016 rc = ibmvnic_reenable_crq_queue(adapter);
2017 release_sub_crqs(adapter, 1);
2019 rc = ibmvnic_reset_crq(adapter);
2021 rc = vio_enable_interrupts(adapter->vdev);
2023 netdev_err(adapter->netdev,
2030 netdev_err(adapter->netdev,
2035 rc = ibmvnic_reset_init(adapter, true);
2041 /* If the adapter was in PROBE state prior to the reset,
2054 if (adapter->req_rx_queues != old_num_rx_queues ||
2055 adapter->req_tx_queues != old_num_tx_queues ||
2056 adapter->req_rx_add_entries_per_subcrq !=
2058 adapter->req_tx_entries_per_subcrq !=
2060 !adapter->rx_pool ||
2061 !adapter->tso_pool ||
2062 !adapter->tx_pool) {
2063 release_rx_pools(adapter);
2064 release_tx_pools(adapter);
2065 release_napi(adapter);
2066 release_vpd_data(adapter);
2068 rc = init_resources(adapter);
2073 rc = reset_tx_pools(adapter);
2075 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2080 rc = reset_rx_pools(adapter);
2082 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2087 ibmvnic_disable_irqs(adapter);
2089 adapter->state = VNIC_CLOSED;
2105 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2106 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2114 /* restore the adapter state if reset failed */
2116 adapter->state = reset_state;
2119 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
2120 adapter->state, adapter->failover_pending, rc);
2124 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2127 struct net_device *netdev = adapter->netdev;
2130 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2134 adapter->reset_reason = rwi->reset_reason;
2137 release_resources(adapter);
2138 release_sub_crqs(adapter, 0);
2139 release_crq_queue(adapter);
2144 adapter->state = VNIC_PROBED;
2146 reinit_completion(&adapter->init_done);
2147 rc = init_crq_queue(adapter);
2149 netdev_err(adapter->netdev,
2154 rc = ibmvnic_reset_init(adapter, false);
2158 /* If the adapter was in PROBE state prior to the reset,
2168 rc = init_resources(adapter);
2172 ibmvnic_disable_irqs(adapter);
2173 adapter->state = VNIC_CLOSED;
2187 /* restore adapter state if reset failed */
2189 adapter->state = reset_state;
2190 netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
2191 adapter->state, adapter->failover_pending, rc);
2195 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2200 spin_lock_irqsave(&adapter->rwi_lock, flags);
2202 if (!list_empty(&adapter->rwi_list)) {
2203 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2210 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2217 struct ibmvnic_adapter *adapter;
2223 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2225 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2226 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2231 rwi = get_next_rwi(adapter);
2233 spin_lock_irqsave(&adapter->state_lock, flags);
2235 if (adapter->state == VNIC_REMOVING ||
2236 adapter->state == VNIC_REMOVED) {
2237 spin_unlock_irqrestore(&adapter->state_lock, flags);
2244 reset_state = adapter->state;
2247 spin_unlock_irqrestore(&adapter->state_lock, flags);
2251 rc = do_change_param_reset(adapter, rwi, reset_state);
2252 } else if (adapter->force_reset_recovery) {
2258 adapter->failover_pending = false;
2261 if (adapter->wait_for_reset) {
2263 adapter->force_reset_recovery = false;
2264 rc = do_hard_reset(adapter, rwi, reset_state);
2267 adapter->force_reset_recovery = false;
2268 rc = do_hard_reset(adapter, rwi, reset_state);
2273 netdev_dbg(adapter->netdev,
2275 adapter->state);
2280 rc = do_reset(adapter, rwi, reset_state);
2283 adapter->last_reset_time = jiffies;
2286 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2288 rwi = get_next_rwi(adapter);
2292 adapter->force_reset_recovery = true;
2295 if (adapter->wait_for_reset) {
2296 adapter->reset_done_rc = rc;
2297 complete(&adapter->reset_done);
2300 clear_bit_unlock(0, &adapter->resetting);
2302 netdev_dbg(adapter->netdev,
2304 adapter->state, adapter->force_reset_recovery,
2305 adapter->wait_for_reset);
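Lines 2195–2305 outline the reset machinery: ibmvnic_reset() queues an ibmvnic_rwi entry under rwi_lock, and the __ibmvnic_reset() worker drains that list one item at a time while holding bit 0 of adapter->resetting, escalating to do_hard_reset() once force_reset_recovery is set. A condensed skeleton; the change-param/hard/soft dispatch, the state_lock sampling, and the VNIC_REMOVING checks are elided, and IBMVNIC_RESET_DELAY is our reading of the delayed-work call on line 2226:

static void reset_worker_sketch(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter =
		container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	struct ibmvnic_rwi *rwi;
	int reset_state, rc = 0;

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		/* a reset is already in flight; try again later */
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	reset_state = adapter->state;	/* real code samples this under state_lock */

	while ((rwi = get_next_rwi(adapter)) != NULL) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)	/* failed resets escalate to a hard reset (line 2292) */
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}
	clear_bit_unlock(0, &adapter->resetting);
}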
2310 struct ibmvnic_adapter *adapter;
2312 adapter = container_of(work, struct ibmvnic_adapter,
2314 __ibmvnic_reset(&adapter->ibmvnic_reset);
2317 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2322 struct net_device *netdev = adapter->netdev;
2326 spin_lock_irqsave(&adapter->rwi_lock, flags);
2334 if (adapter->state == VNIC_REMOVING ||
2335 adapter->state == VNIC_REMOVED ||
2336 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2342 if (adapter->state == VNIC_PROBING) {
2344 adapter->init_done_rc = EAGAIN;
2349 list_for_each(entry, &adapter->rwi_list) {
2367 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2368 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
2374 list_add_tail(&rwi->list, &adapter->rwi_list);
2375 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2376 schedule_work(&adapter->ibmvnic_reset);
2381 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2391 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2393 if (test_bit(0, &adapter->resetting)) {
2394 netdev_err(adapter->netdev,
2401 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2405 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2408 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2411 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2424 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2425 int scrq_num = (int)(napi - adapter->napi);
2437 if (unlikely(test_bit(0, &adapter->resetting) &&
2438 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2439 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2444 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2452 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2463 remove_buff_from_pool(adapter, rx_buff);
2468 remove_buff_from_pool(adapter, rx_buff);
2484 if (adapter->rx_vlan_header_insertion &&
2491 remove_buff_from_pool(adapter, rx_buff);
2506 adapter->rx_stats_buffers[scrq_num].packets++;
2507 adapter->rx_stats_buffers[scrq_num].bytes += length;
2511 if (adapter->state != VNIC_CLOSING)
2512 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2515 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2517 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2519 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2526 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2530 adapter->fallback.mtu = adapter->req_mtu;
2531 adapter->fallback.rx_queues = adapter->req_rx_queues;
2532 adapter->fallback.tx_queues = adapter->req_tx_queues;
2533 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2534 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2536 reinit_completion(&adapter->reset_done);
2537 adapter->wait_for_reset = true;
2538 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2544 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2551 if (adapter->reset_done_rc) {
2553 adapter->desired.mtu = adapter->fallback.mtu;
2554 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2555 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2556 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2557 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2559 reinit_completion(&adapter->reset_done);
2560 adapter->wait_for_reset = true;
2561 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2566 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2574 adapter->wait_for_reset = false;
2581 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2583 adapter->desired.mtu = new_mtu + ETH_HLEN;
2585 return wait_for_reset(adapter);
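ibmvnic_change_mtu() just records the desired MTU and defers to wait_for_reset(), whose fallback dance lines 2526–2574 spell out: save the current request sizes, trigger a CHANGE_PARAM reset, and if the reset reports failure, restore the saved values and reset once more. A trimmed sketch; the rx/tx entries fields (lines 2533–2557) and the second completion wait are elided:

static int wait_for_reset_sketch(struct ibmvnic_adapter *adapter)
{
	int rc;

	/* snapshot current sizes so a failed reset can be rolled back */
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	if (rc)
		goto out;

	/* 60 s timeout, as on line 2544 */
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc)
		goto out;

	if (adapter->reset_done_rc) {
		/* reset failed: restore the fallback sizes and reset again */
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		/* second completion wait trimmed for brevity */
	}
out:
	adapter->wait_for_reset = false;
	return rc;
}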
2622 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2625 rc = send_query_phys_parms(adapter);
2627 adapter->speed = SPEED_UNKNOWN;
2628 adapter->duplex = DUPLEX_UNKNOWN;
2630 cmd->base.speed = adapter->speed;
2631 cmd->base.duplex = adapter->duplex;
2642 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2646 strlcpy(info->fw_version, adapter->fw_version,
2652 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2654 return adapter->msg_enable;
2659 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2661 adapter->msg_enable = data;
2666 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2671 return adapter->logical_link_state;
2677 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2679 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2680 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2683 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2684 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2692 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2694 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2695 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2698 adapter->max_rx_add_entries_per_subcrq);
2700 adapter->max_tx_entries_per_subcrq);
2704 adapter->desired.rx_entries = ring->rx_pending;
2705 adapter->desired.tx_entries = ring->tx_pending;
2707 return wait_for_reset(adapter);
2713 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2715 channels->max_rx = adapter->max_rx_queues;
2716 channels->max_tx = adapter->max_tx_queues;
2719 channels->rx_count = adapter->req_rx_queues;
2720 channels->tx_count = adapter->req_tx_queues;
2728 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2730 adapter->desired.rx_queues = channels->rx_count;
2731 adapter->desired.tx_queues = channels->tx_count;
2733 return wait_for_reset(adapter);
2738 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2747 for (i = 0; i < adapter->req_tx_queues; i++) {
2758 for (i = 0; i < adapter->req_rx_queues; i++) {
2772 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2777 adapter->req_tx_queues * NUM_TX_STATS +
2778 adapter->req_rx_queues * NUM_RX_STATS;
2787 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2795 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2800 reinit_completion(&adapter->stats_done);
2801 rc = ibmvnic_send_crq(adapter, &crq);
2804 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2810 (adapter, ibmvnic_stats[i].offset));
2812 for (j = 0; j < adapter->req_tx_queues; j++) {
2813 data[i] = adapter->tx_stats_buffers[j].packets;
2815 data[i] = adapter->tx_stats_buffers[j].bytes;
2817 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2821 for (j = 0; j < adapter->req_rx_queues; j++) {
2822 data[i] = adapter->rx_stats_buffers[j].packets;
2824 data[i] = adapter->rx_stats_buffers[j].bytes;
2826 data[i] = adapter->rx_stats_buffers[j].interrupts;
2848 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2854 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2868 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2872 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2877 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2881 if (!adapter->tx_scrq || !adapter->rx_scrq)
2884 for (i = 0; i < adapter->req_tx_queues; i++) {
2885 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2886 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2891 for (i = 0; i < adapter->req_rx_queues; i++) {
2892 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2893 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2901 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2905 struct device *dev = &adapter->vdev->dev;
2908 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2914 adapter->vdev->unit_address,
2919 netdev_err(adapter->netdev,
2932 *adapter)
2934 struct device *dev = &adapter->vdev->dev;
2956 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2960 rc = ibmvnic_reset_crq(adapter);
2963 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2969 scrq->adapter = adapter;
2973 netdev_dbg(adapter->netdev,
2990 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2994 if (adapter->tx_scrq) {
2995 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2996 if (!adapter->tx_scrq[i])
2999 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3001 if (adapter->tx_scrq[i]->irq) {
3002 free_irq(adapter->tx_scrq[i]->irq,
3003 adapter->tx_scrq[i]);
3004 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3005 adapter->tx_scrq[i]->irq = 0;
3008 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3012 kfree(adapter->tx_scrq);
3013 adapter->tx_scrq = NULL;
3014 adapter->num_active_tx_scrqs = 0;
3017 if (adapter->rx_scrq) {
3018 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3019 if (!adapter->rx_scrq[i])
3022 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3024 if (adapter->rx_scrq[i]->irq) {
3025 free_irq(adapter->rx_scrq[i]->irq,
3026 adapter->rx_scrq[i]);
3027 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3028 adapter->rx_scrq[i]->irq = 0;
3031 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3035 kfree(adapter->rx_scrq);
3036 adapter->rx_scrq = NULL;
3037 adapter->num_active_rx_scrqs = 0;
3041 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3044 struct device *dev = &adapter->vdev->dev;
3047 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3055 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3058 struct device *dev = &adapter->vdev->dev;
3066 if (test_bit(0, &adapter->resetting) &&
3067 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3079 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
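Lines 3041–3086 show that sub-CRQ interrupt masking goes through the H_VIOCTL hypervisor call, separate from the Linux-side disable_irq()/enable_irq() calls in ibmvnic_disable_irqs() (lines 1305–1332). A sketch of the disable side; the H_DISABLE_VIO_INTERRUPT sub-command and the hw_irq field are our reading of the driver and should be treated as assumptions:

static int disable_scrq_irq_sketch(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	/* mask the sub-CRQ source at the hypervisor, not the IRQ chip */
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}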
3087 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3090 struct device *dev = &adapter->vdev->dev;
3098 while (pending_scrq(adapter, scrq)) {
3109 next = ibmvnic_next_scrq(adapter, scrq);
3116 tx_pool = &adapter->tso_pool[pool];
3119 tx_pool = &adapter->tx_pool[pool];
3147 (adapter->req_tx_entries_per_subcrq / 2) &&
3148 __netif_subqueue_stopped(adapter->netdev,
3150 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3151 netdev_dbg(adapter->netdev, "Started queue %d\n",
3156 enable_scrq_irq(adapter, scrq);
3158 if (pending_scrq(adapter, scrq)) {
3159 disable_scrq_irq(adapter, scrq);
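The TX completion path (lines 3087–3159) also releases back-pressure: once freed completions bring scrq->used down to half of req_tx_entries_per_subcrq, a stopped subqueue is woken. The wake check, lifted nearly verbatim from lines 3147–3151 into a standalone helper; the helper name, the num_entries parameter, and the atomic bookkeeping around it are reconstructed:

static void tx_wake_sketch(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq,
			   int num_entries)
{
	/* wake threshold: at least half the requested ring is free again */
	if (atomic_sub_return(num_entries, &scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, scrq->pool_index)) {
		netif_wake_subqueue(adapter->netdev, scrq->pool_index);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   scrq->pool_index);
	}
}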
3169 struct ibmvnic_adapter *adapter = scrq->adapter;
3171 disable_scrq_irq(adapter, scrq);
3172 ibmvnic_complete_tx(adapter, scrq);
3180 struct ibmvnic_adapter *adapter = scrq->adapter;
3185 if (unlikely(adapter->state != VNIC_OPEN))
3188 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3190 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3191 disable_scrq_irq(adapter, scrq);
3192 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3198 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3200 struct device *dev = &adapter->vdev->dev;
3205 for (i = 0; i < adapter->req_tx_queues; i++) {
3206 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3208 scrq = adapter->tx_scrq[i];
3218 adapter->vdev->unit_address, i);
3230 for (i = 0; i < adapter->req_rx_queues; i++) {
3231 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3233 scrq = adapter->rx_scrq[i];
3241 adapter->vdev->unit_address, i);
3255 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3256 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3258 i = adapter->req_tx_queues;
3261 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3262 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3264 release_sub_crqs(adapter, 1);
3268 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3270 struct device *dev = &adapter->vdev->dev;
3277 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3284 allqueues[i] = init_sub_crq_queue(adapter);
3294 adapter->min_tx_queues + adapter->min_rx_queues) {
3301 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3304 if (adapter->req_rx_queues > adapter->min_rx_queues)
3305 adapter->req_rx_queues--;
3310 if (adapter->req_tx_queues > adapter->min_tx_queues)
3311 adapter->req_tx_queues--;
3318 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3319 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3320 if (!adapter->tx_scrq)
3323 for (i = 0; i < adapter->req_tx_queues; i++) {
3324 adapter->tx_scrq[i] = allqueues[i];
3325 adapter->tx_scrq[i]->pool_index = i;
3326 adapter->num_active_tx_scrqs++;
3329 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3330 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3331 if (!adapter->rx_scrq)
3334 for (i = 0; i < adapter->req_rx_queues; i++) {
3335 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3336 adapter->rx_scrq[i]->scrq_num = i;
3337 adapter->num_active_rx_scrqs++;
3344 kfree(adapter->tx_scrq);
3345 adapter->tx_scrq = NULL;
3348 release_sub_crq_queue(adapter, allqueues[i], 1);
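init_sub_crqs() (lines 3268–3348) negotiates queue counts with the hypervisor: allocate req_tx + req_rx sub-CRQs in one batch, and if fewer come back, shave req_rx_queues and req_tx_queues toward their min_* floors before splitting the array, front half to TX and the remainder to RX. A condensed sketch; the kcalloc of the tx_scrq/rx_scrq arrays (lines 3318–3331) and the failure unwinding are elided:

static int init_sub_crqs_sketch(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue **allqueues;
	int total_queues, i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -ENOMEM;

	for (i = 0; i < total_queues; i++)
		allqueues[i] = init_sub_crq_queue(adapter); /* may come up short */

	/* ... while fewer queues than requested succeeded, decrement
	 * req_rx_queues/req_tx_queues toward min_rx_queues/min_tx_queues ... */

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];	/* front half: TX */
		adapter->tx_scrq[i]->pool_index = i;
	}
	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;	/* remainder: RX */
	}
	kfree(allqueues);
	return 0;
}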
3353 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3355 struct device *dev = &adapter->vdev->dev;
3365 if (!(adapter->netdev->flags & IFF_PROMISC) ||
3366 adapter->promisc_supported)
3375 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3377 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3378 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3383 if (adapter->desired.mtu)
3384 adapter->req_mtu = adapter->desired.mtu;
3386 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3388 if (!adapter->desired.tx_entries)
3389 adapter->desired.tx_entries =
3390 adapter->max_tx_entries_per_subcrq;
3391 if (!adapter->desired.rx_entries)
3392 adapter->desired.rx_entries =
3393 adapter->max_rx_add_entries_per_subcrq;
3396 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3398 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3399 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3400 adapter->desired.tx_entries = max_entries;
3403 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3404 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3405 adapter->desired.rx_entries = max_entries;
3408 if (adapter->desired.tx_entries)
3409 adapter->req_tx_entries_per_subcrq =
3410 adapter->desired.tx_entries;
3412 adapter->req_tx_entries_per_subcrq =
3413 adapter->max_tx_entries_per_subcrq;
3415 if (adapter->desired.rx_entries)
3416 adapter->req_rx_add_entries_per_subcrq =
3417 adapter->desired.rx_entries;
3419 adapter->req_rx_add_entries_per_subcrq =
3420 adapter->max_rx_add_entries_per_subcrq;
3422 if (adapter->desired.tx_queues)
3423 adapter->req_tx_queues =
3424 adapter->desired.tx_queues;
3426 adapter->req_tx_queues =
3427 adapter->opt_tx_comp_sub_queues;
3429 if (adapter->desired.rx_queues)
3430 adapter->req_rx_queues =
3431 adapter->desired.rx_queues;
3433 adapter->req_rx_queues =
3434 adapter->opt_rx_comp_queues;
3436 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3438 atomic_add(cap_reqs, &adapter->running_cap_crqs);
3445 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3447 ibmvnic_send_crq(adapter, &crq);
3450 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3452 ibmvnic_send_crq(adapter, &crq);
3455 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3457 ibmvnic_send_crq(adapter, &crq);
3462 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3464 ibmvnic_send_crq(adapter, &crq);
3469 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3471 ibmvnic_send_crq(adapter, &crq);
3474 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3476 ibmvnic_send_crq(adapter, &crq);
3478 if (adapter->netdev->flags & IFF_PROMISC) {
3479 if (adapter->promisc_supported) {
3484 ibmvnic_send_crq(adapter, &crq);
3491 ibmvnic_send_crq(adapter, &crq);
3500 static int pending_scrq(struct ibmvnic_adapter *adapter,
3511 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3535 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3537 struct ibmvnic_crq_queue *queue = &adapter->crq;
3556 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3570 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3573 unsigned int ua = adapter->vdev->unit_address;
3574 struct device *dev = &adapter->vdev->dev;
3578 netdev_dbg(adapter->netdev,
3602 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3605 unsigned int ua = adapter->vdev->unit_address;
3606 struct device *dev = &adapter->vdev->dev;
3621 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3624 unsigned int ua = adapter->vdev->unit_address;
3625 struct device *dev = &adapter->vdev->dev;
3629 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3633 if (!adapter->crq.active &&
3658 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3660 struct device *dev = &adapter->vdev->dev;
3668 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3671 rc = ibmvnic_send_crq(adapter, &crq);
3687 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3696 return ibmvnic_send_crq(adapter, &crq);
3705 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3716 len += strlen(adapter->netdev->name) + 1;
3721 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3743 len = strlen(adapter->netdev->name) + 1;
3745 strncpy(vlcd->name, adapter->netdev->name, len);
3748 static int send_login(struct ibmvnic_adapter *adapter)
3752 struct device *dev = &adapter->vdev->dev;
3765 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3766 netdev_err(adapter->netdev,
3771 release_login_buffer(adapter);
3772 release_login_rsp_buffer(adapter);
3774 client_data_len = vnic_client_data_len(adapter);
3778 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3793 sizeof(u64) * adapter->req_tx_queues +
3794 sizeof(u64) * adapter->req_rx_queues +
3795 sizeof(u64) * adapter->req_rx_queues +
3809 adapter->login_buf = login_buffer;
3810 adapter->login_buf_token = buffer_token;
3811 adapter->login_buf_sz = buffer_size;
3812 adapter->login_rsp_buf = login_rsp_buffer;
3813 adapter->login_rsp_buf_token = rsp_buffer_token;
3814 adapter->login_rsp_buf_sz = rsp_buffer_size;
3818 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3821 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3824 sizeof(u64) * adapter->req_tx_queues);
3832 sizeof(u64) * adapter->req_tx_queues);
3834 for (i = 0; i < adapter->req_tx_queues; i++) {
3835 if (adapter->tx_scrq[i]) {
3836 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3841 for (i = 0; i < adapter->req_rx_queues; i++) {
3842 if (adapter->rx_scrq[i]) {
3843 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3850 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3855 vnic_add_client_data(adapter, vlcd);
3857 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3858 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3859 netdev_dbg(adapter->netdev, "%016lx\n",
3860 ((unsigned long int *)(adapter->login_buf))[i]);
3869 adapter->login_pending = true;
3870 rc = ibmvnic_send_crq(adapter, &crq);
3872 adapter->login_pending = false;
3873 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3884 adapter->login_rsp_buf = NULL;
3889 adapter->login_buf = NULL;
3894 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3905 return ibmvnic_send_crq(adapter, &crq);
3908 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3916 return ibmvnic_send_crq(adapter, &crq);
3919 static void send_query_map(struct ibmvnic_adapter *adapter)
3926 ibmvnic_send_crq(adapter, &crq);
3930 static void send_query_cap(struct ibmvnic_adapter *adapter)
3941 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3948 ibmvnic_send_crq(adapter, &crq);
3952 ibmvnic_send_crq(adapter, &crq);
3956 ibmvnic_send_crq(adapter, &crq);
3960 ibmvnic_send_crq(adapter, &crq);
3964 ibmvnic_send_crq(adapter, &crq);
3968 ibmvnic_send_crq(adapter, &crq);
3973 ibmvnic_send_crq(adapter, &crq);
3978 ibmvnic_send_crq(adapter, &crq);
3983 ibmvnic_send_crq(adapter, &crq);
3988 ibmvnic_send_crq(adapter, &crq);
3992 ibmvnic_send_crq(adapter, &crq);
3996 ibmvnic_send_crq(adapter, &crq);
4000 ibmvnic_send_crq(adapter, &crq);
4004 ibmvnic_send_crq(adapter, &crq);
4008 ibmvnic_send_crq(adapter, &crq);
4012 ibmvnic_send_crq(adapter, &crq);
4016 ibmvnic_send_crq(adapter, &crq);
4020 ibmvnic_send_crq(adapter, &crq);
4024 ibmvnic_send_crq(adapter, &crq);
4028 ibmvnic_send_crq(adapter, &crq);
4032 ibmvnic_send_crq(adapter, &crq);
4037 ibmvnic_send_crq(adapter, &crq);
4042 ibmvnic_send_crq(adapter, &crq);
4047 ibmvnic_send_crq(adapter, &crq);
4052 ibmvnic_send_crq(adapter, &crq);
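send_query_cap() (lines 3930–4052) fires the whole batch of capability queries back to back, but first publishes the request count in running_cap_crqs; handle_query_cap_rsp() decrements it per response (line 4575) and only at zero advances to send_request_cap() (line 4743), which repeats the same counting before send_query_ip_offload(). A sketch of both halves; the QUERY_CAPABILITY command, the query_capability fields, and the MIN_TX_QUEUES capability id follow our reading of ibmvnic.h and should be treated as assumptions:

/* sender side: publish the count first, then send */
static void query_caps_sketch(struct ibmvnic_adapter *adapter, int cap_reqs)
{
	union ibmvnic_crq crq;

	/* set before any send so a fast response cannot underflow it */
	atomic_set(&adapter->running_cap_crqs, cap_reqs);

	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;
	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	ibmvnic_send_crq(adapter, &crq);
	/* ... one ibmvnic_send_crq() per capability id, cap_reqs in total ... */
}

/* response side: the last response in advances the state machine */
static void query_cap_rsp_sketch(struct ibmvnic_adapter *adapter)
{
	atomic_dec(&adapter->running_cap_crqs);
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);	/* next negotiation phase */
	}
}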
4061 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4064 struct device *dev = &adapter->vdev->dev;
4067 adapter->ip_offload_tok =
4069 &adapter->ip_offload_buf,
4073 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4084 cpu_to_be32(adapter->ip_offload_tok);
4086 ibmvnic_send_crq(adapter, &crq);
4089 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4091 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4092 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4093 struct device *dev = &adapter->vdev->dev;
4097 adapter->ip_offload_ctrl_tok =
4100 sizeof(adapter->ip_offload_ctrl),
4103 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4108 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4123 if (adapter->state != VNIC_PROBING) {
4124 old_hw_features = adapter->netdev->hw_features;
4125 adapter->netdev->hw_features = 0;
4128 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4131 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4134 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4136 if ((adapter->netdev->features &
4138 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4141 adapter->netdev->hw_features |= NETIF_F_TSO;
4143 adapter->netdev->hw_features |= NETIF_F_TSO6;
4145 if (adapter->state == VNIC_PROBING) {
4146 adapter->netdev->features |= adapter->netdev->hw_features;
4147 } else if (old_hw_features != adapter->netdev->hw_features) {
4151 adapter->netdev->features &= adapter->netdev->hw_features;
4153 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4154 adapter->netdev->hw_features;
4155 adapter->netdev->features |=
4156 tmp & adapter->netdev->wanted_features;
4163 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4164 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4165 ibmvnic_send_crq(adapter, &crq);
4169 struct ibmvnic_adapter *adapter)
4171 struct device *dev = &adapter->vdev->dev;
4176 complete(&adapter->fw_done);
4180 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4181 complete(&adapter->fw_done);
4185 struct ibmvnic_adapter *adapter)
4187 struct device *dev = &adapter->vdev->dev;
4191 memset(adapter->fw_version, 0, 32);
4193 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4205 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4212 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4219 /* copy firmware version string from vpd into adapter */
4221 (adapter->vpd->buff + adapter->vpd->len)) {
4222 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4228 if (adapter->fw_version[0] == '\0')
4229 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4230 complete(&adapter->fw_done);
4233 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4235 struct device *dev = &adapter->vdev->dev;
4236 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4239 dma_unmap_single(dev, adapter->ip_offload_tok,
4240 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4242 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4243 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4244 netdev_dbg(adapter->netdev, "%016lx\n",
4247 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4248 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4249 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4251 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4253 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4255 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4257 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4259 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4261 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4263 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4265 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4267 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4269 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4271 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4273 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4275 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4277 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4279 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4281 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4283 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4286 send_control_ip_offload(adapter);
4293 return "adapter problem";
4312 struct ibmvnic_adapter *adapter)
4314 struct device *dev = &adapter->vdev->dev;
4326 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4328 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4332 struct ibmvnic_adapter *adapter)
4334 struct net_device *netdev = adapter->netdev;
4335 struct device *dev = &adapter->vdev->dev;
4348 ether_addr_copy(adapter->mac_addr,
4351 complete(&adapter->fw_done);
4356 struct ibmvnic_adapter *adapter)
4358 struct device *dev = &adapter->vdev->dev;
4362 atomic_dec(&adapter->running_cap_crqs);
4363 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
4364 atomic_read(&adapter->running_cap_crqs));
4367 req_value = &adapter->req_tx_queues;
4371 req_value = &adapter->req_rx_queues;
4375 req_value = &adapter->req_rx_add_queues;
4379 req_value = &adapter->req_tx_entries_per_subcrq;
4383 req_value = &adapter->req_rx_add_entries_per_subcrq;
4387 req_value = &adapter->req_mtu;
4391 req_value = &adapter->promisc;
4413 *req_value = adapter->fallback.mtu;
4419 send_request_cap(adapter, 1);
4428 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4429 adapter->wait_capability = false;
4430 send_query_ip_offload(adapter);
4435 struct ibmvnic_adapter *adapter)
4437 struct device *dev = &adapter->vdev->dev;
4438 struct net_device *netdev = adapter->netdev;
4439 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4440 struct ibmvnic_login_buffer *login = adapter->login_buf;
4452 if (!adapter->login_pending) {
4456 adapter->login_pending = false;
4463 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4464 complete(&adapter->init_done);
4468 if (adapter->failover_pending) {
4469 adapter->init_done_rc = -EAGAIN;
4471 complete(&adapter->init_done);
4476 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4478 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4479 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4480 netdev_dbg(adapter->netdev, "%016lx\n",
4481 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4487 adapter->req_rx_add_queues !=
4490 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4506 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4510 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4511 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4515 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4517 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4518 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4520 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4521 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4522 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4523 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4526 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4529 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4531 adapter->num_active_tx_scrqs = num_tx_pools;
4532 adapter->num_active_rx_scrqs = num_rx_pools;
4533 release_login_rsp_buffer(adapter);
4534 release_login_buffer(adapter);
4535 complete(&adapter->init_done);
4541 struct ibmvnic_adapter *adapter)
4543 struct device *dev = &adapter->vdev->dev;
4552 struct ibmvnic_adapter *adapter)
4554 struct net_device *netdev = adapter->netdev;
4555 struct device *dev = &adapter->vdev->dev;
4569 struct ibmvnic_adapter *adapter)
4571 struct net_device *netdev = adapter->netdev;
4572 struct device *dev = &adapter->vdev->dev;
4575 atomic_dec(&adapter->running_cap_crqs);
4577 atomic_read(&adapter->running_cap_crqs));
4586 adapter->min_tx_queues =
4589 adapter->min_tx_queues);
4592 adapter->min_rx_queues =
4595 adapter->min_rx_queues);
4598 adapter->min_rx_add_queues =
4601 adapter->min_rx_add_queues);
4604 adapter->max_tx_queues =
4607 adapter->max_tx_queues);
4610 adapter->max_rx_queues =
4613 adapter->max_rx_queues);
4616 adapter->max_rx_add_queues =
4619 adapter->max_rx_add_queues);
4622 adapter->min_tx_entries_per_subcrq =
4625 adapter->min_tx_entries_per_subcrq);
4628 adapter->min_rx_add_entries_per_subcrq =
4631 adapter->min_rx_add_entries_per_subcrq);
4634 adapter->max_tx_entries_per_subcrq =
4637 adapter->max_tx_entries_per_subcrq);
4640 adapter->max_rx_add_entries_per_subcrq =
4643 adapter->max_rx_add_entries_per_subcrq);
4646 adapter->tcp_ip_offload =
4649 adapter->tcp_ip_offload);
4652 adapter->promisc_supported =
4655 adapter->promisc_supported);
4658 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4659 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4660 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4663 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4664 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4665 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4668 adapter->max_multicast_filters =
4671 adapter->max_multicast_filters);
4674 adapter->vlan_header_insertion =
4676 if (adapter->vlan_header_insertion)
4679 adapter->vlan_header_insertion);
4682 adapter->rx_vlan_header_insertion =
4685 adapter->rx_vlan_header_insertion);
4688 adapter->max_tx_sg_entries =
4691 adapter->max_tx_sg_entries);
4694 adapter->rx_sg_supported =
4697 adapter->rx_sg_supported);
4700 adapter->opt_tx_comp_sub_queues =
4703 adapter->opt_tx_comp_sub_queues);
4706 adapter->opt_rx_comp_queues =
4709 adapter->opt_rx_comp_queues);
4712 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4715 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4718 adapter->opt_tx_entries_per_subcrq =
4721 adapter->opt_tx_entries_per_subcrq);
4724 adapter->opt_rxba_entries_per_subcrq =
4727 adapter->opt_rxba_entries_per_subcrq);
4730 adapter->tx_rx_desc_req = crq->query_capability.number;
4732 adapter->tx_rx_desc_req);
4741 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4742 adapter->wait_capability = false;
4743 send_request_cap(adapter, 0);
4747 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4756 mutex_lock(&adapter->fw_lock);
4757 adapter->fw_done_rc = 0;
4758 reinit_completion(&adapter->fw_done);
4760 rc = ibmvnic_send_crq(adapter, &crq);
4762 mutex_unlock(&adapter->fw_lock);
4766 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4768 mutex_unlock(&adapter->fw_lock);
4772 mutex_unlock(&adapter->fw_lock);
4773 return adapter->fw_done_rc ? -EIO : 0;
4777 struct ibmvnic_adapter *adapter)
4779 struct net_device *netdev = adapter->netdev;
4790 adapter->speed = SPEED_10;
4793 adapter->speed = SPEED_100;
4796 adapter->speed = SPEED_1000;
4799 adapter->speed = SPEED_10000;
4802 adapter->speed = SPEED_25000;
4805 adapter->speed = SPEED_40000;
4808 adapter->speed = SPEED_50000;
4811 adapter->speed = SPEED_100000;
4814 adapter->speed = SPEED_200000;
4819 adapter->speed = SPEED_UNKNOWN;
4822 adapter->duplex = DUPLEX_FULL;
4824 adapter->duplex = DUPLEX_HALF;
4826 adapter->duplex = DUPLEX_UNKNOWN;
4832 struct ibmvnic_adapter *adapter)
4835 struct net_device *netdev = adapter->netdev;
4836 struct device *dev = &adapter->vdev->dev;
4848 adapter->from_passive_init = true;
4852 adapter->login_pending = false;
4854 if (!completion_done(&adapter->init_done)) {
4855 complete(&adapter->init_done);
4856 adapter->init_done_rc = -EIO;
4858 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4861 * reset either because the adapter was still
4866 * is already scheduled or the adapter is
4872 adapter->failover_pending = false;
4877 adapter->crq.active = true;
4878 send_version_xchg(adapter);
4886 adapter->crq.active = false;
4890 if (!completion_done(&adapter->fw_done)) {
4891 adapter->fw_done_rc = -EIO;
4892 complete(&adapter->fw_done);
4896 if (!completion_done(&adapter->init_done)) {
4897 adapter->init_done_rc = -EAGAIN;
4898 complete(&adapter->init_done);
4901 if (!completion_done(&adapter->stats_done))
4902 complete(&adapter->stats_done);
4903 if (test_bit(0, &adapter->resetting))
4904 adapter->force_reset_recovery = true;
4906 dev_info(dev, "Migrated, re-enabling adapter\n");
4907 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4910 adapter->failover_pending = true;
4912 /* The adapter lost the connection */
4915 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4937 send_query_cap(adapter);
4940 handle_query_cap_rsp(crq, adapter);
4943 handle_query_map_rsp(crq, adapter);
4946 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4947 complete(&adapter->fw_done);
4950 handle_request_unmap_rsp(crq, adapter);
4953 handle_request_cap_rsp(crq, adapter);
4957 handle_login_rsp(crq, adapter);
4964 adapter->logical_link_state =
4966 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4967 complete(&adapter->init_done);
4971 adapter->phys_link_state =
4973 adapter->logical_link_state =
4975 if (adapter->phys_link_state && adapter->logical_link_state)
4982 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4986 handle_error_indication(crq, adapter);
4990 complete(&adapter->stats_done);
4994 handle_query_ip_offload_rsp(adapter);
5001 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5002 sizeof(adapter->ip_offload_ctrl),
5004 complete(&adapter->init_done);
5008 complete(&adapter->fw_done);
5011 handle_vpd_size_rsp(crq, adapter);
5014 handle_vpd_rsp(crq, adapter);
5017 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5018 complete(&adapter->fw_done);
5028 struct ibmvnic_adapter *adapter = instance;
5030 tasklet_schedule(&adapter->tasklet);
5036 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
5037 struct ibmvnic_crq_queue *queue = &adapter->crq;
5045 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5052 ibmvnic_handle_crq(crq, adapter);
5059 if (atomic_read(&adapter->running_cap_crqs) != 0)
5060 adapter->wait_capability = true;
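The CRQ bottom half (lines 5028–5060): the hard interrupt only schedules a tasklet, and the tasklet drains the ring through ibmvnic_next_crq(), dispatching each entry to ibmvnic_handle_crq(); if capability responses are still outstanding afterwards, wait_capability is set. Reconstructed from the matched lines, with the driver's done/re-check loop trimmed:

static irqreturn_t ibmvnic_interrupt_sketch(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);	/* defer all CRQ work */
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet_sketch(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	union ibmvnic_crq *crq;

	/* drain every valid entry currently in the CRQ ring */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL)
		ibmvnic_handle_crq(crq, adapter);

	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
}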
5064 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5066 struct vio_dev *vdev = adapter->vdev;
5074 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5079 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5081 struct ibmvnic_crq_queue *crq = &adapter->crq;
5082 struct device *dev = &adapter->vdev->dev;
5083 struct vio_dev *vdev = adapter->vdev;
5105 dev_warn(dev, "Partner adapter not ready\n");
5112 static void release_crq_queue(struct ibmvnic_adapter *adapter)
5114 struct ibmvnic_crq_queue *crq = &adapter->crq;
5115 struct vio_dev *vdev = adapter->vdev;
5121 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5122 free_irq(vdev->irq, adapter);
5123 tasklet_kill(&adapter->tasklet);
5135 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5137 struct ibmvnic_crq_queue *crq = &adapter->crq;
5138 struct device *dev = &adapter->vdev->dev;
5139 struct vio_dev *vdev = adapter->vdev;
5162 rc = ibmvnic_reset_crq(adapter);
5166 dev_warn(dev, "Partner adapter not ready\n");
5168 dev_warn(dev, "Error %d opening adapter\n", rc);
5174 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
5176 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5178 adapter->vdev->unit_address);
5179 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5196 tasklet_schedule(&adapter->tasklet);
5201 tasklet_kill(&adapter->tasklet);
5213 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5215 struct device *dev = &adapter->vdev->dev;
5217 u64 old_num_rx_queues = adapter->req_rx_queues;
5218 u64 old_num_tx_queues = adapter->req_tx_queues;
5221 adapter->from_passive_init = false;
5224 reinit_completion(&adapter->init_done);
5226 adapter->init_done_rc = 0;
5227 rc = ibmvnic_send_crq_init(adapter);
5233 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5238 if (adapter->init_done_rc) {
5239 release_crq_queue(adapter);
5240 return adapter->init_done_rc;
5243 if (adapter->from_passive_init) {
5244 adapter->state = VNIC_OPEN;
5245 adapter->from_passive_init = false;
5250 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5251 adapter->reset_reason != VNIC_RESET_MOBILITY) {
5252 if (adapter->req_rx_queues != old_num_rx_queues ||
5253 adapter->req_tx_queues != old_num_tx_queues) {
5254 release_sub_crqs(adapter, 0);
5255 rc = init_sub_crqs(adapter);
5264 clean_tx_pools(adapter);
5266 rc = reset_sub_crq_queues(adapter);
5269 rc = init_sub_crqs(adapter);
5274 release_crq_queue(adapter);
5278 rc = init_sub_crq_irqs(adapter);
5281 release_crq_queue(adapter);
5291 struct ibmvnic_adapter *adapter;
5313 adapter = netdev_priv(netdev);
5314 adapter->state = VNIC_PROBING;
5316 adapter->vdev = dev;
5317 adapter->netdev = netdev;
5318 adapter->login_pending = false;
5320 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5321 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5327 spin_lock_init(&adapter->stats_lock);
5329 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5330 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5332 INIT_LIST_HEAD(&adapter->rwi_list);
5333 spin_lock_init(&adapter->rwi_lock);
5334 spin_lock_init(&adapter->state_lock);
5335 mutex_init(&adapter->fw_lock);
5336 init_completion(&adapter->init_done);
5337 init_completion(&adapter->fw_done);
5338 init_completion(&adapter->reset_done);
5339 init_completion(&adapter->stats_done);
5340 clear_bit(0, &adapter->resetting);
5343 rc = init_crq_queue(adapter);
5350 rc = ibmvnic_reset_init(adapter, false);
5355 rc = init_stats_buffers(adapter);
5359 rc = init_stats_token(adapter);
5363 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5364 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5365 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5373 adapter->state = VNIC_PROBED;
5375 adapter->wait_for_reset = false;
5376 adapter->last_reset_time = jiffies;
5391 release_stats_token(adapter);
5394 release_stats_buffers(adapter);
5397 release_sub_crqs(adapter, 1);
5398 release_crq_queue(adapter);
5399 mutex_destroy(&adapter->fw_lock);
5408 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5411 spin_lock_irqsave(&adapter->state_lock, flags);
5420 spin_lock(&adapter->rwi_lock);
5421 adapter->state = VNIC_REMOVING;
5422 spin_unlock(&adapter->rwi_lock);
5424 spin_unlock_irqrestore(&adapter->state_lock, flags);
5426 flush_work(&adapter->ibmvnic_reset);
5427 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5432 release_resources(adapter);
5433 release_sub_crqs(adapter, 1);
5434 release_crq_queue(adapter);
5436 release_stats_token(adapter);
5437 release_stats_buffers(adapter);
5439 adapter->state = VNIC_REMOVED;
5442 mutex_destroy(&adapter->fw_lock);
5454 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5462 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5473 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5489 struct ibmvnic_adapter *adapter;
5500 adapter = netdev_priv(netdev);
5505 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5508 for (i = 0; i < adapter->num_active_rx_pools; i++)
5509 ret += adapter->rx_pool[i].size *
5510 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5518 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5520 if (adapter->state != VNIC_OPEN)
5523 tasklet_schedule(&adapter->tasklet);