Lines matching defs:net_device (definitions and uses of net_device in the Hyper-V netvsc driver, drivers/net/hyperv/netvsc.c; each match is prefixed with its line number in that file)
33 void netvsc_switch_datapath(struct net_device *ndev, bool vf)
94 struct netvsc_device *net_device;
96 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
97 if (!net_device)
100 init_waitqueue_head(&net_device->wait_drain);
101 net_device->destroy = false;
102 net_device->tx_disable = true;
104 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
105 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
107 init_completion(&net_device->channel_init_wait);
108 init_waitqueue_head(&net_device->subchan_open);
109 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
111 return net_device;
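
The matches at 94-111 all fall inside the device allocator. Stitched together (non-matching lines elided, the error return at 97-98 approximated), the allocator reduces to the sketch below. Note that tx_disable starts out true, so transmit stays off until the protocol handshake finishes.

    static struct netvsc_device *alloc_net_device(void)
    {
        struct netvsc_device *net_device;

        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
            return NULL;            /* elided line 98, approximated */

        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
        net_device->tx_disable = true;  /* Tx off until the VSP handshake */

        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
        INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

        return net_device;
    }
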
139 struct netvsc_device *net_device,
140 struct net_device *ndev)
151 if (net_device->recv_section_cnt) {
153 revoke_packet = &net_device->revoke_packet;
184 net_device->recv_section_cnt = 0;
189 struct netvsc_device *net_device,
190 struct net_device *ndev)
201 if (net_device->send_section_cnt) {
203 revoke_packet = &net_device->revoke_packet;
235 net_device->send_section_cnt = 0;
240 struct netvsc_device *net_device,
241 struct net_device *ndev)
245 if (net_device->recv_buf_gpadl_handle) {
247 net_device->recv_buf_gpadl_handle);
257 net_device->recv_buf_gpadl_handle = 0;
262 struct netvsc_device *net_device,
263 struct net_device *ndev)
267 if (net_device->send_buf_gpadl_handle) {
269 net_device->send_buf_gpadl_handle);
279 net_device->send_buf_gpadl_handle = 0;
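
The four helpers matched between 139 and 279 share one shape: test the field that records the resource (recv_section_cnt, send_section_cnt, or a GPADL handle), undo the setup, then zero the field so a second call is a no-op. The two revoke helpers additionally send net_device->revoke_packet to the host (153, 203) before clearing the count. A sketch of the send-side GPADL teardown, assuming the u32-handle form of vmbus_teardown_gpadl() that matches the zeroing at 279 (the error message text is approximated):

    static void netvsc_teardown_send_gpadl(struct hv_device *device,
                                           struct netvsc_device *net_device,
                                           struct net_device *ndev)
    {
        int ret;

        if (net_device->send_buf_gpadl_handle) {
            ret = vmbus_teardown_gpadl(device->channel,
                                       net_device->send_buf_gpadl_handle);
            if (ret != 0) {
                netdev_err(ndev, "unable to teardown send buffer's gpadl\n");
                return;
            }
            /* Zero the handle so the helper is idempotent. */
            net_device->send_buf_gpadl_handle = 0;
        }
    }
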
283 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
285 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
289 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
298 struct netvsc_device *net_device,
302 struct net_device *ndev = hv_get_drvdata(device);
313 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
317 net_device->recv_buf = vzalloc(buf_size);
318 if (!net_device->recv_buf) {
326 net_device->recv_buf_size = buf_size;
333 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
335 &net_device->recv_buf_gpadl_handle);
343 init_packet = &net_device->channel_init_pkt;
347 gpadl_handle = net_device->recv_buf_gpadl_handle;
365 wait_for_completion(&net_device->channel_init_wait);
388 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
389 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
392 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
393 (u64)net_device->recv_section_cnt > (u64)buf_size) {
395 net_device->recv_section_size);
404 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
405 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
413 net_device->send_buf = vzalloc(buf_size);
414 if (!net_device->send_buf) {
425 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
427 &net_device->send_buf_gpadl_handle);
435 init_packet = &net_device->channel_init_pkt;
439 net_device->send_buf_gpadl_handle;
456 wait_for_completion(&net_device->channel_init_wait);
470 net_device->send_section_size = init_packet->msg.
472 if (net_device->send_section_size < NETVSC_MTU_MIN) {
474 net_device->send_section_size);
480 net_device->send_section_cnt = buf_size / net_device->send_section_size;
483 net_device->send_section_size, net_device->send_section_cnt);
486 map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
488 net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
489 if (net_device->send_section_map == NULL) {
497 netvsc_revoke_recv_buf(device, net_device, ndev);
498 netvsc_revoke_send_buf(device, net_device, ndev);
499 netvsc_teardown_recv_gpadl(device, net_device, ndev);
500 netvsc_teardown_send_gpadl(device, net_device, ndev);
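
netvsc_init_buf() (matches 298-500) performs the same handshake twice, once per buffer: allocate, share via GPADL, announce over NVSP, block on channel_init_wait until the completion handler fires, then validate the host's reply; the unwind at 497-500 revokes and tears down both buffers on any failure. A condensed sketch of the receive-buffer half, as a fragment of the function body with error handling trimmed; NVSP field names are taken from the upstream protocol definitions:

    net_device->recv_buf = vzalloc(buf_size);
    if (!net_device->recv_buf)
        return -ENOMEM;
    net_device->recv_buf_size = buf_size;

    /* Hand the buffer to the host as a GPADL. */
    ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                                buf_size, &net_device->recv_buf_gpadl_handle);
    if (ret != 0)
        goto cleanup;

    /* Tell the VSP about it and wait for the sub-allocation layout. */
    init_packet = &net_device->channel_init_pkt;
    memset(init_packet, 0, sizeof(struct nvsp_message));
    init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
    init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
        net_device->recv_buf_gpadl_handle;
    init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

    ret = vmbus_sendpacket(device->channel, init_packet,
                           sizeof(struct nvsp_message),
                           (unsigned long)init_packet, VM_PKT_DATA_INBAND,
                           VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
    if (ret != 0)
        goto cleanup;

    wait_for_completion(&net_device->channel_init_wait);

    resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
    net_device->recv_section_size = resp->sections[0].sub_alloc_size;
    net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

    /* The host's reply is untrusted: reject a section layout that would
     * overflow the buffer we actually allocated (lines 392-393 above). */
    if (net_device->recv_section_size < NETVSC_MTU_MIN ||
        (u64)net_device->recv_section_size *
        (u64)net_device->recv_section_cnt > (u64)buf_size)
        goto cleanup;
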
508 struct netvsc_device *net_device,
512 struct net_device *ndev = hv_get_drvdata(device);
531 wait_for_completion(&net_device->channel_init_wait);
567 struct netvsc_device *net_device,
570 struct net_device *ndev = hv_get_drvdata(device);
579 init_packet = &net_device->channel_init_pkt;
583 if (negotiate_nvsp_ver(device, net_device, init_packet,
585 net_device->nvsp_version = ver_list[i];
594 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
599 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
623 ret = netvsc_init_buf(device, net_device, device_info);
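
netvsc_connect_vsp() (matches 567-623) negotiates the NVSP protocol before sizing the buffers: it walks a version table from newest to oldest and keeps the first version the host accepts. A sketch of that loop, assuming a ver_list[] array as in the upstream driver (the exact set of NVSP_PROTOCOL_VERSION_* entries varies by kernel); the negotiated version then selects the NDIS version advertised at 599 and feeds netvsc_init_buf() at 623.

    init_packet = &net_device->channel_init_pkt;

    /* Negotiate the latest NVSP protocol supported by both ends. */
    for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
        if (negotiate_nvsp_ver(device, net_device, init_packet,
                               ver_list[i]) == 0) {
            net_device->nvsp_version = ver_list[i];
            break;
        }

    if (i < 0) {
        ret = -EPROTO;
        goto cleanup;
    }

    pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
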
634 struct net_device *ndev = hv_get_drvdata(device);
636 struct netvsc_device *net_device
644 netvsc_revoke_recv_buf(device, net_device, ndev);
646 netvsc_teardown_recv_gpadl(device, net_device, ndev);
648 netvsc_revoke_send_buf(device, net_device, ndev);
650 netvsc_teardown_send_gpadl(device, net_device, ndev);
655 for (i = 0; i < net_device->num_chn; i++) {
659 napi_disable(&net_device->chan_table[i].napi);
661 netif_napi_del(&net_device->chan_table[i].napi);
665 * At this point, no one should be accessing net_device
678 netvsc_teardown_recv_gpadl(device, net_device, ndev);
679 netvsc_teardown_send_gpadl(device, net_device, ndev);
683 free_netvsc_device_rcu(net_device);
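
The remove path (matches 634-683) deliberately has two GPADL-teardown call sites (646/650 and 678/679): older hosts require the unmap before vmbus_close(), newer ones only release the GPADL afterwards, so upstream gates each site on the negotiated vmbus protocol version. A condensed sketch of the ordering (version checks approximated from the upstream driver):

    netvsc_revoke_recv_buf(device, net_device, ndev);
    if (vmbus_proto_version < VERSION_WIN10)
        netvsc_teardown_recv_gpadl(device, net_device, ndev);

    netvsc_revoke_send_buf(device, net_device, ndev);
    if (vmbus_proto_version < VERSION_WIN10)
        netvsc_teardown_send_gpadl(device, net_device, ndev);

    /* Quiesce NAPI on every channel before closing the ring. */
    for (i = 0; i < net_device->num_chn; i++) {
        napi_disable(&net_device->chan_table[i].napi);
        netif_napi_del(&net_device->chan_table[i].napi);
    }

    /* At this point, no one should be accessing net_device. */
    vmbus_close(device->channel);

    /* Newer hosts only release the GPADL after the channel is closed. */
    if (vmbus_proto_version >= VERSION_WIN10) {
        netvsc_teardown_recv_gpadl(device, net_device, ndev);
        netvsc_teardown_send_gpadl(device, net_device, ndev);
    }

    free_netvsc_device_rcu(net_device);
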
689 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
692 sync_change_bit(index, net_device->send_section_map);
695 static void netvsc_send_tx_complete(struct net_device *ndev,
696 struct netvsc_device *net_device,
714 netvsc_free_send_slot(net_device, send_index);
717 tx_stats = &net_device->chan_table[q_idx].tx_stats;
728 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
730 if (unlikely(net_device->destroy)) {
732 wake_up(&net_device->wait_drain);
736 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
745 static void netvsc_send_completion(struct net_device *ndev,
746 struct netvsc_device *net_device,
796 memcpy(&net_device->channel_init_pkt, nvsp_packet,
798 complete(&net_device->channel_init_wait);
802 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
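
netvsc_send_completion() (matches 745-802) is the other half of every wait_for_completion(&net_device->channel_init_wait) above: NVSP control completions are copied into channel_init_pkt and the waiter is woken, while RNDIS data completions are routed to netvsc_send_tx_complete(). A sketch of the demux, with message-type names from the upstream NVSP definitions:

    switch (nvsp_packet->hdr.msg_type) {
    case NVSP_MSG_TYPE_INIT_COMPLETE:
    case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
    case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
    case NVSP_MSG5_TYPE_SUBCHANNEL:
        /* Hand the reply to whoever is blocked in wait_for_completion(). */
        memcpy(&net_device->channel_init_pkt, nvsp_packet,
               sizeof(struct nvsp_message));
        complete(&net_device->channel_init_wait);
        break;

    case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
        netvsc_send_tx_complete(ndev, net_device, incoming_channel,
                                desc, budget);
        break;

    default:
        netdev_err(ndev, "Unknown send completion type %d received!!\n",
                   nvsp_packet->hdr.msg_type);
    }
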
813 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
815 unsigned long *map_addr = net_device->send_section_map;
818 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
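
Send-buffer sections are handed out from a plain bitmap (send_section_map, sized at 486-488). The allocator scans for a clear bit and claims it atomically; freeing (692) just flips the bit back. A sketch of both sides, assuming NETVSC_INVALID_INDEX as the buffer-full sentinel as upstream:

    static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
    {
        unsigned long *map_addr = net_device->send_section_map;
        unsigned int i;

        /* Racing senders may claim a bit between the scan and the set,
         * so the set is an atomic test-and-set whose result is checked. */
        for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
            if (sync_test_and_set_bit(i, map_addr) == 0)
                return i;
        }

        return NETVSC_INVALID_INDEX;
    }

    static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
                                             u32 index)
    {
        sync_change_bit(index, net_device->send_section_map);
    }
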
826 static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
834 char *start = net_device->send_buf;
835 char *dest = start + (section_index * net_device->send_section_size)
844 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
846 padding = net_device->pkt_align - remain;
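
Because pkt_align is a power of two (RNDIS_PKT_ALIGN_DEFAULT, set at 105; 8 in the upstream headers), the remainder at 844 is taken with a mask rather than a division. A worked example of the padding arithmetic:

    /* remain  = total_data_buflen & (pkt_align - 1);  -- length mod pkt_align
     * padding = pkt_align - remain;                   -- applied only if remain != 0
     *
     * e.g. pkt_align = 8, total_data_buflen = 1514:
     *      remain = 1514 & 7 = 2, padding = 6, padded length = 1520.
     */
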
867 struct netvsc_device *net_device,
875 &net_device->chan_table[packet->q_idx];
877 struct net_device *ndev = hv_get_drvdata(device);
937 !net_device->tx_disable) {
960 int netvsc_send(struct net_device *ndev,
968 struct netvsc_device *net_device
981 if (unlikely(!net_device || net_device->destroy))
984 nvchan = &net_device->chan_table[packet->q_idx];
993 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1000 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1001 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1002 net_device->send_section_size) {
1006 net_device->send_section_size) {
1010 } else if (pktlen + net_device->pkt_align <
1011 net_device->send_section_size) {
1012 section_index = netvsc_get_next_send_section(net_device);
1029 netvsc_copy_to_send_buf(net_device,
1067 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1071 netvsc_free_send_slot(net_device,
1078 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1081 netvsc_free_send_slot(net_device, section_index);
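
netvsc_send() (matches 960-1081) picks one of three destinations for each packet based on whether it fits in a send-buffer section alongside the pending multi-send data (msd); control messages without an skb bypass all of this at 993. A sketch of the decision, with the surrounding bookkeeping elided:

    /* Can we append to the section already holding batched packets? */
    try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;

    if (try_batch && msd_len + pktlen + net_device->pkt_align <
        net_device->send_section_size) {
        /* Whole packet fits after the batched data: reuse its section. */
        section_index = msdp->pkt->send_buf_index;
    } else if (try_batch && msd_len + packet->rmsg_size <
               net_device->send_section_size) {
        /* Only the RNDIS header fits: copy that, send the data as a
         * GPA list (partial copy). */
        section_index = msdp->pkt->send_buf_index;
        packet->cp_partial = true;
    } else if (pktlen + net_device->pkt_align <
               net_device->send_section_size) {
        /* Start a fresh section for this packet. */
        section_index = netvsc_get_next_send_section(net_device);
    }
    /* Otherwise section_index stays NETVSC_INVALID_INDEX and the packet
     * goes out as a GPA (page) list without touching the send buffer. */
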
1087 static int send_recv_completions(struct net_device *ndev,
1142 static void enq_receive_complete(struct net_device *ndev,
1172 static int netvsc_receive(struct net_device *ndev,
1173 struct netvsc_device *net_device,
1184 char *recv_buf = net_device->recv_buf;
1238 if (unlikely(offset > net_device->recv_buf_size ||
1239 buflen > net_device->recv_buf_size - offset)) {
1256 ret = rndis_filter_receive(ndev, net_device,
1266 enq_receive_complete(ndev, net_device, q_idx,
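
The receive path treats host-supplied offsets as untrusted. The check at 1238-1239 is written to be overflow-safe: instead of testing offset + buflen > recv_buf_size, which could wrap, it bounds each term separately:

    /* offset is validated first, so recv_buf_size - offset cannot
     * underflow in the second comparison. */
    if (unlikely(offset > net_device->recv_buf_size ||
                 buflen > net_device->recv_buf_size - offset)) {
        /* Drop the sub-packet and flag the batch as failed
         * (error handling approximated). */
        status = NVSP_STAT_FAIL;
        continue;
    }
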
1272 static void netvsc_send_table(struct net_device *ndev,
1318 static void netvsc_send_vf(struct net_device *ndev,
1342 static void netvsc_receive_inband(struct net_device *ndev,
1368 struct netvsc_device *net_device,
1369 struct net_device *ndev,
1380 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1384 return netvsc_receive(ndev, net_device, nvchan, desc);
1388 netvsc_receive_inband(ndev, net_device, desc);
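
netvsc_process_raw_pkt() (matches 1368-1388) fans each ring-buffer descriptor out by its vmbus packet type: completions go to netvsc_send_completion(), transfer-page packets carry RNDIS data into netvsc_receive(), and inband packets carry control messages such as the send table (1272) and VF association (1318). A sketch of the demux:

    switch (desc->type) {
    case VM_PKT_COMP:
        netvsc_send_completion(ndev, net_device, channel, desc, budget);
        break;

    case VM_PKT_DATA_USING_XFER_PAGES:
        /* Only the receive path reports work done to NAPI. */
        return netvsc_receive(ndev, net_device, nvchan, desc);

    case VM_PKT_DATA_INBAND:
        netvsc_receive_inband(ndev, net_device, desc);
        break;

    default:
        netdev_err(ndev, "unhandled packet type %d\n", desc->type);
        break;
    }

    return 0;
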
1415 struct netvsc_device *net_device = nvchan->net_device;
1418 struct net_device *ndev = hv_get_drvdata(device);
1427 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1433 ret = send_recv_completions(ndev, net_device, nvchan);
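
netvsc_poll() (matches 1415-1433) is a standard NAPI loop over the vmbus packet iterator: drain descriptors until the budget is spent, flush batched receive completions, and re-arm the channel interrupt only when the ring is truly empty. A condensed sketch, with the re-arm condition approximated from the upstream driver:

    static int netvsc_poll(struct napi_struct *napi, int budget)
    {
        struct netvsc_channel *nvchan =
            container_of(napi, struct netvsc_channel, napi);
        struct netvsc_device *net_device = nvchan->net_device;
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int ret, work_done = 0;

        /* Resume where the previous poll interval left off. */
        if (!nvchan->desc)
            nvchan->desc = hv_pkt_iter_first(channel);

        while (nvchan->desc && work_done < budget) {
            work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
                                                ndev, nvchan->desc, budget);
            nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }

        /* Flush any receive completions batched by netvsc_receive(). */
        ret = send_recv_completions(ndev, net_device, nvchan);

        /* Re-enable host interrupts only if the budget was not exhausted
         * and the ring drained; otherwise NAPI will poll again. */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
            hv_begin_read(&channel->inbound);
            __napi_schedule(napi);
        }

        return min(work_done, budget);
    }
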
1481 struct netvsc_device *net_device;
1482 struct net_device *ndev = hv_get_drvdata(device);
1485 net_device = alloc_net_device();
1486 if (!net_device)
1505 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1508 nvchan->net_device = net_device;
1529 netif_napi_add(ndev, &net_device->chan_table[0].napi,
1535 netvsc_channel_cb, net_device->chan_table);
1545 napi_enable(&net_device->chan_table[0].napi);
1548 ret = netvsc_connect_vsp(device, net_device, device_info);
1558 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1560 return net_device;
1564 napi_disable(&net_device->chan_table[0].napi);
1570 netif_napi_del(&net_device->chan_table[0].napi);
1573 free_netvsc_device(&net_device->rcu);
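
netvsc_device_add() (matches 1481-1573) shows the bring-up order and its unwind: NAPI is registered before the channel opens so the callback can schedule it as soon as interrupts arrive, and the device is published via rcu_assign_pointer() only after the VSP handshake succeeds. A condensed sketch, assuming the older netif_napi_add() signature with an explicit weight; labels and some arguments are approximated:

    net_device = alloc_net_device();
    if (!net_device)
        return ERR_PTR(-ENOMEM);

    for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
        struct netvsc_channel *nvchan = &net_device->chan_table[i];

        nvchan->channel = device->channel;
        nvchan->net_device = net_device;
    }

    /* Enable the NAPI handler before the init callbacks can fire. */
    netif_napi_add(ndev, &net_device->chan_table[0].napi,
                   netvsc_poll, NAPI_POLL_WEIGHT);

    ret = vmbus_open(device->channel, netvsc_ring_bytes, netvsc_ring_bytes,
                     NULL, 0, netvsc_channel_cb, net_device->chan_table);
    if (ret != 0)
        goto cleanup;

    napi_enable(&net_device->chan_table[0].napi);

    ret = netvsc_connect_vsp(device, net_device, device_info);
    if (ret != 0)
        goto close;

    /* Publish only after the device is fully initialized. */
    rcu_assign_pointer(net_device_ctx->nvdev, net_device);
    return net_device;

    close:
        napi_disable(&net_device->chan_table[0].napi);
        vmbus_close(device->channel);
    cleanup:
        netif_napi_del(&net_device->chan_table[0].napi);
        free_netvsc_device(&net_device->rcu);
        return ERR_PTR(ret);
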