Lines matching "channel" in the sfc driver's channel-management code (file line numbers preceding each match)
47 /* This is the weight assigned to each of the (per-channel) virtual
56 int efx_channel_dummy_op_int(struct efx_channel *channel)
61 void efx_channel_dummy_op_void(struct efx_channel *channel)
163 * We need a channel per event queue, plus a VI per tx queue.
262 /* Fall back to single channel MSI */
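Lines 163 and 262 together describe the usual interrupt-probing ladder: size an MSI-X request from the channel/VI counts, and degrade to single-vector MSI (and ultimately a legacy INTx line) when that fails. A minimal standalone sketch of the ladder, with the probe_*() stubs purely hypothetical stand-ins for the real PCI vector-allocation calls:

#include <stdio.h>

enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY, IRQ_NONE };

/* Hypothetical stubs; a real driver would ask the PCI core here. */
static int probe_msix(int nvec) { return nvec <= 4 ? 0 : -1; }
static int probe_msi(void)      { return 0; }
static int probe_legacy(void)   { return 0; }

static enum irq_mode probe_interrupts(int n_channels)
{
        /* One vector per channel (each channel owns one event queue). */
        if (probe_msix(n_channels) == 0)
                return IRQ_MSIX;
        /* Fall back to single channel MSI, as at line 262. */
        if (probe_msi() == 0)
                return IRQ_MSI;
        /* Last resort: shared legacy INTx line. */
        if (probe_legacy() == 0)
                return IRQ_LEGACY;
        return IRQ_NONE;
}

int main(void)
{
        printf("mode=%d\n", probe_interrupts(8)); /* stub caps at 4 -> MSI */
        return 0;
}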
352 struct efx_channel *channel;
355 efx_for_each_channel(channel, efx) {
356 cpu = cpumask_local_spread(channel->channel,
358 irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
364 struct efx_channel *channel;
366 efx_for_each_channel(channel, efx)
367 irq_set_affinity_hint(channel->irq, NULL);
383 struct efx_channel *channel;
386 efx_for_each_channel(channel, efx)
387 channel->irq = 0;
400 * Event queue memory allocations are done only once. If the channel
402 * errors during channel reset and also simplifies interrupt handling.
404 int efx_probe_eventq(struct efx_channel *channel)
406 struct efx_nic *efx = channel->efx;
410 "chan %d create event queue\n", channel->channel);
417 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
419 return efx_nic_probe_eventq(channel);
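Line 417 sizes the event queue and stores entries - 1 as a mask, which only works because event queue sizes are powers of two (the driver arranges this elsewhere; the sketch below makes the assumption explicit with an assert). A standalone illustration of the mask idiom, with EVQ_MIN standing in for EFX_MIN_EVQ_SIZE and its value assumed:

#include <assert.h>
#include <stdio.h>

#define EVQ_MIN 512u /* stand-in for EFX_MIN_EVQ_SIZE; value assumed */

static unsigned int evq_mask(unsigned int entries)
{
        if (entries < EVQ_MIN)
                entries = EVQ_MIN;
        /* entries - 1 is only a valid wrap mask for powers of two */
        assert((entries & (entries - 1)) == 0);
        return entries - 1;
}

int main(void)
{
        unsigned int mask = evq_mask(1024);
        unsigned int read_ptr = 1023;

        read_ptr = (read_ptr + 1) & mask; /* wraps 1024 back to 0 */
        printf("mask=%#x read_ptr=%u\n", mask, read_ptr);
        return 0;
}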
422 /* Prepare channel's event queue */
423 int efx_init_eventq(struct efx_channel *channel)
425 struct efx_nic *efx = channel->efx;
428 EFX_WARN_ON_PARANOID(channel->eventq_init);
431 "chan %d init event queue\n", channel->channel);
433 rc = efx_nic_init_eventq(channel);
435 efx->type->push_irq_moderation(channel);
436 channel->eventq_read_ptr = 0;
437 channel->eventq_init = true;
443 void efx_start_eventq(struct efx_channel *channel)
445 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
446 "chan %d start event queue\n", channel->channel);
449 channel->enabled = true;
452 napi_enable(&channel->napi_str);
453 efx_nic_eventq_read_ack(channel);
457 void efx_stop_eventq(struct efx_channel *channel)
459 if (!channel->enabled)
462 napi_disable(&channel->napi_str);
463 channel->enabled = false;
466 void efx_fini_eventq(struct efx_channel *channel)
468 if (!channel->eventq_init)
471 netif_dbg(channel->efx, drv, channel->efx->net_dev,
472 "chan %d fini event queue\n", channel->channel);
474 efx_nic_fini_eventq(channel);
475 channel->eventq_init = false;
478 void efx_remove_eventq(struct efx_channel *channel)
480 netif_dbg(channel->efx, drv, channel->efx->net_dev,
481 "chan %d remove event queue\n", channel->channel);
483 efx_nic_remove_eventq(channel);
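The six routines above (lines 404-483) form three nested pairs: probe/remove allocate and free memory once, init/fini program and tear down hardware state across resets, and start/stop toggle NAPI processing. The eventq_init and enabled flags (lines 437, 449, 468) make the inner pairs safe to unwind on error paths. A minimal sketch of that guard-flag idiom, with all names illustrative:

struct evq {
        int init_done; /* mirrors channel->eventq_init */
};

static void evq_init(struct evq *q)
{
        /* ... program the hardware queue ... */
        q->init_done = 1;
}

static void evq_fini(struct evq *q)
{
        if (!q->init_done)
                return; /* safe on error paths: never initialised */
        /* ... quiesce and tear down the hardware queue ... */
        q->init_done = 0;
}

int main(void)
{
        struct evq q = { 0 };

        evq_fini(&q); /* no-op: init never ran */
        evq_init(&q);
        evq_fini(&q); /* real teardown */
        return 0;
}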
496 struct efx_channel *channel;
499 channel = container_of(dwork, struct efx_channel, filter_work);
500 time = jiffies - channel->rfs_last_expiry;
501 quota = channel->rfs_filter_count * time / (30 * HZ);
502 if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
503 channel->rfs_last_expiry += time;
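The quota arithmetic at lines 500-502 paces ARFS filter expiry so that the whole filter table is scanned roughly once every 30 seconds, and no pass runs until at least 20 filters' worth of work has accrued. A worked example of the same arithmetic, with the HZ value assumed:

#include <stdio.h>

#define HZ 1000 /* assumption: 1000 jiffies per second */

int main(void)
{
        unsigned int filter_count = 200;
        unsigned int elapsed = 6 * HZ; /* jiffies since rfs_last_expiry */

        /* Same formula as line 501: one full table scan per 30 s,
         * so after 6 s the quota is a fifth of the table. */
        unsigned int quota = filter_count * elapsed / (30 * HZ);

        printf("quota=%u\n", quota); /* 200 * 6 / 30 = 40 (>= 20: expire) */
        return 0;
}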
509 /* Allocate and initialise a channel structure. */
514 struct efx_channel *channel;
517 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
518 if (!channel)
521 channel->efx = efx;
522 channel->channel = i;
523 channel->type = &efx_default_channel_type;
526 tx_queue = &channel->tx_queue[j];
530 tx_queue->channel = channel;
534 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
537 rx_queue = &channel->rx_queue;
541 return channel;
549 efx->channel[i] = efx_alloc_channel(efx, i);
550 if (!efx->channel[i])
571 if (efx->channel[i]) {
572 kfree(efx->channel[i]);
573 efx->channel[i] = NULL;
577 /* Allocate and initialise a channel structure, copying parameters
578 * (but not resources) from an old channel structure.
584 struct efx_channel *channel;
587 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
588 if (!channel)
591 *channel = *old_channel;
593 channel->napi_dev = NULL;
594 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
595 channel->napi_str.napi_id = 0;
596 channel->napi_str.state = 0;
597 memset(&channel->eventq, 0, sizeof(channel->eventq));
600 tx_queue = &channel->tx_queue[j];
601 if (tx_queue->channel)
602 tx_queue->channel = channel;
608 rx_queue = &channel->rx_queue;
613 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
616 return channel;
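The assignment at line 591 duplicates the whole structure, and lines 593-613 then scrub everything that refers to live resources (NAPI state, the event queue buffer, the delayed work, which must be re-initialised rather than shared). A minimal sketch of that copy-then-scrub idiom, with the struct and its fields purely illustrative:

#include <stdlib.h>

struct chan {
        int index;     /* parameter: safe to copy */
        void *dma_buf; /* live resource: must not be shared */
};

static struct chan *chan_copy(const struct chan *old)
{
        struct chan *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        *c = *old;         /* copies parameters and resources alike... */
        c->dma_buf = NULL; /* ...so resource fields are reset by hand */
        return c;
}

int main(void)
{
        struct chan old = { .index = 3, .dma_buf = &old };
        struct chan *dup = chan_copy(&old);

        free(dup);
        return 0;
}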
619 static int efx_probe_channel(struct efx_channel *channel)
625 netif_dbg(channel->efx, probe, channel->efx->net_dev,
626 "creating channel %d\n", channel->channel);
628 rc = channel->type->pre_probe(channel);
632 rc = efx_probe_eventq(channel);
636 efx_for_each_channel_tx_queue(tx_queue, channel) {
642 efx_for_each_channel_rx_queue(rx_queue, channel) {
648 channel->rx_list = NULL;
653 efx_remove_channel(channel);
657 void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
659 struct efx_nic *efx = channel->efx;
663 number = channel->channel;
682 struct efx_channel *channel;
684 efx_for_each_channel(channel, efx)
685 channel->type->get_name(channel,
686 efx->msi_context[channel->channel].name,
692 struct efx_channel *channel;
703 efx_for_each_channel_rev(channel, efx) {
704 rc = efx_probe_channel(channel);
707 "failed to create channel %d\n",
708 channel->channel);
721 void efx_remove_channel(struct efx_channel *channel)
726 netif_dbg(channel->efx, drv, channel->efx->net_dev,
727 "destroy chan %d\n", channel->channel);
729 efx_for_each_channel_rx_queue(rx_queue, channel)
731 efx_for_each_channel_tx_queue(tx_queue, channel)
733 efx_remove_eventq(channel);
734 channel->type->post_remove(channel);
739 struct efx_channel *channel;
741 efx_for_each_channel(channel, efx)
742 efx_remove_channel(channel);
749 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
763 efx_for_each_channel(channel, efx) {
767 if (channel->type->copy)
770 channel->eventq.index +
771 channel->eventq.entries);
772 efx_for_each_channel_rx_queue(rx_queue, channel)
776 efx_for_each_channel_tx_queue(tx_queue, channel)
789 channel = efx->channel[i];
790 if (channel->type->copy)
791 channel = channel->type->copy(channel);
792 if (!channel) {
796 other_channel[i] = channel;
799 /* Swap entry counts and channel pointers */
805 swap(efx->channel[i], other_channel[i]);
811 channel = efx->channel[i];
812 if (!channel->type->copy)
814 rc = efx_probe_channel(channel);
817 efx_init_napi_channel(efx->channel[i]);
822 /* Destroy unused channel structures */
824 channel = other_channel[i];
825 if (channel && channel->type->copy) {
826 efx_fini_napi_channel(channel);
827 efx_remove_channel(channel);
828 kfree(channel);
837 "unable to restart interrupts on channel reallocation\n");
850 swap(efx->channel[i], other_channel[i]);
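Lines 789-850 implement a transactional reallocation: build copies of the channels, swap them into efx->channel[], re-probe, and on failure swap the originals back (line 850) so the device is left in its previous working state. A standalone sketch of the swap-and-rollback shape, with probe_all() a hypothetical stand-in for the re-probe step:

#include <stddef.h>

struct channel { int dummy; };

static int probe_all(struct channel *live[], size_t n)
{
        (void)live; (void)n;
        return -1; /* pretend re-probing failed, to exercise rollback */
}

static void swap_all(struct channel *a[], struct channel *b[], size_t n)
{
        for (size_t i = 0; i < n; i++) {
                struct channel *tmp = a[i];
                a[i] = b[i];
                b[i] = tmp;
        }
}

static int realloc_channels(struct channel *live[], struct channel *copy[],
                            size_t n)
{
        int rc;

        swap_all(live, copy, n);  /* swap in the resized copies */
        rc = probe_all(live, n);
        if (rc)
                swap_all(live, copy, n); /* rollback to the old channels */
        return rc;
}

int main(void)
{
        struct channel a = {0}, b = {0};
        struct channel *live[] = { &a }, *copy[] = { &b };

        (void)realloc_channels(live, copy, 1); /* fails; live[] restored */
        return 0;
}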
858 struct efx_channel *channel;
879 efx_for_each_channel(channel, efx) {
880 if (channel->channel < efx->n_rx_channels)
881 channel->rx_queue.core_index = channel->channel;
883 channel->rx_queue.core_index = -1;
885 if (channel->channel >= efx->tx_channel_offset) {
886 if (efx_channel_is_xdp_tx(channel)) {
887 efx_for_each_channel_tx_queue(tx_queue, channel) {
898 channel->channel, tx_queue->label,
905 efx_for_each_channel_tx_queue(tx_queue, channel) {
908 channel->channel, tx_queue->label,
922 bool efx_default_channel_want_txqs(struct efx_channel *channel)
924 return channel->channel - channel->efx->tx_channel_offset <
925 channel->efx->n_tx_channels;
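The predicate at lines 924-925 is a window test: a channel wants TX queues exactly when its index falls in [tx_channel_offset, tx_channel_offset + n_tx_channels), i.e. TX channels occupy one contiguous block. With unsigned arithmetic a single comparison also rejects indices below the offset, since the subtraction wraps to a huge value; whether the driver's real types rely on that is not shown here, so the sketch below makes it explicit:

#include <stdbool.h>
#include <stdio.h>

static bool wants_txq(unsigned int chan, unsigned int tx_off,
                      unsigned int n_tx)
{
        /* chan < tx_off wraps to a huge unsigned value, failing the test */
        return chan - tx_off < n_tx;
}

int main(void)
{
        /* offset 2, 4 TX channels: indices 2..5 want TX queues */
        printf("%d %d %d\n", wants_txq(1, 2, 4), wants_txq(3, 2, 4),
               wants_txq(6, 2, 4)); /* 0 1 0 */
        return 0;
}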
934 struct efx_channel *channel, *end_channel;
942 efx_for_each_channel(channel, efx) {
943 if (!channel->type->keep_eventq) {
944 rc = efx_init_eventq(channel);
948 efx_start_eventq(channel);
955 end_channel = channel;
956 efx_for_each_channel(channel, efx) {
957 if (channel == end_channel)
959 efx_stop_eventq(channel);
960 if (!channel->type->keep_eventq)
961 efx_fini_eventq(channel);
969 struct efx_channel *channel;
982 efx_for_each_channel(channel, efx) {
983 if (channel->irq)
984 synchronize_irq(channel->irq);
986 efx_stop_eventq(channel);
987 if (!channel->type->keep_eventq)
988 efx_fini_eventq(channel);
997 struct efx_channel *channel, *end_channel;
1010 efx_for_each_channel(channel, efx) {
1011 if (channel->type->keep_eventq) {
1012 rc = efx_init_eventq(channel);
1025 end_channel = channel;
1026 efx_for_each_channel(channel, efx) {
1027 if (channel == end_channel)
1029 if (channel->type->keep_eventq)
1030 efx_fini_eventq(channel);
1040 struct efx_channel *channel;
1044 efx_for_each_channel(channel, efx) {
1045 if (channel->type->keep_eventq)
1046 efx_fini_eventq(channel);
1056 struct efx_channel *channel;
1058 efx_for_each_channel(channel, efx) {
1059 efx_for_each_channel_tx_queue(tx_queue, channel) {
1064 efx_for_each_channel_rx_queue(rx_queue, channel) {
1067 efx_stop_eventq(channel);
1069 efx_start_eventq(channel);
1072 WARN_ON(channel->rx_pkt_n_frags);
1080 struct efx_channel *channel;
1084 efx_for_each_channel(channel, efx) {
1085 efx_for_each_channel_rx_queue(rx_queue, channel)
1089 efx_for_each_channel(channel, efx) {
1096 if (efx_channel_has_rx_queue(channel)) {
1097 efx_stop_eventq(channel);
1098 efx_start_eventq(channel);
1112 efx_for_each_channel(channel, efx) {
1113 efx_for_each_channel_rx_queue(rx_queue, channel)
1115 efx_for_each_channel_tx_queue(tx_queue, channel)
1126 /* Process channel's event queue
1129 * single channel. The caller must guarantee that this function will
1130 * never be concurrently called more than once on the same channel,
1133 static int efx_process_channel(struct efx_channel *channel, int budget)
1139 if (unlikely(!channel->enabled))
1143 EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
1145 channel->rx_list = &rx_list;
1147 efx_for_each_channel_tx_queue(tx_queue, channel) {
1152 spent = efx_nic_process_eventq(channel, budget);
1153 if (spent && efx_channel_has_rx_queue(channel)) {
1155 efx_channel_get_rx_queue(channel);
1157 efx_rx_flush_packet(channel);
1162 efx_for_each_channel_tx_queue(tx_queue, channel) {
1171 netif_receive_skb_list(channel->rx_list);
1172 channel->rx_list = NULL;
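The contract described at lines 1126-1130 is the standard NAPI one: the poll routine consumes at most budget events and returns how many it used, and returning strictly less than budget tells the core the queue is drained so the interrupt can be re-armed. A minimal sketch of that contract, with process_one_event() a hypothetical stand-in for the event demultiplexer:

#include <stdio.h>

static int pending = 37; /* pretend 37 events are queued */

static int process_one_event(void)
{
        if (pending == 0)
                return 0;
        pending--;
        return 1;
}

static int poll_sketch(int budget)
{
        int spent = 0;

        while (spent < budget && process_one_event())
                spent++;
        /* spent < budget => caller may complete NAPI and re-arm IRQs */
        return spent;
}

int main(void)
{
        int first = poll_sketch(32);
        int second = poll_sketch(32);

        printf("%d %d\n", first, second); /* 32, then 5: drained */
        return 0;
}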
1177 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
1181 if (channel->irq_mod_score < irq_adapt_low_thresh) {
1182 if (channel->irq_moderation_us > step) {
1183 channel->irq_moderation_us -= step;
1184 efx->type->push_irq_moderation(channel);
1186 } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
1187 if (channel->irq_moderation_us <
1189 channel->irq_moderation_us += step;
1190 efx->type->push_irq_moderation(channel);
1194 channel->irq_count = 0;
1195 channel->irq_mod_score = 0;
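efx_update_irq_mod (lines 1177-1195, partially shown) is a simple adaptive scheme: a low event score over the sampling window means interrupts are mostly idle, so the moderation interval shrinks in favour of latency; a high score means heavy traffic, so it grows (up to a cap) in favour of throughput. A standalone sketch of the stepping logic, with the thresholds and cap assumed rather than the driver's real values:

#include <stdio.h>

#define ADAPT_LOW_THRESH   8000u /* assumed */
#define ADAPT_HIGH_THRESH 16000u /* assumed */
#define MOD_MAX_US          192u /* assumed cap */

static unsigned int adapt_irq_mod(unsigned int mod_us, unsigned int step_us,
                                  unsigned int score)
{
        if (score < ADAPT_LOW_THRESH) {
                if (mod_us > step_us)
                        mod_us -= step_us; /* interrupt sooner: latency */
        } else if (score > ADAPT_HIGH_THRESH) {
                if (mod_us < MOD_MAX_US)
                        mod_us += step_us; /* batch more: throughput */
        }
        return mod_us;
}

int main(void)
{
        unsigned int mod = 60;

        mod = adapt_irq_mod(mod, 30, 20000); /* busy: 60 -> 90 */
        mod = adapt_irq_mod(mod, 30, 100);   /* idle: 90 -> 60 */
        printf("mod=%u us\n", mod);
        return 0;
}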
1205 struct efx_channel *channel =
1207 struct efx_nic *efx = channel->efx;
1214 "channel %d NAPI poll executing on CPU %d\n",
1215 channel->channel, raw_smp_processor_id());
1217 spent = efx_process_channel(channel, budget);
1222 if (efx_channel_has_rx_queue(channel) &&
1224 unlikely(++channel->irq_count == 1000)) {
1225 efx_update_irq_mod(efx, channel);
1230 time = jiffies - channel->rfs_last_expiry;
1232 if (channel->rfs_filter_count * time >= 600 * HZ)
1233 mod_delayed_work(system_wq, &channel->filter_work, 0);
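The trigger at line 1232 is the worker's own threshold from line 502 with the division multiplied out: for non-negative integers, count * time / (30 * HZ) >= 20 is equivalent to count * time >= 600 * HZ, so the NAPI path only schedules the expiry work when the worker would actually do something. A tiny exhaustive check of the equivalence (HZ deliberately small so both branches are exercised):

#include <assert.h>

#define HZ 10u /* small on purpose; the equivalence holds for any HZ */

int main(void)
{
        /* floor(x / d) >= 20  <=>  x >= 20 * d  for x >= 0, d > 0 */
        for (unsigned int x = 0; x < 20000; x++)
                assert((x / (30 * HZ) >= 20) == (x >= 600 * HZ));
        return 0;
}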
1242 efx_nic_eventq_read_ack(channel);
1248 void efx_init_napi_channel(struct efx_channel *channel)
1250 struct efx_nic *efx = channel->efx;
1252 channel->napi_dev = efx->net_dev;
1253 netif_napi_add(channel->napi_dev, &channel->napi_str,
1259 struct efx_channel *channel;
1261 efx_for_each_channel(channel, efx)
1262 efx_init_napi_channel(channel);
1265 void efx_fini_napi_channel(struct efx_channel *channel)
1267 if (channel->napi_dev)
1268 netif_napi_del(&channel->napi_str);
1270 channel->napi_dev = NULL;
1275 struct efx_channel *channel;
1277 efx_for_each_channel(channel, efx)
1278 efx_fini_napi_channel(channel);