Lines matching defs:wg (WireGuard receive path)

47 static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
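Line 47 is the entry check run on every received UDP datagram before it is classified. As a rough illustration, here is a minimal sketch, not the kernel function: the name prepare_skb_header_sketch() is hypothetical, the driver's internal headers are assumed to be included, and the real helper performs stricter protocol and bounds checking. It only shows the basic idea of validating the outer UDP header and pulling it off so skb->data points at the WireGuard message.

/* Minimal sketch (hypothetical helper, not the function at line 47):
 * make sure the datagram is long enough to carry a UDP header plus a
 * 4-byte message-type word, then strip the outer headers so skb->data
 * points at the WireGuard message itself.  The wg argument is kept
 * only to mirror the real signature and is unused here.
 */
static int prepare_skb_header_sketch(struct sk_buff *skb, struct wg_device *wg)
{
        struct udphdr *udp;
        size_t data_offset;

        if (unlikely(!pskb_may_pull(skb, skb_transport_offset(skb) +
                                         sizeof(struct udphdr))))
                return -EINVAL;
        udp = udp_hdr(skb);
        if (unlikely(ntohs(udp->len) < sizeof(struct udphdr) + sizeof(__le32)))
                return -EINVAL; /* too short to hold even a message type */
        data_offset = (u8 *)udp - skb->data;
        skb_pull(skb, data_offset + sizeof(struct udphdr));
        return 0;
}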
92 static void wg_receive_handshake_packet(struct wg_device *wg,
106 wg->dev->name, skb);
108 (struct message_handshake_cookie *)skb->data, wg);
112 under_load = atomic_read(&wg->handshake_queue_len) >=
121 mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
130 wg->dev->name, skb);
140 wg_packet_send_handshake_cookie(wg, skb,
144 peer = wg_noise_handshake_consume_initiation(message, wg);
147 wg->dev->name, skb);
152 wg->dev->name, peer->internal_id,
162 wg_packet_send_handshake_cookie(wg, skb,
166 peer = wg_noise_handshake_consume_response(message, wg);
169 wg->dev->name, skb);
174 wg->dev->name, peer->internal_id,
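Lines 112 through 174 are the heart of handshake processing: decide whether the device is under load, validate the message MACs with wg_cookie_validate_packet(), and then either demand a cookie round trip (lines 140 and 162) or hand the message to wg_noise_handshake_consume_initiation() or _response(). The load decision is what the wg->handshake_queue_len counter exists for. Below is a minimal sketch of that heuristic; handshake_under_load() is a hypothetical name, and the one-eighth threshold and one-second hold-down are assumptions made for illustration.

/* Hypothetical helper: treat the device as "under load" while the
 * handshake backlog is at least 1/8 of MAX_QUEUED_INCOMING_HANDSHAKES
 * (threshold assumed), and keep that state for one extra second so the
 * cookie requirement does not flap on and off.
 */
static bool handshake_under_load(struct wg_device *wg)
{
        static u64 last_under_load; /* deliberately device-global state */
        bool under_load = atomic_read(&wg->handshake_queue_len) >=
                          MAX_QUEUED_INCOMING_HANDSHAKES / 8;

        if (under_load)
                last_under_load = ktime_get_coarse_boottime_ns();
        else if (last_under_load) {
                under_load = ktime_get_coarse_boottime_ns() - last_under_load <
                             NSEC_PER_SEC;
                if (!under_load)
                        last_under_load = 0;
        }
        return under_load;
}

While under load, an initiator whose MACs check out but who has not yet returned a valid cookie is answered with wg_packet_send_handshake_cookie() instead of being given the expensive Noise computation.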
209 struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
213 wg_receive_handshake_packet(wg, skb);
215 atomic_dec(&wg->handshake_queue_len);
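Lines 209 to 215 belong to the per-CPU worker that drains the handshake ring. A minimal sketch of that drain loop follows, assuming handshake_queue is a ptr_ring-backed crypt_queue holding sk_buffs; the freeing and cond_resched() details are assumptions, while the container_of(), the call into wg_receive_handshake_packet() and the atomic_dec() come straight from the listing.

/* Sketch of the drain loop: recover the owning device from the queue
 * pointer, then consume queued handshake skbs one by one, keeping the
 * backlog counter in sync with the ring.
 */
static void handshake_receive_worker_sketch(struct crypt_queue *queue)
{
        struct wg_device *wg =
                container_of(queue, struct wg_device, handshake_queue);
        struct sk_buff *skb;

        while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                wg_receive_handshake_packet(wg, skb);
                dev_kfree_skb(skb);
                atomic_dec(&wg->handshake_queue_len);
                cond_resched(); /* be polite during long drains */
        }
}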
509 static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
518 wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
526 ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
527 wg->packet_crypt_wq);
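wg_packet_consume_data() at lines 509 to 527 is the data-path counterpart: the receiver index carried in the data message selects a keypair (and therefore a peer) from wg->index_hashtable, and the skb is queued both on the device-wide decrypt queue and on the peer's rx queue, so decryption can run in parallel on wg->packet_crypt_wq while per-peer ordering is preserved. The sketch below shows that flow; the lookup call name, the PACKET_CB() usage and the error handling are assumptions (the real function also takes keypair and peer references and handles a dying peer), while the argument lists on lines 518 and 526 to 527 are taken from the listing.

/* Sketch of the data-path dispatch: map the receiver index to a
 * keypair/peer under RCU, then hand the skb to the parallel decrypt
 * machinery.  Reference counting is omitted for brevity.
 */
static void consume_data_sketch(struct wg_device *wg, struct sk_buff *skb)
{
        __le32 idx = ((struct message_data *)skb->data)->key_idx;
        struct wg_peer *peer = NULL;
        int ret;

        rcu_read_lock_bh();
        PACKET_CB(skb)->keypair = (struct noise_keypair *)
                wg_index_hashtable_lookup(wg->index_hashtable,
                                          INDEX_HASHTABLE_KEYPAIR, idx, &peer);
        if (unlikely(!PACKET_CB(skb)->keypair || !peer))
                goto drop;

        ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
                                                   &peer->rx_queue, skb,
                                                   wg->packet_crypt_wq);
        if (likely(!ret)) {
                rcu_read_unlock_bh();
                return;
        }
drop:
        rcu_read_unlock_bh();
        kfree_skb(skb);
}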
542 void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
544 if (unlikely(prepare_skb_header(skb, wg) < 0))
554 if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
555 if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
556 ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
557 spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
560 ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
564 wg->dev->name, skb);
567 atomic_inc(&wg->handshake_queue_len);
568 cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
570 queue_work_on(cpu, wg->handshake_receive_wq,
571 &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
576 wg_packet_consume_data(wg, skb);
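wg_packet_receive() at lines 542 to 576 is the entry point: after prepare_skb_header() succeeds, handshake messages are pushed onto wg->handshake_queue and everything else goes straight to wg_packet_consume_data() at line 576. The enqueue policy in lines 554 to 560 is the notable part: once the backlog exceeds half of MAX_QUEUED_INCOMING_HANDSHAKES the producer lock is only try-acquired, so the receive path never waits behind other CPUs during a handshake flood, and a contended or full ring simply drops the packet. The sketch below pulls that policy together using the members shown in the listing; the drop handling is an assumption and queue_handshake_sketch() is a hypothetical name.

/* Sketch of the handshake enqueue plus worker kick.  Under heavy
 * backlog the producer lock is only try-acquired (never spun on),
 * otherwise the ordinary BH-safe produce is used.  On success the
 * backlog counter is bumped and a per-CPU worker is kicked on the
 * next online CPU.
 */
static void queue_handshake_sketch(struct wg_device *wg, struct sk_buff *skb)
{
        int cpu, ret = -EBUSY;

        if (atomic_read(&wg->handshake_queue_len) >
            MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
                if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
                        ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
                        spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
                }
        } else {
                ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
        }
        if (ret) {
                kfree_skb(skb); /* ring full or contended: drop */
                return;
        }
        atomic_inc(&wg->handshake_queue_len);
        /* Spread handshake processing across online CPUs, round robin. */
        cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
        queue_work_on(cpu, wg->handshake_receive_wq,
                      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
}

Data packets skip all of this and are handed directly to wg_packet_consume_data(), as line 576 shows.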