Lines matching defs:wg (the struct wg_device pointer in the WireGuard receive path)
54 static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
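Line 54 is prepare_skb_header(), the validation helper every incoming UDP datagram passes before the functions below see it: it checks that the skb is long enough for the little-endian message type word at the start of the WireGuard payload and for the fixed-size message that type implies. A minimal sketch of that type read, using a hypothetical wg_skb_message_type() helper rather than the driver's actual macros:

#include <linux/skbuff.h>
#include <asm/unaligned.h>

/* Hypothetical helper, for illustration only: return the host-order message
 * type, or 0 (never a valid WireGuard type) if the packet is too short.
 */
static u32 wg_skb_message_type(const struct sk_buff *skb)
{
        /* The type is the first 32-bit little-endian word of the payload. */
        if (skb->len < sizeof(u32))
                return 0;
        return get_unaligned_le32(skb->data);
}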
99 static void wg_receive_handshake_packet(struct wg_device *wg,
113 wg->dev->name, skb);
115 (struct message_handshake_cookie *)skb->data, wg);
119 under_load = atomic_read(&wg->handshake_queue_len) >=
128 mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
137 wg->dev->name, skb);
147 wg_packet_send_handshake_cookie(wg, skb,
151 peer = wg_noise_handshake_consume_initiation(message, wg);
154 wg->dev->name, skb);
159 wg->dev->name, peer->internal_id,
169 wg_packet_send_handshake_cookie(wg, skb,
173 peer = wg_noise_handshake_consume_response(message, wg);
176 wg->dev->name, skb);
181 wg->dev->name, peer->internal_id,
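Lines 99-181 all sit inside wg_receive_handshake_packet(), which consumes cookie replies directly and, for initiations and responses, decides between doing the Noise computation and answering with a cookie when the device is under load. A condensed sketch of that decision based on the fragments above; the cookie_mac_state enum values are as I recall them from cookie.h and may differ in detail from the tree this listing came from:

        bool packet_needs_cookie, under_load;
        enum cookie_mac_state mac_state;

        /* "Under load" means the handshake backlog is already at 1/8 of the
         * ring capacity (line 119). */
        under_load = atomic_read(&wg->handshake_queue_len) >=
                     MAX_QUEUED_INCOMING_HANDSHAKES / 8;
        mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
                                              under_load);
        if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
            (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE))
                packet_needs_cookie = false;
        else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)
                packet_needs_cookie = true;
        else
                return;         /* invalid MAC: silently drop */

        if (packet_needs_cookie) {
                /* Cheap cookie reply instead of expensive Noise processing
                 * (lines 147/169); sender_index comes from the message. */
                wg_packet_send_handshake_cookie(wg, skb,
                        ((struct message_handshake_initiation *)skb->data)
                                ->sender_index);
                return;
        }
        /* Otherwise fall through to wg_noise_handshake_consume_initiation()
         * or wg_noise_handshake_consume_response() (lines 151/173). */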
216 struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
220 wg_receive_handshake_packet(wg, skb);
222 atomic_dec(&wg->handshake_queue_len);
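Lines 216-222 are from wg_packet_handshake_receive_worker(), the per-CPU work function that drains the shared handshake ring. A sketch consistent with the fragments above (the exact body may differ between kernel versions):

void wg_packet_handshake_receive_worker(struct work_struct *work)
{
        struct crypt_queue *queue =
                container_of(work, struct multicore_worker, work)->ptr;
        struct wg_device *wg =
                container_of(queue, struct wg_device, handshake_queue);
        struct sk_buff *skb;

        /* Drain the shared ring; handshake_queue_len mirrors the ring
         * occupancy so wg_packet_receive() can detect overload (line 561).
         */
        while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                wg_receive_handshake_packet(wg, skb);
                dev_kfree_skb(skb);
                atomic_dec(&wg->handshake_queue_len);
                cond_resched();
        }
}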
516 static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
525 wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
533 ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
534 wg->packet_crypt_wq);
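Lines 516-534 are the data-packet path, wg_packet_consume_data(): the receiver index in the message header is resolved to a keypair through the device-wide index hashtable, the keypair is pinned, and the skb is handed off for asynchronous decryption. A condensed sketch of the body; reference counting and error paths are simplified relative to the real function:

        __le32 idx = ((struct message_data *)skb->data)->key_idx;
        struct wg_peer *peer = NULL;
        int ret;

        rcu_read_lock_bh();
        /* The index hashtable maps the 32-bit receiver index back to the
         * keypair (and thus the peer) that negotiated it (line 525). */
        PACKET_CB(skb)->keypair =
                (struct noise_keypair *)wg_index_hashtable_lookup(
                        wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
                        &peer);
        if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) {
                rcu_read_unlock_bh();
                dev_kfree_skb(skb);
                return;
        }

        /* Decryption is asynchronous: the skb goes onto the device-wide
         * parallel decrypt queue and the peer's serial rx queue, and the
         * packet_crypt_wq workers pick it up (lines 533-534). */
        ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
                                                   &peer->rx_queue, skb,
                                                   wg->packet_crypt_wq);
        if (unlikely(ret)) {
                /* Simplified: the real function distinguishes a dead peer
                 * queue from other failures before dropping. */
                wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
                dev_kfree_skb(skb);
        }
        rcu_read_unlock_bh();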
549 void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
551 if (unlikely(prepare_skb_header(skb, wg) < 0))
561 if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
562 if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
563 ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
564 spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
567 ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
571 wg->dev->name, skb);
574 atomic_inc(&wg->handshake_queue_len);
575 cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
577 queue_work_on(cpu, wg->handshake_receive_wq,
578 &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
583 wg_packet_consume_data(wg, skb);
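Lines 549-583 are wg_packet_receive(), the entry point from the UDP socket: it validates the header first (line 551), funnels the three handshake message types into the shared ring, and sends data packets straight to wg_packet_consume_data() (line 583). A condensed, not verbatim, sketch of the handshake enqueue step, which is where the flood protection lives:

        int cpu, ret = -EBUSY;

        /* Past half capacity, only trylock the producer side so a handshake
         * flood cannot stall the RX path on the ring lock (lines 561-564);
         * ret stays -EBUSY and the packet is dropped if the lock is busy. */
        if (atomic_read(&wg->handshake_queue_len) >
            MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
                if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
                        ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
                        spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
                }
        } else {
                ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
        }
        if (ret) {
                dev_kfree_skb(skb);     /* ring full or contended: drop */
                return;
        }
        atomic_inc(&wg->handshake_queue_len);
        /* Spread handshake processing across CPUs round-robin and kick the
         * chosen per-CPU worker (lines 575-578). */
        cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
        queue_work_on(cpu, wg->handshake_receive_wq,
                      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);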