References to wg (the struct wg_device pointer) in drivers/net/wireguard/receive.c, from an identifier search for refs:wg, grouped by function below.

prepare_skb_header() (line 54) validates the encapsulating IP/UDP headers and message length before dispatch; wg is its second argument:

	static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
wg_receive_handshake_packet() (lines 99-181) takes wg as its first argument, using it to consume cookie replies, gauge load from the handshake queue depth, validate MACs, and consume handshakes; the response arm (lines 169-181) mirrors the initiation arm shown here, calling wg_noise_handshake_consume_response(message, wg) instead:

	static void wg_receive_handshake_packet(struct wg_device *wg,
						struct sk_buff *skb)
	...
	net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
				wg->dev->name, skb);
	wg_cookie_message_consume(
		(struct message_handshake_cookie *)skb->data, wg);
	...
	under_load = atomic_read(&wg->handshake_queue_len) >=
			MAX_QUEUED_INCOMING_HANDSHAKES / 8;
	...
	mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
					      under_load);
	...
	net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
				wg->dev->name, skb);
	...
	/* initiation arm; the response arm mirrors it: */
	wg_packet_send_handshake_cookie(wg, skb, message->sender_index);
	peer = wg_noise_handshake_consume_initiation(message, wg);
	if (unlikely(!peer)) {
		net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
					wg->dev->name, skb);
	...
	net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
			    wg->dev->name, peer->internal_id, &peer->endpoint.addr);
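The load-sensitive branch above reduces to a small decision table: a valid mac1 alone suffices when idle, a valid mac2 (cookie) is required under load, and a valid-but-cookieless packet under load earns a cookie reply rather than processing. A minimal standalone sketch of that table, where classify_handshake() and enum handshake_action are invented for illustration (the cookie_mac_state names mirror the kernel's cookie.h):

	#include <stdbool.h>

	enum cookie_mac_state {
		INVALID_MAC,
		VALID_MAC_BUT_NO_COOKIE,
		VALID_MAC_WITH_COOKIE,
	};

	enum handshake_action { PROCESS, SEND_COOKIE_REPLY, DROP };

	static enum handshake_action
	classify_handshake(enum cookie_mac_state mac_state, bool under_load)
	{
		/* Same branch structure as wg_receive_handshake_packet();
		 * when not under load the validator does not check mac2,
		 * so VALID_MAC_WITH_COOKIE only arises under load. */
		if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
		    (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE))
			return PROCESS;
		if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)
			return SEND_COOKIE_REPLY;
		return DROP;
	}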
wg_packet_handshake_receive_worker() (lines 216-222) recovers wg from its embedded handshake_queue member, then drains the ring, decrementing the shared queue-length counter as it goes:

	struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
	...
	wg_receive_handshake_packet(wg, skb);
	atomic_dec(&wg->handshake_queue_len);
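That container_of() call is the standard kernel idiom for recovering an enclosing structure from a pointer to one of its embedded members. A self-contained userspace sketch of the same idiom (the struct names here are illustrative, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct queue { int ring_size; };

	struct device {
		const char *name;
		struct queue handshake_queue; /* embedded by value */
	};

	int main(void)
	{
		struct device dev = { "wg0", { 4096 } };
		struct queue *q = &dev.handshake_queue;

		/* Walk back from the member to the enclosing struct,
		 * as wg_packet_handshake_receive_worker() does. */
		struct device *d = container_of(q, struct device, handshake_queue);
		printf("%s\n", d->name); /* prints wg0 */
		return 0;
	}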
wg_packet_consume_data() (lines 516-534) uses wg's index hashtable to map a data packet's receiver index back to its keypair and peer, then enqueues the skb on both the per-device decrypt queue and the peer's rx queue:

	static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
	...
	PACKET_CB(skb)->keypair = (struct noise_keypair *)wg_index_hashtable_lookup(
			wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, &peer);
	...
	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
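The idx being looked up is the 32-bit little-endian receiver index near the start of the data message, which the index hashtable maps back to the keypair (and thus the peer) assigned during the handshake. A standalone sketch of pulling that field out of a raw datagram (offsets follow the WireGuard data-message layout of type, receiver index, counter; the helper name is invented):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode a little-endian u32 at p, independent of host endianness. */
	static uint32_t le32_at(const unsigned char *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	int main(void)
	{
		/* type 4 (data) in bytes 0..3, receiver index in bytes 4..7 */
		unsigned char pkt[16] = { 4, 0, 0, 0, 0x39, 0x30, 0, 0 };

		/* The receiver index is the key for the
		 * INDEX_HASHTABLE_KEYPAIR lookup on the receive side. */
		printf("receiver index: %u\n", (unsigned)le32_at(pkt + 4)); /* 12345 */
		return 0;
	}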
wg_packet_receive() (lines 549-583) is the receive entry point. After prepare_skb_header(), handshake packets are produced onto the handshake ring (trylock-only once the queue is more than half full) and kicked to a per-cpu worker, while data packets go to wg_packet_consume_data():

	void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
	{
		if (unlikely(prepare_skb_header(skb, wg) < 0))
			goto err;
		...
		if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
			}
		} else
			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
		if (ret) {
			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
						wg->dev->name, skb);
			goto err;
		}
		atomic_inc(&wg->handshake_queue_len);
		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
		queue_work_on(cpu, wg->handshake_receive_wq,
			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
		...
		wg_packet_consume_data(wg, skb);
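The two enqueue paths above encode a backpressure policy: below half capacity a producer may block briefly on the ring's producer lock, but past half capacity it only trylocks, preferring to drop a handshake packet over spinning in softirq context. A userspace analogue of that policy using pthreads (all names invented for illustration; the kernel uses a ptr_ring and spinlocks):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define QUEUE_CAP 4096

	struct hs_ring {
		pthread_mutex_t producer_lock;
		void *slots[QUEUE_CAP];
		unsigned int head, tail;
	};

	static struct hs_ring ring = { .producer_lock = PTHREAD_MUTEX_INITIALIZER };
	static atomic_int queue_len; /* mirrors wg->handshake_queue_len; the
					consumer decrements it after processing */

	/* Must be called with producer_lock held. */
	static bool ring_produce_locked(struct hs_ring *r, void *pkt)
	{
		unsigned int next = (r->head + 1) % QUEUE_CAP;

		if (next == r->tail)
			return false; /* ring full */
		r->slots[r->head] = pkt;
		r->head = next;
		return true;
	}

	/* Returns false when the caller should drop the packet. */
	static bool enqueue_handshake(struct hs_ring *r, void *pkt)
	{
		bool ok = false;

		if (atomic_load(&queue_len) > QUEUE_CAP / 2) {
			/* Deep queue: never wait for the lock. */
			if (pthread_mutex_trylock(&r->producer_lock) == 0) {
				ok = ring_produce_locked(r, pkt);
				pthread_mutex_unlock(&r->producer_lock);
			}
		} else {
			pthread_mutex_lock(&r->producer_lock);
			ok = ring_produce_locked(r, pkt);
			pthread_mutex_unlock(&r->producer_lock);
		}
		if (ok)
			atomic_fetch_add(&queue_len, 1);
		return ok;
	}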