// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

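/* A sockmap is a flat array of struct sock pointers. Keys are 4-byte
 * indices; values are socket FDs (u32 or u64) on update, and socket
 * cookies (u64) on system-call-side lookup.
 */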
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (attr->max_entries == 0 ||
	    attr->key_size    != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		bpf_map_area_free(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

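/* Attach a psock program (parser/verdict) to the map referenced by
 * attr->target_fd; called from the BPF_PROG_ATTACH syscall path.
 */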
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

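/* Unlink the psock from a given map slot. If the link being removed
 * was using the data_ready hook, stop the strparser/verdict hooks and
 * update the socket's proto ops under sk_callback_lock.
 */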
static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct sk_psock_progs *progs = sock_map_progs(map);

			if (psock->saved_data_ready && progs->stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && progs->stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && progs->skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

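/* Core of a map update: take a reference on each program currently
 * attached to the map, create (or reuse) the socket's psock, copy the
 * program pointers into it, and install the strparser or verdict
 * data_ready hook as required. Fails with -EBUSY if the psock already
 * carries a conflicting program.
 */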
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference decrements and cleanup will occur through
	 * the psock destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

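/* Map teardown: detach every socket still held in the map and free the
 * element array. Runs once userspace has dropped all references.
 */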
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	bpf_map_area_free(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

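/* Remove a socket from a map slot under stab->lock. If sk_test is
 * non-NULL the slot is only cleared when it still holds that socket,
 * which lets a psock unlink race safely with concurrent map updates.
 */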
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static long sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

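/* Iterate keys by index; a key at or past the end restarts iteration
 * at index 0.
 */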
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

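/* Insert sk at idx, honoring BPF_NOEXIST/BPF_EXIST/BPF_ANY semantics
 * under stab->lock. Any socket previously in the slot is unlinked.
 */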
static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	if (sk_is_stream_unix(sk))
		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

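/* System-call-side update: the value is a socket FD. This path may
 * sleep, so the full socket lock is taken around the update.
 */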
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

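/* BPF-program-side update: the value is a struct sock pointer. This
 * can run in softirq context, so only the bh spinlock is taken.
 */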
static long sock_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func           = bpf_sk_redirect_map,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func           = bpf_msg_redirect_map,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_ANYTHING,
	.arg4_type      = ARG_ANYTHING,
};

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_stab);

	usage += (u64)map->max_entries * sizeof(struct sock *);
	return usage;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.fini_seq_private	= sock_map_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_map_mem_usage,
	.map_btf_id		= &sock_map_btf_ids[0],
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after an RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
}

static long sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	spin_unlock_bh(&bucket->lock);
	return ret;
}

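/* Allocate a new hash element. When the map is full, an update is
 * still allowed if it replaces an existing element (old != NULL).
 */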
static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

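/* Sockhash counterpart of sock_map_update_common: link the psock, then
 * insert the new element under the bucket lock, replacing any element
 * with the same key.
 */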
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add the new element to the head of the list, so that a
	 * concurrent search will find it before the old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

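/* Keys are arbitrary byte strings up to MAX_BPF_STACK bytes; the
 * bucket count is the map size rounded up to a power of two.
 */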
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (attr->max_entries == 0 ||
	    attr->key_size    == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	bpf_map_area_free(htab);
	return ERR_PTR(err);
}

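/* Map teardown: unlink every socket from every bucket. Entries are
 * moved off the bucket list under the bucket lock, then released
 * outside it so the socket lock can be taken.
 */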
static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to the sockhash. Since the
		 * link exists, the psock exists and holds a ref to the
		 * socket. That lets us grab a socket ref too.
		 */
		spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	bpf_map_area_free(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func           = bpf_sk_redirect_hash,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_PTR_TO_MAP_KEY,
	.arg4_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;
	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func           = bpf_msg_redirect_hash,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type      = ARG_CONST_MAP_PTR,
	.arg3_type      = ARG_PTR_TO_MAP_KEY,
	.arg4_type      = ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static void sock_hash_fini_seq_private(void *priv_data)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static u64 sock_hash_mem_usage(const struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u64 usage = sizeof(*htab);

	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
	return usage;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.fini_seq_private	= sock_hash_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= sock_hash_mem_usage,
	.map_btf_id		= &sock_hash_map_btf_ids[0],
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

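/* Map an attach type to the corresponding program slot in the map's
 * sk_psock_progs. stream_verdict and skb_verdict are mutually
 * exclusive: attaching one while the other is set returns -EBUSY.
 */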
static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		*pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		*pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		*pprog = &progs->stream_verdict;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		*pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct bpf_prog **pprog;
	int ret;

	ret = sock_map_prog_lookup(map, &pprog, which);
	if (ret)
		return ret;

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* We do not hold the refcnt; the bpf prog may be released
	 * asynchronously, in which case its id will read as 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	fdput(f);
	return ret;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
	} else {
		saved_unhash = psock->saved_unhash;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
	}
	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
		return;
	if (saved_unhash)
		saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_destroy(struct sock *sk)
{
	void (*saved_destroy)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
	} else {
		saved_destroy = psock->saved_destroy;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		sk_psock_put(sk, psock);
	}
	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
		return;
	if (saved_destroy)
		saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);

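/* Replacement for sk_prot->close on sockets held in a map: remove all
 * map links and stop the psock before calling the original close.
 */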
void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		saved_close = READ_ONCE(sk->sk_prot)->close;
	} else {
		saved_close = psock->saved_close;
		sock_map_remove_links(sk, psock);
		rcu_read_unlock();
		sk_psock_stop(psock);
		release_sock(sk);
		cancel_delayed_work_sync(&psock->work);
		sk_psock_put(sk, psock);
	}

	/* Make sure we do not recurse. This is a bug.
	 * Leak the socket instead of crashing on a stack overflow.
	 */
	if (WARN_ON_ONCE(saved_close == sock_map_close))
		return;
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);