/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef NET_FIREWALL_H
#define NET_FIREWALL_H

#include <linux/bpf.h>

#include "netfirewall_def.h"
#include "netfirewall_match.h"
#include "netfirewall_ct.h"
#include "netfirewall_event.h"
#define FIREWALL_DNS_QUERY_PORT         53  // plain DNS
#define FIREWALL_DNS_OVER_QUERY_PORT    853 // DNS over TLS (RFC 7858)

/**
 * @brief If a TCP socket is intercepted, send a reset packet to the peer
 *
 * @param tuple match tuple parsed from skb metadata
 * @param skb struct __sk_buff
 * @param dir enum stream_dir
 * @return 0 if no error, -1 if an error occurred
 */
static __always_inline int send_sock_tcp_reset(struct match_tuple *tuple, struct __sk_buff *skb, enum stream_dir dir)
{
    if (!skb || !tuple) {
        return -1;
    }
    if (tuple->protocol == IPPROTO_TCP) {
        // For intercepted inbound connections, reply with a TCP RST before tearing down the socket
        if (dir == INGRESS) {
            bpf_sock_tcp_send_reset(skb);
        }
        return bpf_sock_destroy(skb);
    }
    return -1;
}

/**
 * @brief Get the TCP RST flag from the match tuple
 *
 * @param tuple struct match_tuple
 * @return true if the RST flag is set, otherwise false
 */
static __always_inline bool get_packet_rst_flag(struct match_tuple *tuple)
{
    if (!tuple) {
        return false;
    }

    if (tuple->rst == 1) {
        return true;
    }

    return false;
}

/**
 * @brief Fill a ct tuple from a match tuple
 *
 * @param match_tpl struct match_tuple
 * @param ct_tpl struct ct_tuple
 * @return true on success, false if an error occurred
 */
static __always_inline bool get_ct_tuple(struct match_tuple *match_tpl, struct ct_tuple *ct_tpl)
{
    if (!match_tpl || !ct_tpl) {
        return false;
    }

    ct_tpl->family = match_tpl->family;
    ct_tpl->protocol = match_tpl->protocol;
    ct_tpl->sport = match_tpl->sport;
    ct_tpl->dport = match_tpl->dport;

    if (match_tpl->family == AF_INET) {
        ct_tpl->ipv4.saddr = match_tpl->ipv4.saddr;
        ct_tpl->ipv4.daddr = match_tpl->ipv4.daddr;
    } else {
        ct_tpl->ipv6.saddr = match_tpl->ipv6.saddr;
        ct_tpl->ipv6.daddr = match_tpl->ipv6.daddr;
    }

    return true;
}

/**
 * @brief Decide whether an ingress packet should be dropped
 *
 * @param skb struct __sk_buff
 * @return SK_DROP if intercepted, SK_PASS otherwise
 */
static __always_inline enum sk_action netfirewall_policy_ingress(struct __sk_buff *skb)
{
    struct match_tuple tuple = { 0 };
    if (!get_match_tuple(skb, &tuple, INGRESS)) {
        return SK_PASS;
    }

    log_tuple(&tuple);

    struct ct_tuple ct_tpl = {};
    if (!get_ct_tuple(&tuple, &ct_tpl)) {
        return SK_PASS;
    }

    // Packets belonging to an already tracked connection are passed without re-matching the rules
    enum ct_status status = ct_lookup_entry(skb, &ct_tpl, CT_INGRESS);
    log_dbg(DBG_CT_LOOKUP, INGRESS, status);
    if (status != CT_NEW) {
        return SK_PASS;
    }

    // RST packets are not filtered
    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

    struct bitmap key = { 0 };
    if (!match_action_key(&tuple, &key)) {
        return SK_PASS;
    }

    if (match_action(&tuple, &key) != SK_PASS) {
        log_intercept(&tuple);
        send_sock_tcp_reset(&tuple, skb, INGRESS);
        return SK_DROP;
    }

    if (status == CT_NEW) {
        ct_create_entry(&ct_tpl, skb, CT_INGRESS);
    }

    return SK_PASS;
}
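
/*
 * Usage sketch (illustrative, not part of this header): netfirewall_policy_ingress()
 * is meant to be called from the BPF entry point that handles inbound traffic.
 * The section name and program below are assumptions for illustration only; the
 * returned enum sk_action values (SK_DROP == 0, SK_PASS == 1) line up with the
 * cgroup_skb deny/allow return convention.
 *
 * SEC("cgroup_skb/ingress")
 * int netfirewall_ingress_prog(struct __sk_buff *skb)
 * {
 *     return netfirewall_policy_ingress(skb);
 * }
 */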

/**
 * @brief Check whether an egress packet is a DNS query that should bypass filtering
 *
 * @param tuple struct match_tuple
 * @return true if the packet uses a DNS query port and the default outbound action is not SK_PASS
 */
static __always_inline bool MatchDnsQuery(const struct match_tuple *tuple)
{
    __be16 port = bpf_htons(tuple->sport);
    if (port == FIREWALL_DNS_QUERY_PORT || port == FIREWALL_DNS_OVER_QUERY_PORT) {
        // Report a bypass only when the configured default outbound action is not SK_PASS
        default_action_key key = DEFAULT_ACT_OUT_KEY;
        enum sk_action *action = bpf_map_lookup_elem(&DEFAULT_ACTION_MAP, &key);
        return action && *action != SK_PASS;
    }
    return false;
}

/**
 * @brief Decide whether an egress packet should be dropped
 *
 * @param skb struct __sk_buff
 * @return SK_DROP if intercepted, SK_PASS otherwise
 */
static __always_inline enum sk_action netfirewall_policy_egress(struct __sk_buff *skb)
{
    struct match_tuple tuple = { 0 };
    if (!get_match_tuple(skb, &tuple, EGRESS)) {
        return SK_PASS;
    }

    log_tuple(&tuple);

    // RST packets are not filtered
    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

    struct ct_tuple ct_tpl = {};
    if (!get_ct_tuple(&tuple, &ct_tpl)) {
        return SK_PASS;
    }

    // Packets belonging to an already tracked connection are passed without re-matching the rules
    enum ct_status status = ct_lookup_entry(skb, &ct_tpl, CT_EGRESS);
    log_dbg(DBG_CT_LOOKUP, EGRESS, status);
    if (status != CT_NEW) {
        return SK_PASS;
    }

    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

    struct bitmap key = { 0 };
    if (!match_action_key(&tuple, &key)) {
        return SK_PASS;
    }
    // Outbound DNS queries need to be allowed through
    if (!MatchDnsQuery(&tuple) && match_action(&tuple, &key) != SK_PASS) {
        log_intercept(&tuple);
        send_sock_tcp_reset(&tuple, skb, EGRESS);
        return SK_DROP;
    }

    if (status == CT_NEW) {
        ct_create_entry(&ct_tpl, skb, CT_EGRESS);
    }

    return SK_PASS;
}
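
/*
 * Usage sketch (illustrative, not part of this header): the egress policy is
 * expected to be wired up symmetrically to the ingress one. The section name
 * below is an assumption for illustration only.
 *
 * SEC("cgroup_skb/egress")
 * int netfirewall_egress_prog(struct __sk_buff *skb)
 * {
 *     return netfirewall_policy_egress(skb);
 * }
 */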

#endif // NET_FIREWALL_H