Lines matching refs: pctx (Linux GTP tunnelling driver, drivers/net/gtp.c)
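
All of the matches below operate on one per-tunnel object, struct pdp_ctx. For orientation, a sketch of that structure reconstructed from the fields referenced in this listing; the in-tree definition may carry additional members.

struct pdp_ctx {
	struct hlist_node	hlist_tid;	/* lookup by TID/TEID (RX path) */
	struct hlist_node	hlist_addr;	/* lookup by MS address (TX path) */

	union {
		struct {
			u64	tid;	/* GTPv0 64-bit tunnel identifier */
			u16	flow;	/* GTPv0 flow label */
		} v0;
		struct {
			u32	i_tei;	/* GTPv1-U ingress TEID */
			u32	o_tei;	/* GTPv1-U egress TEID */
		} v1;
	} u;
	u8			gtp_version;	/* GTP_V0 or GTP_V1 */
	u16			af;		/* AF_INET */

	struct in_addr		ms_addr_ip4;	/* mobile subscriber address */
	struct in_addr		peer_addr_ip4;	/* remote GSN peer address */

	struct sock		*sk;		/* encapsulating UDP socket */
	struct net_device	*dev;		/* owning gtp device */

	atomic_t		tx_seq;		/* GTPv0 TX sequence counter */
	struct rcu_head		rcu_head;
};
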
86 static void pdp_context_delete(struct pdp_ctx *pctx);
153 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
164 return iph->daddr == pctx->ms_addr_ip4.s_addr;
166 return iph->saddr == pctx->ms_addr_ip4.s_addr;
172 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
177 return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
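
Lines 153-177 verify that the inner IPv4 packet really belongs to the mobile subscriber bound to the context: in SGSN role the MS is the destination of the tunnelled packet, otherwise it is the source. A sketch of the check, reconstructed from these matches (GTP_ROLE_SGSN is the role constant from the driver's link attributes):

static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;

	return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
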
182 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
185 if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
186 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
192 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
195 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
203 skb->dev = pctx->dev;
205 dev_sw_netstats_rx_add(pctx->dev, skb->len);
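
gtp_rx() (lines 182-205) is the common decapsulation tail for both GTP versions: validate the MS address, strip the outer UDP/GTP headers, then hand the inner packet to the gtp netdevice. A condensed sketch built from the matches above; error accounting is abbreviated.

static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;	/* not ours, let the UDP stack see it */
	}

	/* Strip UDP + GTP headers; scrub the skb when the encap socket
	 * and the gtp device live in different network namespaces.
	 */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk),
					 dev_net(pctx->dev))))
		goto err;

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* The inner IP header is now at the front of the skb. */
	skb_reset_network_header(skb);
	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);
	netif_rx(skb);
	return 0;

err:
	pctx->dev->stats.rx_dropped++;
	return -1;
}
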
217 struct pdp_ctx *pctx;
230 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
231 if (!pctx) {
236 return gtp_rx(pctx, skb, hdrlen, gtp->role);
244 struct pdp_ctx *pctx;
272 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
273 if (!pctx) {
278 return gtp_rx(pctx, skb, hdrlen, gtp->role);
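
Both receive paths key the context lookup on the tunnel identifier carried in the GTP header: the 64-bit TID for GTPv0 (line 230) and the 32-bit TEID for GTPv1-U (line 272, stored in a field still named tid). For reference, the on-wire layouts those accesses assume, following GSM TS 09.60 and 3GPP TS 29.060; this mirrors the definitions the driver uses.

/* GTPv0 header (20 bytes), GSM TS 09.60. */
struct gtp0_header {
	__u8	flags;		/* version 0, GTP / GTP' bit, spare bits */
	__u8	type;		/* GTP_TPDU for user payload */
	__be16	length;
	__be16	seq;
	__be16	flow;
	__u8	number;
	__u8	spare[3];
	__be64	tid;		/* 64-bit tunnel identifier (IMSI + NSAPI) */
} __attribute__((packed));

/* GTPv1-U mandatory header (8 bytes), 3GPP TS 29.060.
 * If any of the S/PN/E flag bits are set, 4 optional bytes follow.
 */
struct gtp1_header {
	__u8	flags;
	__u8	type;
	__be16	length;
	__be32	tid;		/* TEID, despite the field name */
} __attribute__((packed));
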
400 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
410 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
411 gtp0->flow = htons(pctx->u.v0.flow);
414 gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
417 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
433 gtp1->tid = htonl(pctx->u.v1.o_tei);
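
On transmit the reverse happens: lines 400-433 prepend the header that matches pctx->gtp_version. A sketch of the GTPv0 variant, reconstructed from the matches above; the GTPv1 variant at line 433 writes the egress TEID (u.v1.o_tei) instead.

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e;	/* v0, GTP-non-prime */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	/* Per-context sequence number, wrapping at 16 bits. */
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}
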
445 struct pdp_ctx *pctx;
452 switch (pktinfo->pctx->gtp_version) {
455 gtp0_push_header(skb, pktinfo->pctx);
459 gtp1_push_header(skb, pktinfo->pctx);
466 struct pdp_ctx *pctx, struct rtable *rt,
472 pktinfo->pctx = pctx;
482 struct pdp_ctx *pctx;
494 pctx = ipv4_pdp_find(gtp, iph->saddr);
496 pctx = ipv4_pdp_find(gtp, iph->daddr);
498 if (!pctx) {
503 netdev_dbg(dev, "found PDP context %p\n", pctx);
505 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
508 &pctx->peer_addr_ip4.s_addr);
515 &pctx->peer_addr_ip4.s_addr);
527 switch (pctx->gtp_version) {
550 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
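
The transmit path (lines 482-550) resolves the context from the inner IP header, routes towards the GSN peer and stashes everything in a pktinfo for gtp_push_header(). A condensed sketch of that core, with the per-version MTU handling at line 527 onward omitted:

	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;

	/* In SGSN role the MS is the source of locally transmitted
	 * packets, otherwise it is the destination.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);
	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	/* Route towards the GSN peer recorded in the context. */
	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		return -EHOSTUNREACH;
	}

	/* ... MTU checks elided (line 527) ... */

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	return 0;
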
706 struct pdp_ctx *pctx;
710 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)

711 pdp_context_delete(pctx);
903 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
905 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
906 pctx->af = AF_INET;
907 pctx->peer_addr_ip4.s_addr =
909 pctx->ms_addr_ip4.s_addr =
912 switch (pctx->gtp_version) {
918 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
919 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
922 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
923 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
933 struct pdp_ctx *pctx, *pctx_tid = NULL;
944 pctx = ipv4_pdp_find(gtp, ms_addr);
945 if (pctx)
962 if (pctx && pctx_tid)
964 if (!pctx)
965 pctx = pctx_tid;
967 ipv4_pdp_fill(pctx, info);
969 if (pctx->gtp_version == GTP_V0)
971 pctx->u.v0.tid, pctx);
972 else if (pctx->gtp_version == GTP_V1)
974 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
976 return pctx;
980 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
981 if (pctx == NULL)
985 pctx->sk = sk;
986 pctx->dev = gtp->dev;
987 ipv4_pdp_fill(pctx, info);
988 atomic_set(&pctx->tx_seq, 0);
990 switch (pctx->gtp_version) {
997 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
1000 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
1004 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
1005 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
1007 switch (pctx->gtp_version) {
1010 pctx->u.v0.tid, &pctx->peer_addr_ip4,
1011 &pctx->ms_addr_ip4, pctx);
1015 pctx->u.v1.i_tei, pctx->u.v1.o_tei,
1016 &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
1020 return pctx;
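
Lines 933-1020 implement gtp_pdp_add(): an existing context (found either by MS address or by TID/TEI) is updated in place via ipv4_pdp_fill(), otherwise a new one is allocated and linked into both hash tables. A sketch of the allocation branch, reconstructed from the matches above; hash_ms is computed earlier in the function from the MS address.

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);		/* dropped again in pdp_context_free() */
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	/* The context is reachable by MS address (TX) and by TID/TEID (RX). */
	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	return pctx;
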
1025 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
1027 sock_put(pctx->sk);
1028 kfree(pctx);
1031 static void pdp_context_delete(struct pdp_ctx *pctx)
1033 hlist_del_rcu(&pctx->hlist_tid);
1034 hlist_del_rcu(&pctx->hlist_addr);
1035 call_rcu(&pctx->rcu_head, pdp_context_free);
1038 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
1043 struct pdp_ctx *pctx;
1092 pctx = gtp_pdp_add(gtp, sk, info);
1093 if (IS_ERR(pctx)) {
1094 err = PTR_ERR(pctx);
1096 gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
1132 struct pdp_ctx *pctx;
1135 pctx = gtp_find_pdp_by_link(net, nla);
1137 pctx = ERR_PTR(-EINVAL);
1139 if (!pctx)
1140 pctx = ERR_PTR(-ENOENT);
1142 return pctx;
1147 struct pdp_ctx *pctx;
1155 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1156 if (IS_ERR(pctx)) {
1157 err = PTR_ERR(pctx);
1161 if (pctx->gtp_version == GTP_V0)
1162 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
1163 pctx->u.v0.tid, pctx);
1164 else if (pctx->gtp_version == GTP_V1)
1165 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
1166 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1168 gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
1169 pdp_context_delete(pctx);
1187 int flags, u32 type, struct pdp_ctx *pctx)
1196 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
1197 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
1198 nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
1199 nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
1202 switch (pctx->gtp_version) {
1204 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
1205 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
1209 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
1210 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
1223 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
1232 ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
1238 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
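
gtp_tunnel_notify() (lines 1223-1238) multicasts a GTP_CMD_NEWPDP or GTP_CMD_DELPDP event whenever a context is created or removed. A sketch reconstructed from these matches; GTP_GENL_MCGRP names the driver's single generic-netlink multicast group.

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	/* Deliver the event in the namespace of the gtp device. */
	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev),
				      msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}
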
1245 struct pdp_ctx *pctx = NULL;
1254 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1255 if (IS_ERR(pctx)) {
1256 err = PTR_ERR(pctx);
1267 0, info->nlhdr->nlmsg_type, pctx);
1287 struct pdp_ctx *pctx;
1304 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
1311 cb->nlh->nlmsg_type, pctx)) {