Lines matching refs:xfrm: cross-reference hits in the kernel's IPsec policy engine (net/xfrm/xfrm_policy.c), each prefixed with its source line number.
35 #include <net/xfrm.h>
77 /* xfrm inexact policy search tree:
459 return __idx_hash(index, net->xfrm.policy_idx_hmask);
469 *dbits = net->xfrm.policy_bydst[dir].dbits4;
470 *sbits = net->xfrm.policy_bydst[dir].sbits4;
474 *dbits = net->xfrm.policy_bydst[dir].dbits6;
475 *sbits = net->xfrm.policy_bydst[dir].sbits6;
488 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
499 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
500 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
508 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
516 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
517 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
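
The dbits/sbits thresholds looked up at lines 469-475 gate whether a policy can live in the bydst hash at all: a selector whose prefixes are shorter than the thresholds hashes to hmask + 1 and is diverted to the inexact list/tree described at line 77. Lines 488-500 (policy_hash_bysel) reduce to this shape:

    static struct hlist_head *policy_hash_bysel(struct net *net,
                                                const struct xfrm_selector *sel,
                                                unsigned short family, int dir)
    {
            unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
            unsigned int hash;
            u8 dbits, sbits;

            __get_hash_thresh(net, family, dir, &dbits, &sbits);
            hash = __sel_hash(sel, family, hmask, dbits, sbits);
            if (hash == hmask + 1)
                    return NULL;    /* too coarse to hash: inexact path */
            return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
                            lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
    }
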
579 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
589 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
590 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
592 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
593 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
598 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
599 net->xfrm.policy_bydst[dir].hmask = nhashmask;
601 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
602 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
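
Lines 579-602 are the writer side of the resize protocol: the spinlock serializes writers, the seqcount bump forces lockless readers (lines 2175-2211 below) to retry, and rcu_assign_pointer() publishes the new table before the old one is freed after a grace period. The skeleton, with the rehash loop elided:

    spin_lock_bh(&net->xfrm.xfrm_policy_lock);
    write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

    odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
                    lockdep_is_held(&net->xfrm.xfrm_policy_lock));
    /* relink every policy from odst[] into ndst[] under the lock */
    rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
    net->xfrm.policy_bydst[dir].hmask = nhashmask;

    write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
    spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

    synchronize_rcu();
    xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
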
611 unsigned int hmask = net->xfrm.policy_idx_hmask;
614 struct hlist_head *oidx = net->xfrm.policy_byidx;
621 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
626 net->xfrm.policy_byidx = nidx;
627 net->xfrm.policy_idx_hmask = nhashmask;
629 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
636 unsigned int cnt = net->xfrm.policy_count[dir];
637 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
651 unsigned int hmask = net->xfrm.policy_idx_hmask;
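
The predicates around lines 636-651 decide when the deferred resize fires: a table grows once it holds more entries than its bucket mask (roughly one entry per bucket), bounded by xfrm_policy_hashmax. A simplified form of xfrm_bydst_should_resize():

    static int bydst_should_resize(struct net *net, int dir)
    {
            unsigned int cnt = net->xfrm.policy_count[dir];
            unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

            /* grow while the load factor exceeds ~1, up to the cap */
            return (hmask + 1) < xfrm_policy_hashmax && cnt > hmask;
    }
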
662 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
663 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
664 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
665 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
666 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
667 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
668 si->spdhcnt = net->xfrm.policy_idx_hmask;
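
The six counters filled at lines 662-668 all come from the one policy_count[] array: slot dir counts main-SPD policies, slot XFRM_POLICY_MAX + dir counts per-socket policies (the extra "s" in inscnt/outscnt/fwdscnt). A hypothetical helper making the layout explicit:

    static void spd_counts(const struct net *net, int dir,
                           unsigned int *spd, unsigned int *sock)
    {
            *spd  = net->xfrm.policy_count[dir];                   /* main SPD */
            *sock = net->xfrm.policy_count[XFRM_POLICY_MAX + dir]; /* socket policies */
    }
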
676 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
708 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
724 seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
730 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
850 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1111 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1113 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1120 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1122 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1134 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1199 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1213 chain = &net->xfrm.policy_inexact[dir];
1225 xfrm.policy_hthresh.work);
1241 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1243 lbits4 = net->xfrm.policy_hthresh.lbits4;
1244 rbits4 = net->xfrm.policy_hthresh.rbits4;
1245 lbits6 = net->xfrm.policy_hthresh.lbits6;
1246 rbits6 = net->xfrm.policy_hthresh.rbits6;
1247 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
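
Lines 1241-1247 are the seqlock read side: the rebuild worker snapshots all four thresholds and loops if a writer raced with it. The write side lives in the netlink spdinfo handler (xfrm_user.c); slightly condensed it is:

    write_seqlock(&net->xfrm.policy_hthresh.lock);
    net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
    net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
    net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
    net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
    write_sequnlock(&net->xfrm.policy_hthresh.lock);

    xfrm_policy_hash_rebuild(net);  /* schedules the work item, line 1370 */
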
1249 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1250 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1255 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1301 &net->xfrm.policy_inexact[dir],
1307 hmask = net->xfrm.policy_bydst[dir].hmask;
1308 odst = net->xfrm.policy_bydst[dir].table;
1315 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1316 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1317 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1318 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1321 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1322 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1323 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1324 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1329 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1362 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1363 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1370 schedule_work(&net->xfrm.policy_hthresh.work);
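
The two assignment blocks at lines 1315-1324 apply the same four thresholds direction-sensitively: the l(ocal) and r(emote) bits land on dst/src according to which end of the flow is local. Annotated (the IPv6 bits follow the same pattern):

    if (dir == XFRM_POLICY_OUT) {
            /* outbound: dst = remote peer, src = local host */
            net->xfrm.policy_bydst[dir].dbits4 = rbits4;
            net->xfrm.policy_bydst[dir].sbits4 = lbits4;
    } else {
            /* inbound/forward: dst = local host, src = remote peer */
            net->xfrm.policy_bydst[dir].dbits4 = lbits4;
            net->xfrm.policy_bydst[dir].sbits4 = rbits4;
    }
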
1385 idx = (net->xfrm.idx_generator | dir);
1386 net->xfrm.idx_generator += 8;
1394 list = net->xfrm.policy_byidx + idx_hash(net, idx);
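
The generator at lines 1385-1386 packs the direction into the low three bits of every policy index and advances in steps of 8, so the direction is recoverable from the index alone; include/net/xfrm.h relies on exactly that:

    /* from include/net/xfrm.h */
    static inline int xfrm_policy_id2dir(u32 index)
    {
            return index & 7;
    }
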
1583 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1591 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1608 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1613 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1618 schedule_work(&net->xfrm.policy_hash_work);
1656 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1665 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1672 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1699 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1706 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1728 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1729 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1739 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1748 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1763 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1785 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1819 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1826 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1836 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1841 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1849 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1860 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1867 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1877 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1882 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1890 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1910 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1912 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1917 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1938 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1957 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1959 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
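
Lines 1910-1959 are the restartable SPD walker used by netlink dumps; the walk entry stays linked into policy_all between calls so a dump can resume where it left off, which is also why walk_done must take the lock to unlink it. A hypothetical caller, with dump_one() invented for illustration:

    static int dump_one(struct xfrm_policy *pol, int dir, int count, void *ptr)
    {
            pr_info("policy %u dir %d\n", pol->index, dir);
            return 0;       /* nonzero aborts the walk */
    }

    static void dump_spd(struct net *net)
    {
            struct xfrm_policy_walk walk;

            xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
            xfrm_policy_walk(net, &walk, dump_one, NULL);
            xfrm_policy_walk_done(&walk, net);
    }
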
2082 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2175 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2177 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2211 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
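
Lines 2175-2211 are the reader half of the generation seqcount written at lines 590/1250: sample the count, pick hash chains, walk them under RCU, and throw the result away if a resize or rebuild slipped in meanwhile. Condensed from xfrm_policy_lookup_bytype():

    u32 seq;

    rcu_read_lock();
 retry:
    do {
            seq = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
            chain = policy_hash_direct(net, daddr, saddr, family, dir);
    } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, seq));

    /* walk chain (and the inexact candidates), remember the best match */

    if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, seq))
            goto retry;     /* table changed under us: start over */
    rcu_read_unlock();
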
2285 list_add(&pol->walk.all, &net->xfrm.policy_all);
2286 net->xfrm.policy_count[dir]++;
2306 net->xfrm.policy_count[dir]--;
2325 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2327 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2347 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2349 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2365 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2397 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2399 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2445 struct xfrm_state **xfrm, unsigned short family)
2478 xfrm[nx++] = x;
2498 xfrm_state_put(xfrm[nx]);
2504 struct xfrm_state **xfrm, unsigned short family)
2507 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2529 xfrm_state_sort(xfrm, tpp, cnx, family);
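
Lines 2445-2529 turn policy templates into concrete states. When more than one policy applies (a per-socket policy stacked on a main-SPD one), each policy's states are resolved into a scratch array first, and xfrm_state_sort() then orders the combined set for proper nesting. Condensed from xfrm_tmpl_resolve():

    struct xfrm_state *tp[XFRM_MAX_DEPTH];
    struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
    int cnx = 0, i, ret;

    for (i = 0; i < npols; i++) {
            ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
            if (ret < 0)
                    goto fail;      /* drop the refs taken so far */
            cnx += ret;
    }
    if (npols > 1)
            xfrm_state_sort(xfrm, tpp, cnx, family);
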
2559 dst_ops = &net->xfrm.xfrm4_dst_ops;
2563 dst_ops = &net->xfrm.xfrm6_dst_ops;
2609 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2614 struct xfrm_state **xfrm,
2661 if (xfrm[i]->sel.family == AF_UNSPEC) {
2662 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2670 inner_mode = &xfrm[i]->inner_mode;
2675 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2679 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2680 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2682 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2683 family = xfrm[i]->props.family;
2686 dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2694 dst1->xfrm = xfrm[i];
2695 xdst->xfrm_genid = xfrm[i]->genid;
2712 header_len += xfrm[i]->props.header_len;
2713 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2714 nfheader_len += xfrm[i]->props.header_len;
2715 trailer_len += xfrm[i]->props.trailer_len;
2737 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2738 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2745 xfrm_state_put(xfrm[i]);
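
While chaining one xfrm_dst per state, xfrm_bundle_create() (lines 2609-2745) sums each transform's header and trailer sizes; the totals become the headroom/tailroom reserved for outgoing packets, and the subtraction at lines 2737-2738 leaves every dst in the chain recording only the space still needed beneath it. In outline (child() stands in for the xfrm_dst_child() step):

    /* first pass, while building the chain */
    header_len  += xfrm[i]->props.header_len;       /* e.g. ESP header + IV */
    trailer_len += xfrm[i]->props.trailer_len;      /* e.g. ESP padding + ICV */

    /* second pass, outermost to innermost */
    for (xdst_prev = xdst0; xdst_prev != xdst; xdst_prev = child(xdst_prev)) {
            xdst_prev->u.dst.header_len  = header_len;
            xdst_prev->u.dst.trailer_len = trailer_len;
            header_len  -= xdst_prev->u.dst.xfrm->props.header_len;
            trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
    }
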
2807 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2814 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2824 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2985 net->xfrm.sysctl_larval_drop ||
3177 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3202 if (net->xfrm.sysctl_larval_drop) {
3242 if (dst && dst->xfrm &&
3243 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3249 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3668 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3737 /* For each tunnel xfrm, find the first matching tmpl.
3738 * For each tmpl before that, find corresponding xfrm.
3843 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3878 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3913 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3915 if (xdst->xfrm_genid != dst->xfrm->genid)
3938 } while (dst->xfrm);
3948 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3977 while (dst->xfrm) {
3978 const struct xfrm_state *xfrm = dst->xfrm;
3982 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3984 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3985 daddr = xfrm->coaddr;
3986 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3987 daddr = &xfrm->id.daddr;
4130 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4131 if (!net->xfrm.policy_byidx)
4133 net->xfrm.policy_idx_hmask = hmask;
4138 net->xfrm.policy_count[dir] = 0;
4139 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4140 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4142 htab = &net->xfrm.policy_bydst[dir];
4152 net->xfrm.policy_hthresh.lbits4 = 32;
4153 net->xfrm.policy_hthresh.rbits4 = 32;
4154 net->xfrm.policy_hthresh.lbits6 = 128;
4155 net->xfrm.policy_hthresh.rbits6 = 128;
4157 seqlock_init(&net->xfrm.policy_hthresh.lock);
4159 INIT_LIST_HEAD(&net->xfrm.policy_all);
4160 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4161 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4162 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4169 htab = &net->xfrm.policy_bydst[dir];
4172 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4183 flush_work(&net->xfrm.policy_hash_work);
4189 WARN_ON(!list_empty(&net->xfrm.policy_all));
4194 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4196 htab = &net->xfrm.policy_bydst[dir];
4202 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4203 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4204 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4206 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4207 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4209 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4217 spin_lock_init(&net->xfrm.xfrm_state_lock);
4218 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4219 seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4220 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4221 net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4222 net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4223 net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
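
xfrm_net_init at lines 4217-4223 (with the matching teardown above it) is hooked up through ordinary pernet operations; the registration at the bottom of this file has this shape:

    static struct pernet_operations __net_initdata xfrm_net_ops = {
            .init = xfrm_net_init,
            .exit = xfrm_net_exit,
    };

    void __init xfrm_init(void)
    {
            register_pernet_subsys(&xfrm_net_ops);
            /* followed by dev/input/state initialization */
    }
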
4368 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4379 chain = &net->xfrm.policy_inexact[dir];
4394 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);