/kernel/linux/linux-6.6/net/core/
  gro.c
    106  int segs;  in skb_gro_receive() local
    133  segs = NAPI_GRO_CB(skb)->count;  in skb_gro_receive()
    222  NAPI_GRO_CB(p)->count += segs;  in skb_gro_receive()

/third_party/skia/samplecode/
  SamplePatch.cpp
    67  static void eval_patch_edge(const SkPoint cubic[], SkPoint samples[], int segs) {  in eval_patch_edge() argument
    69  SkScalar dt = SK_Scalar1 / segs;  in eval_patch_edge()
    72  for (int i = 1; i < segs; i++) {  in eval_patch_edge()

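The SamplePatch.cpp hits show eval_patch_edge() stepping a parameter by dt = SK_Scalar1 / segs and looping from i = 1 to segs - 1. A minimal standalone sketch of that uniform-sampling idea in plain C (Point, eval_cubic and sample_edge are illustrative stand-ins, not Skia's actual types or code):

    #include <stdio.h>

    typedef struct { float x, y; } Point;

    /* Evaluate a cubic Bezier with control points c[0..3] at parameter t. */
    static Point eval_cubic(const Point c[4], float t)
    {
        float s = 1.0f - t;
        float b0 = s * s * s, b1 = 3 * s * s * t, b2 = 3 * s * t * t, b3 = t * t * t;
        Point p = { b0 * c[0].x + b1 * c[1].x + b2 * c[2].x + b3 * c[3].x,
                    b0 * c[0].y + b1 * c[1].y + b2 * c[2].y + b3 * c[3].y };
        return p;
    }

    /* Fill samples[0..segs-1] along one patch edge, stepping t by 1/segs,
     * mirroring the dt = SK_Scalar1 / segs loop that starts at i = 1. */
    static void sample_edge(const Point cubic[4], Point samples[], int segs)
    {
        float dt = 1.0f / segs;
        samples[0] = cubic[0];   /* the edge starts at the first control point */
        for (int i = 1; i < segs; i++)
            samples[i] = eval_cubic(cubic, i * dt);
    }

    int main(void)
    {
        Point edge[4] = { {0, 0}, {1, 2}, {2, 2}, {3, 0} };
        Point out[8];
        sample_edge(edge, out, 8);
        printf("t=0.5 -> (%.2f, %.2f)\n", out[4].x, out[4].y);
        return 0;
    }
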
/kernel/linux/linux-5.10/net/ipv4/
  af_inet.c
    1349  struct sk_buff *segs = ERR_PTR(-EINVAL);  in inet_gso_segment() local
    1383  segs = ERR_PTR(-EPROTONOSUPPORT);  in inet_gso_segment()
    1396  segs = ops->callbacks.gso_segment(skb, features);  in inet_gso_segment()
    1397  if (!segs)  in inet_gso_segment()
    1401  if (IS_ERR_OR_NULL(segs))  in inet_gso_segment()
    1404  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in inet_gso_segment()
    1406  skb = segs;  in inet_gso_segment()
    1441  return segs;  in inet_gso_segment()

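The inet_gso_segment() hits above revolve around the kernel's ERR_PTR convention: a pointer return carries either a valid object, NULL, or an encoded negative errno, and callers test it with IS_ERR_OR_NULL(). A small userspace sketch of that convention, using simplified stand-ins for the kernel macros (try_segment() is a hypothetical helper, not the real gso_segment callback):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified userspace stand-ins for the kernel's ERR_PTR helpers,
     * only to illustrate the convention seen in inet_gso_segment(). */
    #define MAX_ERRNO 4095UL
    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR_OR_NULL(const void *p)
    {
        return !p || (unsigned long)p >= -MAX_ERRNO;
    }

    /* Returns a real buffer, NULL ("nothing to do"), or an encoded -errno. */
    static void *try_segment(int have_handler)
    {
        if (!have_handler)
            return ERR_PTR(-93L);   /* -EPROTONOSUPPORT, as in the matched code */
        return calloc(1, 64);       /* stands in for the gso_segment() result */
    }

    int main(void)
    {
        void *segs = try_segment(0);
        if (IS_ERR_OR_NULL(segs))
            printf("segmentation failed: err=%ld\n", segs ? PTR_ERR(segs) : 0L);
        else
            free(segs);
        return 0;
    }
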
  ip_output.c
    252  struct sk_buff *segs, *nskb;  in ip_finish_output_gso() local
    276  segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);  in ip_finish_output_gso()
    277  if (IS_ERR_OR_NULL(segs)) {  in ip_finish_output_gso()
    284  skb_list_walk_safe(segs, segs, nskb) {  in ip_finish_output_gso()
    287  skb_mark_not_on_list(segs);  in ip_finish_output_gso()
    288  err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);  in ip_finish_output_gso()

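In the ip_finish_output_gso() hits, skb_gso_segment() returns a chain of skbs linked through ->next; skb_list_walk_safe() saves the successor before each segment is detached (skb_mark_not_on_list()) and handed off to ip_fragment(). A self-contained sketch of that walk-safe pattern over a plain linked list (struct seg and deliver_each() are illustrative, not kernel types):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal singly linked "segment" list, standing in for the skb->next
     * chain that skb_gso_segment() returns. */
    struct seg {
        struct seg *next;
        int id;
    };

    /* Walk-safe pattern: remember the successor before the current node is
     * detached and handed off, mirroring skb_list_walk_safe() plus
     * skb_mark_not_on_list() in ip_finish_output_gso(). */
    static void deliver_each(struct seg *segs, void (*deliver)(struct seg *))
    {
        struct seg *nskb;
        for (; segs; segs = nskb) {
            nskb = segs->next;   /* grab the successor first */
            segs->next = NULL;   /* detach, like skb_mark_not_on_list() */
            deliver(segs);       /* consumer now owns this segment */
        }
    }

    static void print_and_free(struct seg *s)
    {
        printf("segment %d\n", s->id);
        free(s);
    }

    int main(void)
    {
        struct seg *head = NULL;
        for (int i = 2; i >= 0; i--) {
            struct seg *s = malloc(sizeof(*s));
            s->id = i;
            s->next = head;
            head = s;
        }
        deliver_each(head, print_and_free);
        return 0;
    }
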
  tcp_bbr.c
    304  u32 segs, bytes;  in bbr_tso_segs_goal() local
    312  segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));  in bbr_tso_segs_goal()
    314  return min(segs, 0x7FU);  in bbr_tso_segs_goal()

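The bbr_tso_segs_goal() hits reduce to one clamp: turn a byte budget into a segment count, bounded below by the minimum TSO burst and above by 0x7F (127 segments). A standalone sketch of that arithmetic (tso_segs_goal() and its parameters are illustrative; the real function derives the byte budget from the socket's pacing rate):

    #include <stdint.h>
    #include <stdio.h>

    /* Shape of the matched lines: segs = max(bytes / mss, min_tso_segs),
     * capped at 0x7F. */
    static uint32_t tso_segs_goal(uint32_t bytes, uint32_t mss, uint32_t min_tso_segs)
    {
        uint32_t segs = bytes / mss;
        if (segs < min_tso_segs)
            segs = min_tso_segs;              /* max_t(u32, bytes / mss, min) */
        return segs < 0x7FU ? segs : 0x7FU;   /* min(segs, 0x7FU) */
    }

    int main(void)
    {
        printf("%u\n", tso_segs_goal(65536, 1448, 2));    /* 45 */
        printf("%u\n", tso_segs_goal(1000, 1448, 2));     /* clamped up to 2 */
        printf("%u\n", tso_segs_goal(1 << 20, 1448, 2));  /* clamped down to 127 */
        return 0;
    }
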
/kernel/linux/linux-6.6/net/ipv4/
  af_inet.c
    1371  struct sk_buff *segs = ERR_PTR(-EINVAL);  in inet_gso_segment() local
    1405  segs = ERR_PTR(-EPROTONOSUPPORT);  in inet_gso_segment()
    1418  segs = ops->callbacks.gso_segment(skb, features);  in inet_gso_segment()
    1419  if (!segs)  in inet_gso_segment()
    1423  if (IS_ERR_OR_NULL(segs))  in inet_gso_segment()
    1426  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in inet_gso_segment()
    1428  skb = segs;  in inet_gso_segment()
    1463  return segs;  in inet_gso_segment()

  ip_output.c
    248  struct sk_buff *segs, *nskb;  in ip_finish_output_gso() local
    272  segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);  in ip_finish_output_gso()
    273  if (IS_ERR_OR_NULL(segs)) {  in ip_finish_output_gso()
    280  skb_list_walk_safe(segs, segs, nskb) {  in ip_finish_output_gso()
    283  skb_mark_not_on_list(segs);  in ip_finish_output_gso()
    284  err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);  in ip_finish_output_gso()

  tcp_bbr.c
    306  u32 segs, bytes;  in bbr_tso_segs_goal() local
    314  segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));  in bbr_tso_segs_goal()
    316  return min(segs, 0x7FU);  in bbr_tso_segs_goal()

/kernel/linux/linux-5.10/arch/x86/kvm/vmx/
  vmx.c
    2814  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);  in enter_pmode()
    2815  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);  in enter_pmode()
    2816  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);  in enter_pmode()
    2817  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);  in enter_pmode()
    2818  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);  in enter_pmode()
    2819  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);  in enter_pmode()
    2823  vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);  in enter_pmode()
    2835  fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);  in enter_pmode()
    2836  fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);  in enter_pmode()
    2837  fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_E  in enter_pmode()
    [all...]

/kernel/linux/linux-6.6/arch/x86/kvm/vmx/
  vmx.c
    3011  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);  in enter_pmode()
    3012  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);  in enter_pmode()
    3013  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);  in enter_pmode()
    3014  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);  in enter_pmode()
    3015  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);  in enter_pmode()
    3016  vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);  in enter_pmode()
    3020  __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);  in enter_pmode()
    3032  fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);  in enter_pmode()
    3033  fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);  in enter_pmode()
    3034  fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_E  in enter_pmode()
    [all...]

/kernel/linux/linux-5.10/net/netfilter/
  nfnetlink_queue.c
    780  struct sk_buff *skb, *segs, *nskb;  in nfqnl_enqueue_packet() local
    808  segs = skb_gso_segment(skb, 0);  in nfqnl_enqueue_packet()
    813  if (IS_ERR_OR_NULL(segs))  in nfqnl_enqueue_packet()
    817  skb_list_walk_safe(segs, segs, nskb) {  in nfqnl_enqueue_packet()
    820  segs, entry);  in nfqnl_enqueue_packet()
    824  kfree_skb(segs);  in nfqnl_enqueue_packet()

/kernel/linux/linux-6.6/fs/nfs/
  nfs42xdr.c
    1049  struct read_plus_segment *segs;  in decode_read_plus() local
    1067  segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);  in decode_read_plus()
    1068  if (!segs)  in decode_read_plus()
    1072  status = decode_read_plus_segment(xdr, &segs[i]);  in decode_read_plus()
    1079  res->count += process_read_plus_segment(xdr, args, res, &segs[i-1]);  in decode_read_plus()
    1083  kfree(segs);  in decode_read_plus()

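The decode_read_plus() hits follow a common shape: allocate an array sized by a decoded count with the overflow-checked kmalloc_array(), decode into each slot, accumulate a result, and kfree() the scratch array on every path. A userspace sketch of the same shape (decode_one() and decode_segments() are invented stand-ins; calloc() plays the kmalloc_array() role since both reject nmemb * size overflow):

    #include <stdio.h>
    #include <stdlib.h>

    struct segment { unsigned long offset, length; };

    /* Hypothetical decoder standing in for decode_read_plus_segment(). */
    static int decode_one(struct segment *seg, unsigned idx)
    {
        seg->offset = idx * 4096UL;
        seg->length = 4096UL;
        return 0;
    }

    /* Allocate, decode each entry, accumulate, free on all paths. */
    static int decode_segments(unsigned segments, unsigned long *total)
    {
        struct segment *segs = calloc(segments, sizeof(*segs));
        unsigned i;
        int status = 0;

        if (!segs)
            return -1;          /* -ENOMEM in the kernel version */

        for (i = 0; i < segments; i++) {
            status = decode_one(&segs[i], i);
            if (status)
                break;
            *total += segs[i].length;
        }

        free(segs);             /* kfree(segs) */
        return status;
    }

    int main(void)
    {
        unsigned long total = 0;
        if (decode_segments(4, &total) == 0)
            printf("decoded %lu bytes\n", total);
        return 0;
    }
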
/kernel/linux/linux-6.6/net/netfilter/
  nfnetlink_queue.c
    808  struct sk_buff *skb, *segs, *nskb;  in nfqnl_enqueue_packet() local
    836  segs = skb_gso_segment(skb, 0);  in nfqnl_enqueue_packet()
    841  if (IS_ERR_OR_NULL(segs))  in nfqnl_enqueue_packet()
    845  skb_list_walk_safe(segs, segs, nskb) {  in nfqnl_enqueue_packet()
    848  segs, entry);  in nfqnl_enqueue_packet()
    852  kfree_skb(segs);  in nfqnl_enqueue_packet()

/third_party/vk-gl-cts/external/amber/src/src/amberscript/
  parser.cc
    202   const auto& segs = fmt->GetSegments();  in ParseBufferData() local
    222   while (segs[seg_idx].IsPadding()) {  in ParseBufferData()
    224   if (seg_idx >= segs.size())  in ParseBufferData()
    229   if (type::Type::IsFloat(segs[seg_idx].GetFormatMode())) {  in ParseBufferData()
    247   if (seg_idx >= segs.size())  in ParseBufferData()
    253   while (segs[seg_idx].IsPadding()) {  in ParseBufferData()
    255   if (seg_idx >= segs.size())  in ParseBufferData()
    2914  const auto& segs = fmt->GetSegments();  in ParseValues() local
    2919  while (segs[seg_idx].IsPadding()) {  in ParseValues()
    2921  if (seg_idx >= segs  in ParseValues()
    [all...]

/kernel/linux/linux-6.6/include/net/
  gro.h
    447  static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)  in gro_normal_one() argument
    450  napi->rx_count += segs;  in gro_normal_one()

/kernel/linux/linux-6.6/tools/testing/selftests/net/
  srv6_hl2encap_red_l2vpn_test.sh
    422  encap seg6 mode "${mode}" segs "${policy}" \
    427  encap seg6 mode "${mode}" segs "${policy}" \

  srv6_hencap_red_l3vpn_test.sh
    484  encap seg6 mode "${mode}" segs "${policy}" \
    495  encap seg6 mode "${mode}" segs "${policy}" \

  srv6_end_dt46_l3vpn_test.sh
    137  # |cafe::2 |apply seg6 encap segs fc00:12:100::6046|
    141  # |10.0.0.2 |apply seg6 encap segs fc00:12:100::6046|
    150  # |cafe::4 |apply seg6 encap segs fc00:12:200::6046|
    154  # |10.0.0.4 |apply seg6 encap segs fc00:12:200::6046|
    173  # |cafe::1 |apply seg6 encap segs fc00:21:100::6046|
    177  # |10.0.0.1 |apply seg6 encap segs fc00:21:100::6046|
    186  # |cafe::3 |apply seg6 encap segs fc00:21:200::6046|
    190  # |10.0.0.3 |apply seg6 encap segs fc00:21:200::6046|
    347  encap seg6 mode encap segs ${vpn_sid} dev veth0
    349  encap seg6 mode encap segs
    [all...]

/third_party/musl/porting/linux/user/ldso/
  cfi.c
    148  if (a-pldso->loadmap->segs[i].p_vaddr  in is_addr_in_ldso()
    149  < pldso->loadmap->segs[i].p_memsz)  in is_addr_in_ldso()
  dynlink.c
    393   for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
    394   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
    402   size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
    403   size_t b = a + p->loadmap->segs[j].p_memsz;  in laddr_pg()
    409   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
    1315  if (!dso->loadmap->segs[i].p_memsz)  in unmap_library()
    1318  munmap((void *)dso->loadmap->segs[  in unmap_library()
    [all...]

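The laddr() hits implement load-map address translation: scan loadmap->segs[] for the segment whose [p_vaddr, p_vaddr + p_memsz) range contains the link-time address v, then rebase it onto the segment's actual mapping address. The single unsigned comparison v - p_vaddr >= p_memsz covers both "below the segment" (the subtraction wraps) and "past its end". A standalone sketch under simplified, made-up structures (not musl's real ones):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified load map: each segment records where it was linked
     * (p_vaddr, p_memsz) and where it actually landed (addr). Field names
     * echo the matched code, but this is only an illustration. */
    struct loadseg { uintptr_t addr, p_vaddr; size_t p_memsz; };
    struct loadmap { size_t nsegs; struct loadseg segs[4]; };

    /* Translate a link-time virtual address v into a runtime pointer.
     * The unsigned subtraction lets one comparison reject both addresses
     * below the segment and addresses past its end. */
    static void *laddr(const struct loadmap *map, uintptr_t v)
    {
        size_t j;
        for (j = 0; v - map->segs[j].p_vaddr >= map->segs[j].p_memsz; j++)
            ;   /* assumes some segment covers v, as the real code does */
        return (void *)(v - map->segs[j].p_vaddr + map->segs[j].addr);
    }

    int main(void)
    {
        struct loadmap map = { 2, {
            { 0x700000, 0x0000, 0x2000 },   /* text mapped at 0x700000 */
            { 0x900000, 0x3000, 0x1000 },   /* data mapped at 0x900000 */
        } };
        printf("%p\n", laddr(&map, 0x3010));   /* -> 0x900010 */
        return 0;
    }
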
/third_party/musl/libc-test/src/functionalext/unittest/
  unit_test_ldso_dynlink.c
    401  temp_dso.loadmap->segs[0].addr = (uintptr_t)p;  in dynlink_test_0250()
    402  temp_dso.loadmap->segs[0].p_memsz = 1;  in dynlink_test_0250()

/third_party/vk-gl-cts/external/amber/src/src/vkscript/
  parser.cc
    403  auto& segs = header.format->GetSegments();  in ProcessVertexDataBlock() local
    404  for (const auto& seg : segs) {  in ProcessVertexDataBlock()

/third_party/musl/ldso/linux/
  dynlink.c
    427   for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);  in laddr()
    428   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr()
    436   size_t a = p->loadmap->segs[j].p_vaddr;  in laddr_pg()
    437   size_t b = a + p->loadmap->segs[j].p_memsz;  in laddr_pg()
    443   return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);  in laddr_pg()
    1350  if (!dso->loadmap->segs[i].p_memsz)  in unmap_library()
    1353  munmap((void *)dso->loadmap->segs[  in unmap_library()
    [all...]

/kernel/linux/linux-5.10/block/
  blk-merge.c
    232  * @segs: [out] number of segments in the bio with the first half of the sectors
    249  unsigned *segs)  in blk_bio_segment_split()
    279  *segs = nsegs;  in blk_bio_segment_split()
    282  *segs = nsegs;  in blk_bio_segment_split()
    246  blk_bio_segment_split(struct request_queue *q, struct bio *bio, struct bio_set *bs, unsigned *segs)  blk_bio_segment_split() argument

/kernel/linux/linux-6.6/block/
  blk-merge.c
    263  * @segs: [out] number of segments in the bio with the first half of the sectors
    280  unsigned *segs, struct bio_set *bs, unsigned max_bytes)  in bio_split_rw()
    309  *segs = nsegs;  in bio_split_rw()
    322  *segs = nsegs;  in bio_split_rw()
    279  bio_split_rw(struct bio *bio, const struct queue_limits *lim, unsigned *segs, struct bio_set *bs, unsigned max_bytes)  bio_split_rw() argument