Lines Matching defs:pg_vec

525 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
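
A hedged sketch of the lookup the match at 525 sits in: a linear frame index is split into a block index and an offset within that block, and the block is resolved through pg_vec. The helper name pg_vec_frame is invented for this sketch; the index arithmetic and the frames_per_block / frame_size fields of struct packet_ring_buffer are reconstructed from context, not taken from the matches above.

	/* Hypothetical helper: map a frame index to that frame's kernel address. */
	static void *pg_vec_frame(const struct packet_ring_buffer *rb,
				  unsigned int position)
	{
		unsigned int pg_vec_pos = position / rb->frames_per_block;
		unsigned int frame_offset = position % rb->frames_per_block;

		/* Matched line 525: base of the chosen block, plus the
		 * frame's byte offset inside it.
		 */
		return rb->pg_vec[pg_vec_pos].buffer +
		       (frame_offset * rb->frame_size);
	}
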
615 struct pgv *pg_vec,
624 p1->pkbdq = pg_vec;
625 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
626 p1->pkblk_start = pg_vec[0].buffer;
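
For context, struct pgv (the element type behind every pg_vec above) is a one-pointer wrapper around a single ring block; in recent mainline trees it is defined in net/packet/internal.h. Quoted here for reference only, it is not one of the matches:

	struct pgv {
		char *buffer;	/* one ring block: page-allocator run or vmalloc area */
	};

So the TPACKET_V3 setup at 624-626 hands the whole vector to the block-descriptor queue and reinterprets block 0's buffer as the first struct tpacket_block_desc.
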
2521 if (likely(po->tx_ring.pg_vec)) {
2747 /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2750 if (unlikely(!po->tx_ring.pg_vec)) {
3113 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3116 if (data_race(po->tx_ring.pg_vec))
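
The matches at 2521, 2747-2750 and 3113-3116 trace one pattern: the sendmsg fast path peeks at tx_ring.pg_vec without pg_vec_lock (annotated with data_race()), and tpacket_snd() repeats the check under the lock before trusting it. A hedged, heavily trimmed reconstruction follows; only the two checks and their comments come from the matches, the bodies are placeholders, and packet_snd() (the non-ring path) is referenced but not shown:

	static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
	{
		int err;

		mutex_lock(&po->pg_vec_lock);

		/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
		 * we need to confirm it under protection of pg_vec_lock.
		 */
		if (unlikely(!po->tx_ring.pg_vec)) {
			err = -EBUSY;
			goto out;
		}

		/* ... build and transmit frames from the tx ring ... */
		err = 0;
	out:
		mutex_unlock(&po->pg_vec_lock);
		return err;
	}

	static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
	{
		struct packet_sock *po = pkt_sk(sock->sk);

		/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy,
		 * tpacket_snd() will redo the check safely.
		 */
		if (data_race(po->tx_ring.pg_vec))
			return tpacket_snd(po, msg);

		return packet_snd(sock, msg, len);
	}
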
3160 if (po->rx_ring.pg_vec) {
3165 if (po->tx_ring.pg_vec) {
3863 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3883 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3902 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3956 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4020 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4275 if (po->rx_ring.pg_vec) {
4283 if (po->tx_ring.pg_vec) {
4321 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4327 if (likely(pg_vec[i].buffer)) {
4328 if (is_vmalloc_addr(pg_vec[i].buffer))
4329 vfree(pg_vec[i].buffer);
4331 free_pages((unsigned long)pg_vec[i].buffer,
4333 pg_vec[i].buffer = NULL;
4336 kfree(pg_vec);
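
The matches at 4321-4336 are nearly the whole of free_pg_vec(); only the loop header and the else branch are filled in below as assumptions. Each block was obtained either from vmalloc or from the page allocator, so the matching release call is chosen per block:

	static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
				unsigned int len)
	{
		int i;

		for (i = 0; i < len; i++) {
			if (likely(pg_vec[i].buffer)) {
				if (is_vmalloc_addr(pg_vec[i].buffer))
					vfree(pg_vec[i].buffer);
				else
					free_pages((unsigned long)pg_vec[i].buffer,
						   order);
				pg_vec[i].buffer = NULL;
			}
		}
		kfree(pg_vec);
	}
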
4367 struct pgv *pg_vec;
4370 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4371 if (unlikely(!pg_vec))
4375 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4376 if (unlikely(!pg_vec[i].buffer))
4381 return pg_vec;
4384 free_pg_vec(pg_vec, order, block_nr);
4385 pg_vec = NULL;
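
Likewise, the matches at 4367-4385 cover most of alloc_pg_vec(); the loop and the goto labels are reconstructed here as assumptions. Note the cleanup path: any per-block allocation failure tears down the partially built vector with free_pg_vec() and returns NULL through the out label:

	static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
	{
		unsigned int block_nr = req->tp_block_nr;
		struct pgv *pg_vec;
		int i;

		pg_vec = kcalloc(block_nr, sizeof(struct pgv),
				 GFP_KERNEL | __GFP_NOWARN);
		if (unlikely(!pg_vec))
			goto out;

		for (i = 0; i < block_nr; i++) {
			pg_vec[i].buffer = alloc_one_pg_vec_page(order);
			if (unlikely(!pg_vec[i].buffer))
				goto out_free_pgvec;
		}

	out:
		return pg_vec;

	out_free_pgvec:
		free_pg_vec(pg_vec, order, block_nr);
		pg_vec = NULL;
		goto out;
	}
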
4392 struct pgv *pg_vec = NULL;
4419 if (unlikely(rb->pg_vec))
4460 pg_vec = alloc_pg_vec(req, order);
4461 if (unlikely(!pg_vec))
4467 init_prb_bdqc(po, rb, pg_vec, req_u);
4514 swap(rb->pg_vec, pg_vec);
4526 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4541 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4548 if (pg_vec) {
4550 free_pg_vec(pg_vec, order, req->tp_block_nr);
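
The matches at 4392-4550 sit in packet_set_ring() and show its swap-then-free discipline: the new vector is allocated up front, swap() publishes it into the ring buffer, and the local pg_vec variable ends up holding the old vector, which is freed only at the end (after the TPACKET_V3 retire-block timer shutdown at 4541). The helper below is hypothetical and only compresses that ordering; locking, size validation, error handling and the timer shutdown are left out:

	/* Hypothetical condensation of the matched statements; not the kernel's
	 * function.
	 */
	static void swap_in_new_ring(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     struct tpacket_req *req, unsigned int order)
	{
		struct pgv *pg_vec = alloc_pg_vec(req, order);

		if (unlikely(!pg_vec))
			return;

		swap(rb->pg_vec, pg_vec);		/* publish the new blocks */
		swap(rb->pg_vec_order, order);		/* order/len now describe */
		swap(rb->pg_vec_len, req->tp_block_nr);	/* ... the *old* ring     */

		/* Receive handler follows whether an rx ring now exists. */
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;

		if (pg_vec)	/* pg_vec now holds the old vector */
			free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
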
4574 if (rb->pg_vec) {
4590 if (rb->pg_vec == NULL)
4595 void *kaddr = rb->pg_vec[i].buffer;
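
Finally, the matches at 4574-4595 are part of packet_mmap(): every buffer in pg_vec is mapped into the calling process's VMA one page at a time. A hedged sketch of that inner walk; the helper name map_ring_into_vma is made up, pgv_to_page() is af_packet's own helper for vmalloc'ed vs. page-allocator blocks, and the pg_vec_len / pg_vec_pages bounds are taken from struct packet_ring_buffer rather than from the matches:

	static int map_ring_into_vma(struct packet_ring_buffer *rb,
				     struct vm_area_struct *vma,
				     unsigned long start)
	{
		unsigned int i, pg_num;
		int err;

		if (rb->pg_vec == NULL)		/* this ring was never set up */
			return 0;

		for (i = 0; i < rb->pg_vec_len; i++) {
			void *kaddr = rb->pg_vec[i].buffer;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				err = vm_insert_page(vma, start, pgv_to_page(kaddr));
				if (unlikely(err))
					return err;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
		return 0;
	}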