Lines matching refs: umem

139 	struct xsk_umem *umem;
146 struct xsk_umem_info *umem;
483 struct xsk_umem *umem = xsks[0]->umem->umem;
489 (void)xsk_umem__delete(umem);
785 static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
787 memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
793 struct xsk_umem_info *umem;
812 umem = calloc(1, sizeof(*umem));
813 if (!umem)
816 ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
821 umem->buffer = buffer;
822 return umem;
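
The xsk_configure_umem() matches above (lines 793-822) trace UMEM setup: allocate a small wrapper, register the packet buffer with the kernel through xsk_umem__create(), and keep the buffer pointer around for later address lookups. A minimal sketch of that pattern follows; the wrapper layout, the helper name configure_umem, and the NULL (default) UMEM configuration are illustrative assumptions, not the sample's exact code.

#include <stdlib.h>
#include <bpf/xsk.h>   /* or <xdp/xsk.h> when building against libxdp */

/* Hypothetical wrapper mirroring the sample's xsk_umem_info. */
struct xsk_umem_info {
        struct xsk_ring_prod fq;   /* fill ring */
        struct xsk_ring_cons cq;   /* completion ring */
        struct xsk_umem *umem;
        void *buffer;
};

static struct xsk_umem_info *configure_umem(void *buffer, __u64 size)
{
        struct xsk_umem_info *umem;
        int ret;

        umem = calloc(1, sizeof(*umem));
        if (!umem)
                return NULL;

        /* Register the buffer as a UMEM; the fill and completion rings
         * are created by the same call. NULL config selects defaults. */
        ret = xsk_umem__create(&umem->umem, buffer, size,
                               &umem->fq, &umem->cq, NULL);
        if (ret) {
                free(umem);
                return NULL;
        }

        umem->buffer = buffer;
        return umem;
}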
825 static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
830 ret = xsk_ring_prod__reserve(&umem->fq,
835 *xsk_ring_prod__fill_addr(&umem->fq, idx++) =
837 xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
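
The xsk_populate_fill_ring() matches (825-837) show the fill ring being primed before any traffic flows: reserve slots, write one frame address per slot, submit. A hedged sketch, assuming a frame_size parameter and the xsk_umem_info wrapper and includes from the previous sketch:

/* Prime the fill ring with frame addresses so the kernel has buffers
 * to receive into. Reserves twice the default descriptor count, as the
 * matched lines do. */
static void populate_fill_ring(struct xsk_umem_info *umem, __u64 frame_size)
{
        __u32 idx;
        unsigned int i;

        /* reserve() is all-or-nothing: a short return means no space. */
        if (xsk_ring_prod__reserve(&umem->fq,
                                   XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
                                   &idx) != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
                return;

        /* Hand one frame address to the kernel per reserved slot. */
        for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
                *xsk_ring_prod__fill_addr(&umem->fq, idx++) = i * frame_size;

        xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}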
840 static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
853 xsk->umem = umem;
865 ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
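
The xsk_configure_socket() matches (840-865) bind an AF_XDP socket to an interface queue on top of the UMEM via xsk_socket__create(). A sketch under the same assumptions; the xsk_socket_info layout, the default ring sizes, and the zeroed flags are illustrative, not necessarily what the sample passes:

/* Hypothetical per-socket wrapper, mirroring the sample's xsk_socket_info. */
struct xsk_socket_info {
        struct xsk_ring_cons rx;
        struct xsk_ring_prod tx;
        struct xsk_umem_info *umem;
        struct xsk_socket *xsk;
};

static struct xsk_socket_info *configure_socket(struct xsk_umem_info *umem,
                                                const char *ifname,
                                                __u32 queue_id)
{
        struct xsk_socket_config cfg = {
                .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                .libbpf_flags = 0,
                .xdp_flags = 0,
                .bind_flags = 0,
        };
        struct xsk_socket_info *xsk;
        int ret;

        xsk = calloc(1, sizeof(*xsk));
        if (!xsk)
                return NULL;

        xsk->umem = umem;
        /* Create the socket and bind it to ifname/queue_id, sharing the
         * already-registered UMEM. */
        ret = xsk_socket__create(&xsk->xsk, ifname, queue_id, umem->umem,
                                 &xsk->rx, &xsk->tx, &cfg);
        if (ret) {
                free(xsk);
                return NULL;
        }
        return xsk;
}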
903 {"shared-umem", no_argument, 0, 'M'},
936 " -M, --shared-umem Enable XDP_SHARED_UMEM\n"
1104 struct xsk_umem_info *umem = xsk->umem;
1126 rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
1131 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
1135 if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
1139 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
1143 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
1144 *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);
1146 xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
1147 xsk_ring_cons__release(&xsk->umem->cq, rcvd);
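
The TX-completion matches (1104-1147) peek the completion ring, reserve the same number of fill-ring slots, copy the completed frame addresses across, then submit and release. A simplified sketch of that recycling loop; the real code also issues a wakeup (e.g. a recvfrom() on the socket) whenever xsk_ring_prod__needs_wakeup() reports one is needed, as lines 1135-1139 show:

/* Recycle completed TX frames back onto the fill ring so they can be
 * reused for RX. Uses the wrapper structs assumed above. */
static void recycle_completions(struct xsk_umem_info *umem, unsigned int ndescs)
{
        __u32 idx_cq = 0, idx_fq = 0;
        unsigned int rcvd, i;

        /* Harvest completed TX descriptors from the completion ring. */
        rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
        if (!rcvd)
                return;

        /* Reserve matching fill-ring slots; retry until they fit.
         * (The sample additionally kicks the kernel here when the fill
         * ring signals it needs a wakeup.) */
        while (xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq) != rcvd)
                ;

        /* Hand every completed frame address back to the kernel. */
        for (i = 0; i < rcvd; i++)
                *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
                        *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

        xsk_ring_prod__submit(&umem->fq, rcvd);
        xsk_ring_cons__release(&umem->cq, rcvd);
}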
1167 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
1169 xsk_ring_cons__release(&xsk->umem->cq, rcvd);
1183 if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
1190 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
1194 if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
1198 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
1207 char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
1210 *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
1213 xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
1347 if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
1372 char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
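
The remaining data-path matches resolve descriptor addresses into the UMEM with xsk_umem__get_data() (lines 1207 and 1372). A sketch of an RX pass doing the same lookup; the helper name and batch handling are assumptions, and a complete receive path would also return the addresses to the fill ring:

/* Walk one batch of received descriptors and resolve each UMEM address
 * into a pointer inside the mapped buffer. */
static void rx_pass(struct xsk_socket_info *xsk, unsigned int batch)
{
        __u32 idx_rx = 0;
        unsigned int rcvd, i;

        rcvd = xsk_ring_cons__peek(&xsk->rx, batch, &idx_rx);
        if (!rcvd)
                return;

        for (i = 0; i < rcvd; i++) {
                const struct xdp_desc *desc =
                        xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
                char *pkt = xsk_umem__get_data(xsk->umem->buffer, desc->addr);

                /* ... process desc->len bytes at pkt ... */
                (void)pkt;
        }

        xsk_ring_cons__release(&xsk->rx, rcvd);
}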
1470 struct xsk_umem_info *umem;
1487 /* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
1497 umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
1500 xsk_populate_fill_ring(umem);
1505 xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);
1511 gen_eth_frame(umem, i * opt_xsk_frame_size);
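
Finally, the main() matches (1470-1511) fix the setup order: reserve backing memory for the UMEM, configure it, populate the fill ring, create the socket(s), and pre-generate Ethernet frames for TX. A sketch of that order using the hypothetical helpers above; NUM_FRAMES and FRAME_SIZE stand in for the sample's option-driven values, and error cleanup is omitted:

#include <sys/mman.h>

#define NUM_FRAMES 4096
#define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

static int setup(const char *ifname, __u32 queue_id,
                 struct xsk_socket_info **out)
{
        struct xsk_umem_info *umem;
        void *bufs;

        /* Page-aligned, zeroed backing memory for the UMEM; the sample
         * switches to hugepages in unaligned-chunk mode. */
        bufs = mmap(NULL, (size_t)NUM_FRAMES * FRAME_SIZE,
                    PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (bufs == MAP_FAILED)
                return -1;

        umem = configure_umem(bufs, (__u64)NUM_FRAMES * FRAME_SIZE);
        if (!umem)
                return -1;

        populate_fill_ring(umem, FRAME_SIZE);

        *out = configure_socket(umem, ifname, queue_id);
        return *out ? 0 : -1;
}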