Lines matching refs:umem (xsk.c, the AF_XDP socket helper library)

72 	struct xsk_umem *umem;
92 int xsk_umem__fd(const struct xsk_umem *umem)
94 return umem ? umem->fd : -EINVAL;
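
The accessor is total: a NULL umem yields -EINVAL instead of a crash, so callers may pass an unchecked pointer. A minimal caller-side sketch; the include name is an assumption (it is the header this xsk.c implements):

#include "xsk.h"	/* assumed header matching this xsk.c */

int fd = xsk_umem__fd(umem);
if (fd < 0)		/* -EINVAL: umem was NULL */
	return fd;
/* This fd is also what a shared bind uses as sxdp_shared_umem_fd (line 672). */
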
161 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
170 &umem->config.fill_size,
171 sizeof(umem->config.fill_size));
176 &umem->config.comp_size,
177 sizeof(umem->config.comp_size));
185 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
191 fill->mask = umem->config.fill_size - 1;
192 fill->size = umem->config.fill_size;
197 fill->cached_cons = umem->config.fill_size;
199 map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
207 comp->mask = umem->config.comp_size - 1;
208 comp->size = umem->config.comp_size;
217 munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
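
The ring setup above reduces to three syscalls per ring: a setsockopt to size it, a getsockopt for the mmap layout, then the mmap itself (lines 170-199). A minimal sketch of the fill-ring half, assuming only the uapi definitions in <linux/if_xdp.h>; error handling is trimmed, and pre-5.4 kernels (no flags field in xdp_ring_offset) would need the compatibility shim the real xsk_get_mmap_offsets() carries:

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <sys/mman.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Map an AF_XDP fill ring the way xsk_create_umem_rings() does. */
static __u64 *map_fill_ring(int fd, __u32 fill_size)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);
	void *map;

	/* Size the ring; must be a power of two (hence mask = size - 1). */
	if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
		       &fill_size, sizeof(fill_size)))
		return NULL;

	/* Learn where producer/consumer indices and descriptors live. */
	if (getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return NULL;

	map = mmap(NULL, off.fr.desc + fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		   fd, XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return NULL;

	return (__u64 *)((char *)map + off.fr.desc); /* descriptor array */
}
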
227 struct xsk_umem *umem;
235 umem = calloc(1, sizeof(*umem));
236 if (!umem)
239 umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
240 if (umem->fd < 0) {
245 umem->umem_area = umem_area;
246 INIT_LIST_HEAD(&umem->ctx_list);
247 xsk_set_umem_config(&umem->config, usr_config);
252 mr.chunk_size = umem->config.frame_size;
253 mr.headroom = umem->config.frame_headroom;
254 mr.flags = umem->config.flags;
256 err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
262 err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
266 umem->fill_save = fill;
267 umem->comp_save = comp;
268 *umem_ptr = umem;
272 close(umem->fd);
274 free(umem);
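
From a caller's perspective, the creation path above is: register a page-aligned buffer, and let a NULL config select the defaults that xsk_set_umem_config() fills in (line 247). A sketch; the frame count is a placeholder and error reporting is elided:

#include <stdlib.h>
#include <sys/mman.h>
#include "xsk.h"

#define NUM_FRAMES 4096

static struct xsk_umem *make_umem(struct xsk_ring_prod *fill,
				  struct xsk_ring_cons *comp)
{
	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_umem *umem;
	void *buf;

	/* Anonymous mmap gives the page alignment XDP_UMEM_REG expects. */
	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return NULL;

	/* NULL config: default fill/comp sizes and frame geometry. */
	if (xsk_umem__create(&umem, buf, size, fill, comp, NULL)) {
		munmap(buf, size);
		return NULL;
	}
	return umem;
}
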
456 static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
461 if (list_empty(&umem->ctx_list))
464 list_for_each_entry(ctx, &umem->ctx_list, list) {
476 struct xsk_umem *umem = ctx->umem;
486 err = xsk_get_mmap_offsets(umem->fd, &off);
490 munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
492 munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
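
xsk_get_ctx() is the cache behind shared umems: each (ifindex, queue_id) pair owns one refcounted ctx carrying its fill/completion rings. The struct layout below is inferred from the references in this file, the list helpers are the kernel's tools/include ones, and the lookup body is a sketch of the match:

#include <linux/types.h>
#include <linux/list.h>		/* tools/include list helpers */

/* Shape of struct xsk_ctx as implied by its uses above (inferred). */
struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	struct xsk_umem *umem;
	__u32 queue_id;
	int refcount;
	int ifindex;
	struct list_head list;
};

static struct xsk_ctx *lookup_ctx(struct xsk_umem *umem, int ifindex,
				  __u32 queue_id)
{
	struct xsk_ctx *ctx;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;	/* shared hit */
			return ctx;
		}
	}
	return NULL;	/* caller falls through to xsk_create_ctx() */
}
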
501 struct xsk_umem *umem, int ifindex,
513 if (!umem->fill_save) {
514 err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
519 } else if (umem->fill_save != fill || umem->comp_save != comp) {
521 memcpy(fill, umem->fill_save, sizeof(*fill));
522 memcpy(comp, umem->comp_save, sizeof(*comp));
527 ctx->umem = umem;
532 list_add(&ctx->list, &umem->ctx_list);
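
The branch at lines 513-522 is the ring ownership rule: the first socket consumes the rings mapped during xsk_umem__create(); once fill_save/comp_save are cleared (lines 684-685), every later shared socket maps a fresh pair on its own fd. Condensed into a sketch, not the verbatim code:

if (!umem->fill_save) {
	/* Saved rings already claimed: map new ones on this socket's fd. */
	err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
} else if (umem->fill_save != fill || umem->comp_save != comp) {
	/* Hand the rings from xsk_umem__create() to the caller's structs. */
	memcpy(fill, umem->fill_save, sizeof(*fill));
	memcpy(comp, umem->comp_save, sizeof(*comp));
}
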
538 __u32 queue_id, struct xsk_umem *umem,
553 if (!umem || !xsk_ptr || !(rx || tx))
556 unmap = umem->fill_save != fill;
566 if (umem->refcount++ > 0) {
573 xsk->fd = umem->fd;
574 rx_setup_done = umem->rx_ring_setup_done;
575 tx_setup_done = umem->tx_ring_setup_done;
578 ctx = xsk_get_ctx(umem, ifindex, queue_id);
585 ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
601 if (xsk->fd == umem->fd)
602 umem->rx_ring_setup_done = true;
612 if (xsk->fd == umem->fd)
613 umem->tx_ring_setup_done = true;
670 if (umem->refcount > 1) {
672 sxdp.sxdp_shared_umem_fd = umem->fd;
684 umem->fill_save = NULL;
685 umem->comp_save = NULL;
699 if (--umem->refcount)
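
End to end, sharing one umem across two queues looks roughly like this; the second create takes the refcount > 1 path (lines 670-672) and binds with the shared-umem fd. The ifindex, queue ids, and NULL configs are placeholders, and make_umem() is the sketch from above:

struct xsk_ring_prod fill0, fill1, tx0, tx1;
struct xsk_ring_cons comp0, comp1, rx0, rx1;
struct xsk_socket *xsk0, *xsk1;
struct xsk_umem *umem;
int err;

umem = make_umem(&fill0, &comp0);

/* First socket: reuses umem->fd and the saved fill0/comp0 rings. */
err = xsk_socket__create_shared(&xsk0, /*ifindex*/ 2, /*queue*/ 0, umem,
				&rx0, &tx0, &fill0, &comp0, NULL);

/* Second socket: own fd, fresh fill1/comp1 mapped on it, bound with
 * sxdp_shared_umem_fd = umem->fd. */
if (!err)
	err = xsk_socket__create_shared(&xsk1, /*ifindex*/ 2, /*queue*/ 1,
					umem, &rx1, &tx1, &fill1, &comp1,
					NULL);
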
707 __u32 queue_id, struct xsk_umem *umem,
711 if (!umem)
714 return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
715 rx, tx, umem->fill_save,
716 umem->comp_save, usr_config);
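
So the non-shared wrapper just replays the rings saved at umem creation, which is why plain xsk_socket__create() takes no fill/comp arguments. The common single-socket path, as a sketch with placeholder ifindex and queue:

struct xsk_ring_cons rx;
struct xsk_ring_prod tx;
struct xsk_socket *xsk;
int err;

/* fill_save/comp_save from xsk_umem__create() are forwarded for us. */
err = xsk_socket__create(&xsk, /*ifindex*/ 2, /*queue*/ 0, umem,
			 &rx, &tx, NULL);
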
719 int xsk_umem__delete(struct xsk_umem *umem)
724 if (!umem)
727 if (umem->refcount)
730 err = xsk_get_mmap_offsets(umem->fd, &off);
731 if (!err && umem->fill_save && umem->comp_save) {
732 munmap(umem->fill_save->ring - off.fr.desc,
733 off.fr.desc + umem->config.fill_size * sizeof(__u64));
734 munmap(umem->comp_save->ring - off.cr.desc,
735 off.cr.desc + umem->config.comp_size * sizeof(__u64));
738 close(umem->fd);
739 free(umem);
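
Teardown therefore runs in reverse creation order: the refcount test on line 727 makes xsk_umem__delete() bail out (-EBUSY in the full source) while any socket still holds the umem. A sketch, reusing the names from the creation sketches above:

xsk_socket__delete(xsk);	/* drops umem->refcount (line 772) */

if (xsk_umem__delete(umem))	/* would fail if sockets remained */
	/* handle error */;

munmap(buf, size);		/* the caller still owns the umem area */
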
748 struct xsk_umem *umem;
756 umem = ctx->umem;
772 umem->refcount--;
773 /* Do not close an fd that also has an associated umem connected
776 if (xsk->fd != umem->fd)
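
That final guard exists because the first socket borrowed umem->fd (line 573): closing it in xsk_socket__delete() would tear down the still-registered umem, so the close is deferred to xsk_umem__delete() (line 738). Illustrated with the xsk0/umem pair from the shared example:

#include <assert.h>

/* For the first socket on a umem, the two fds are the same object. */
assert(xsk_socket__fd(xsk0) == xsk_umem__fd(umem));

xsk_socket__delete(xsk0);	/* refcount--, fd left open */
xsk_umem__delete(umem);		/* close(umem->fd) happens here */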