Lines Matching refs:umem
(The excerpts below appear to come from the AF_XDP socket helpers in xsk.c; the search shows only the lines that mention umem, so surrounding code and the tails of multi-line comments are elided.)
65 struct xsk_umem *umem;
104 int xsk_umem__fd(const struct xsk_umem *umem)
106 return umem ? umem->fd : -EINVAL;
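For orientation, a minimal caller-side sketch (not part of the listing) of this accessor; note it returns a negative errno rather than dereferencing a NULL umem:

	/* Sketch: the fd is only meaningful once a umem exists. */
	int fd = xsk_umem__fd(umem);
	if (fd < 0)
		return fd;	/* -EINVAL, per line 106 */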
216 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
225 &umem->config.fill_size,
226 sizeof(umem->config.fill_size));
231 &umem->config.comp_size,
232 sizeof(umem->config.comp_size));
240 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
246 fill->mask = umem->config.fill_size - 1;
247 fill->size = umem->config.fill_size;
252 fill->cached_cons = umem->config.fill_size;
254 map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
262 comp->mask = umem->config.comp_size - 1;
263 comp->size = umem->config.comp_size;
272 munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
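The match list skips the non-matching parts of xsk_create_umem_rings(); below is a hedged reconstruction of the fill-ring half, where the producer/consumer/flags assignments between lines 240 and 252 are an assumption inferred from the visible pattern and the XDP_MMAP_OFFSETS layout:

	/* Map the fill ring at its fixed page offset (line 240). */
	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;	/* line 246 */
	fill->size = umem->config.fill_size;		/* line 247 */
	fill->producer = map + off.fr.producer;		/* assumed elided lines */
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	/* Start cached_cons at fill_size so the first reservation sees an
	 * entirely free ring (line 252). */
	fill->cached_cons = umem->config.fill_size;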
282 struct xsk_umem *umem;
290 umem = calloc(1, sizeof(*umem));
291 if (!umem)
294 umem->fd = socket(AF_XDP, SOCK_RAW, 0);
295 if (umem->fd < 0) {
300 umem->umem_area = umem_area;
301 INIT_LIST_HEAD(&umem->ctx_list);
302 xsk_set_umem_config(&umem->config, usr_config);
307 mr.chunk_size = umem->config.frame_size;
308 mr.headroom = umem->config.frame_headroom;
309 mr.flags = umem->config.flags;
311 err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
317 err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
321 umem->fill_save = fill;
322 umem->comp_save = comp;
323 *umem_ptr = umem;
327 close(umem->fd);
329 free(umem);
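A minimal caller-side sketch of this creation path; the frame count and the use of posix_memalign() are illustrative assumptions, not taken from the listing (needs <stdlib.h>, <unistd.h>, and the xsk.h header):

	#define NUM_FRAMES 4096	/* arbitrary for the sketch */

	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp;
	struct xsk_umem *umem;
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *area;
	int err;

	/* XDP_UMEM_REG (line 311) requires a page-aligned buffer. */
	if (posix_memalign(&area, getpagesize(), size))
		return -ENOMEM;

	/* A NULL config selects the defaults that xsk_set_umem_config()
	 * applies on line 302. */
	err = xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
	if (err) {		/* already a negative errno */
		free(area);
		return err;
	}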
615 static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
620 if (list_empty(&umem->ctx_list))
623 list_for_each_entry(ctx, &umem->ctx_list, list) {
635 struct xsk_umem *umem = ctx->umem;
645 err = xsk_get_mmap_offsets(umem->fd, &off);
649 munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
651 munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
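The pointer arithmetic on lines 649-651 undoes the setup arithmetic: at creation the ring pointer was set to map + off.fr.desc, so subtracting off.fr.desc recovers the base address that mmap() returned, which is what munmap() expects. Restated as a sketch:

	/* ring was map + off.fr.desc at setup, so this is the mmap base. */
	munmap(ctx->fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));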
660 struct xsk_umem *umem, int ifindex,
672 if (!umem->fill_save) {
673 err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
678 } else if (umem->fill_save != fill || umem->comp_save != comp) {
680 memcpy(fill, umem->fill_save, sizeof(*fill));
681 memcpy(comp, umem->comp_save, sizeof(*comp));
686 ctx->umem = umem;
693 list_add(&ctx->list, &umem->ctx_list);
699 __u32 queue_id, struct xsk_umem *umem,
714 if (!umem || !xsk_ptr || !(rx || tx))
717 unmap = umem->fill_save != fill;
734 if (umem->refcount++ > 0) {
741 xsk->fd = umem->fd;
742 rx_setup_done = umem->rx_ring_setup_done;
743 tx_setup_done = umem->tx_ring_setup_done;
746 ctx = xsk_get_ctx(umem, ifindex, queue_id);
753 ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
770 if (xsk->fd == umem->fd)
771 umem->rx_ring_setup_done = true;
781 if (xsk->fd == umem->fd)
782 umem->tx_ring_setup_done = true;
839 if (umem->refcount > 1) {
841 sxdp.sxdp_shared_umem_fd = umem->fd;
859 umem->fill_save = NULL;
860 umem->comp_save = NULL;
874 if (--umem->refcount)
882 __u32 queue_id, struct xsk_umem *umem,
886 if (!umem)
889 return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
890 rx, tx, umem->fill_save,
891 umem->comp_save, usr_config);
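Caller-side, this wrapper keeps the single-socket case simple; a sketch in which the interface name and queue id are placeholder assumptions:

	struct xsk_socket *xsk;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	int err;

	/* The first socket on a umem consumes the saved fill/comp rings
	 * (lines 890-891); further sockets sharing the umem must go
	 * through xsk_socket__create_shared() with fresh rings. */
	err = xsk_socket__create(&xsk, "eth0", 0 /* queue_id */, umem,
				 &rx, &tx, NULL);
	if (err)
		return err;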
894 int xsk_umem__delete(struct xsk_umem *umem)
899 if (!umem)
902 if (umem->refcount)
905 err = xsk_get_mmap_offsets(umem->fd, &off);
906 if (!err && umem->fill_save && umem->comp_save) {
907 munmap(umem->fill_save->ring - off.fr.desc,
908 off.fr.desc + umem->config.fill_size * sizeof(__u64));
909 munmap(umem->comp_save->ring - off.cr.desc,
910 off.cr.desc + umem->config.comp_size * sizeof(__u64));
913 close(umem->fd);
914 free(umem);
923 struct xsk_umem *umem;
931 umem = ctx->umem;
952 umem->refcount--;
953 /* Do not close an fd that also has an associated umem connected
956 if (xsk->fd != umem->fd)
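Teardown order follows from the refcount handling at lines 874 and 952; a closing sketch, assuming the single socket and umem from the sketches above:

	/* Sockets first: each delete drops umem->refcount (line 952). */
	xsk_socket__delete(xsk);

	/* With sockets still attached, xsk_umem__delete() stops at the
	 * refcount check on line 902; once the last socket is gone it
	 * unmaps the saved rings and closes the umem fd (lines 905-914). */
	err = xsk_umem__delete(umem);

	/* The umem buffer itself belongs to the caller (line 300) and is
	 * freed separately. */
	free(area);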