Lines Matching refs:rb
(identifier cross-reference listing; each match below is prefixed with its line number in the source file, which from the function names appears to be libbpf's ringbuf.c)
60 static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
63 munmap(r->consumer_pos, rb->page_size);
67 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
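The two munmap() sizes above encode the ring's memory layout: one page holding the consumer position, and a second mapping of one producer-position page followed by the data area mapped twice, so a record that wraps past the end of the ring stays virtually contiguous (mask + 1 is the ring size; BPF ringbuf sizes are powers of two). A minimal sketch of that arithmetic, with a hypothetical 4096-byte ring:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = getpagesize();
	size_t max_entries = 4096;	/* hypothetical ring size */
	size_t mask = max_entries - 1;	/* ring sizes are powers of two */

	/* one page for the consumer position (written by userspace,
	 * read by the kernel for BPF_MAP_TYPE_RINGBUF) */
	size_t consumer_sz = page_size;
	/* producer-position page plus the data area mapped twice, so a
	 * record crossing the end of the ring stays virtually contiguous */
	size_t producer_sz = page_size + 2 * (mask + 1);

	printf("consumer mapping:      %zu bytes\n", consumer_sz);
	printf("producer+data mapping: %zu bytes\n", producer_sz);
	return 0;
}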
75 int ring_buffer__add(struct ring_buffer *rb, int map_fd,
102 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
105 rb->rings = tmp;
107 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
110 rb->events = tmp;
115 rb->rings[rb->ring_cnt] = r;
123 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
136 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
142 tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
150 r->data = tmp + rb->page_size;
152 e = &rb->events[rb->ring_cnt];
156 e->data.fd = rb->ring_cnt;
157 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
164 rb->ring_cnt++;
168 ringbuf_free_ring(rb, r);
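ring_buffer__add() is the multiplexing hook: each extra BPF_MAP_TYPE_RINGBUF map gets its own struct ring, and the ring's index is stashed in the epoll event's data.fd (line 156) so ring_buffer__poll() can later map a ready event back to its ring. A hedged usage sketch; handle_event and the map fds are hypothetical:

#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* consume one record; a negative return stops consumption early */
	return 0;
}

/* rb was created with ring_buffer__new() for a first map;
 * extra_map_fd is another BPF_MAP_TYPE_RINGBUF map's fd. */
static int add_ring(struct ring_buffer *rb, int extra_map_fd)
{
	int err = ring_buffer__add(rb, extra_map_fd, handle_event, NULL);

	if (err)
		fprintf(stderr, "ring_buffer__add: %d\n", err);
	return err;
}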
172 void ring_buffer__free(struct ring_buffer *rb)
176 if (!rb)
179 for (i = 0; i < rb->ring_cnt; ++i)
180 ringbuf_free_ring(rb, rb->rings[i]);
181 if (rb->epoll_fd >= 0)
182 close(rb->epoll_fd);
184 free(rb->events);
185 free(rb->rings);
186 free(rb);
193 struct ring_buffer *rb;
199 rb = calloc(1, sizeof(*rb));
200 if (!rb)
203 rb->page_size = getpagesize();
205 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
206 if (rb->epoll_fd < 0) {
212 err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
216 return rb;
219 ring_buffer__free(rb);
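ring_buffer__new() ties the pieces together: allocate the context, create the epoll instance, register the first map via ring_buffer__add(), and unwind through ring_buffer__free() on any failure. A typical consumer lifetime, assuming map_fd refers to a BPF_MAP_TYPE_RINGBUF map:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	printf("sample of %zu bytes\n", size);
	return 0;
}

static int consume_loop(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;	/* constructors return NULL and set errno */

	/* loop until a real error; a real program would also check an
	 * exit flag. poll returns the number of records consumed. */
	while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0 ||
	       err == -EINTR)
		;

	ring_buffer__free(rb);
	return err;
}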
282 int ring_buffer__consume(struct ring_buffer *rb)
287 for (i = 0; i < rb->ring_cnt; i++) {
288 struct ring *ring = rb->rings[i];
304 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
309 cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
314 __u32 ring_id = rb->events[i].data.fd;
315 struct ring *ring = rb->rings[ring_id];
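ring_buffer__consume() drains every registered ring without blocking, while ring_buffer__poll() first epoll_wait()s and then consumes only the rings whose events fired, recovering each ring from the stashed data.fd index (line 314). Both return the number of records handled. A brief sketch of the two modes:

#include <errno.h>
#include <bpf/libbpf.h>

static int drain(struct ring_buffer *rb)
{
	/* non-blocking: walk all rings and consume whatever is pending */
	int n = ring_buffer__consume(rb);

	if (n < 0)
		return n;

	/* blocking (up to 100 ms): wait on the epoll fd, then consume
	 * only the rings that signaled readiness */
	n = ring_buffer__poll(rb, 100);
	if (n == -EINTR)
		n = 0;		/* interrupted by a signal; not fatal */
	return n;
}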
328 int ring_buffer__epoll_fd(const struct ring_buffer *rb)
330 return rb->epoll_fd;
333 struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
335 if (idx >= rb->ring_cnt)
338 return rb->rings[idx];
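ring_buffer__epoll_fd() exposes the internal epoll fd so the ring buffer can be driven from an external event loop, and ring_buffer__ring() hands back individual rings for introspection via the ring__* accessors that ship with it. A sketch, assuming a libbpf recent enough to provide ring__avail_data_size():

#include <stdio.h>
#include <sys/epoll.h>
#include <bpf/libbpf.h>

/* Nest rb's epoll fd inside an outer epoll-based event loop. */
static int attach_to_loop(int loop_epfd, struct ring_buffer *rb)
{
	struct epoll_event ev = {
		.events = EPOLLIN,
		.data.ptr = rb,
	};

	return epoll_ctl(loop_epfd, EPOLL_CTL_ADD,
			 ring_buffer__epoll_fd(rb), &ev);
}

/* Print the unconsumed backlog of every ring; ring_buffer__ring()
 * returns NULL once idx runs past ring_cnt (see line 335 above). */
static void report_backlog(struct ring_buffer *rb)
{
	struct ring *r;
	unsigned int i;

	for (i = 0; (r = ring_buffer__ring(rb, i)); i++)
		printf("ring %u: %zu bytes pending\n",
		       i, ring__avail_data_size(r));
}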
385 static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
387 if (rb->consumer_pos) {
388 munmap(rb->consumer_pos, rb->page_size);
389 rb->consumer_pos = NULL;
391 if (rb->producer_pos) {
392 munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
393 rb->producer_pos = NULL;
397 void user_ring_buffer__free(struct user_ring_buffer *rb)
399 if (!rb)
402 user_ringbuf_unmap_ring(rb);
404 if (rb->epoll_fd >= 0)
405 close(rb->epoll_fd);
407 free(rb);
410 static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
433 rb->map_fd = map_fd;
434 rb->mask = info.max_entries - 1;
437 tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
444 rb->consumer_pos = tmp;
451 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
457 map_fd, rb->page_size);
465 rb->producer_pos = tmp;
466 rb->data = tmp + rb->page_size;
468 rb_epoll = &rb->event;
470 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
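Note the protections are mirrored relative to the kernel-producer ringbuf above: for BPF_MAP_TYPE_USER_RINGBUF userspace is the producer, so the consumer-position page (advanced by the kernel) is mapped PROT_READ here, while the producer page and data area are writable (those PROT flags sit on the preceding source line, which doesn't itself mention rb and so is absent from this listing). A hand-rolled sketch of the same two mappings, assuming max_entries is already known:

#include <sys/mman.h>
#include <unistd.h>

static int map_user_ringbuf(int map_fd, size_t max_entries,
			    void **consumer_pos, void **producer_pos,
			    void **data)
{
	size_t page_size = getpagesize();
	void *tmp;

	/* kernel-owned consumer position: read-only for userspace */
	tmp = mmap(NULL, page_size, PROT_READ, MAP_SHARED, map_fd, 0);
	if (tmp == MAP_FAILED)
		return -1;
	*consumer_pos = tmp;

	/* producer-position page plus the double-mapped data area:
	 * writable, since userspace produces the records */
	tmp = mmap(NULL, page_size + 2 * max_entries,
		   PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, page_size);
	if (tmp == MAP_FAILED) {
		munmap(*consumer_pos, page_size);
		*consumer_pos = NULL;
		return -1;
	}
	*producer_pos = tmp;
	*data = (char *)tmp + page_size;
	return 0;
}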
482 struct user_ring_buffer *rb;
488 rb = calloc(1, sizeof(*rb));
489 if (!rb)
492 rb->page_size = getpagesize();
494 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
495 if (rb->epoll_fd < 0) {
501 err = user_ringbuf_map(rb, map_fd);
505 return rb;
508 user_ring_buffer__free(rb);
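The user-ringbuf constructor follows the same shape as ring_buffer__new(): calloc the context, create the epoll instance, map the ring, and unwind through user_ring_buffer__free() on failure. Creating one, assuming map_fd refers to a BPF_MAP_TYPE_USER_RINGBUF map:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct user_ring_buffer *open_user_rb(int map_fd)
{
	/* the opts argument is reserved; NULL selects the defaults */
	struct user_ring_buffer *rb = user_ring_buffer__new(map_fd, NULL);

	if (!rb)
		fprintf(stderr, "user_ring_buffer__new: %d\n", -errno);
	return rb;
}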
512 static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
518 hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
519 hdr = rb->data + (hdr_offset & rb->mask);
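The commit arithmetic above recovers the record header from a sample pointer: reserve hands out sample = data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & mask) (line 579), so stepping back BPF_RINGBUF_HDR_SZ (8) bytes finds the header, and adding mask + 1 first keeps the value from underflowing when the sample has wrapped to the start of the data area. A worked instance with a hypothetical 4096-byte ring:

#include <assert.h>

static void commit_offset_example(void)
{
	unsigned long mask = 4095;	/* 4096-byte ring */
	unsigned long sample_off = 0;	/* sample wrapped to data start */

	/* mask + 1 keeps the subtraction non-negative before masking */
	unsigned long hdr_off = (mask + 1 + sample_off - 8) & mask;

	/* the 8-byte header sits at the ring's tail; the double mapping
	 * still makes header + payload appear contiguous */
	assert(hdr_off == 4088);
}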
531 void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
533 user_ringbuf_commit(rb, sample, true);
536 void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
538 user_ringbuf_commit(rb, sample, false);
541 void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
555 cons_pos = smp_load_acquire(rb->consumer_pos);
557 prod_pos = smp_load_acquire(rb->producer_pos);
559 max_size = rb->mask + 1;
570 hdr = rb->data + (prod_pos & rb->mask);
577 smp_store_release(rb->producer_pos, prod_pos + total_size);
579 return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
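user_ring_buffer__reserve() is non-blocking: it re-reads both positions with acquire loads, fails with errno = ENOSPC when the ring is full (or E2BIG when size can never fit), writes a busy-marked header, and publishes the new producer position with a store-release; the returned pointer skips past the header. Samples are then finalized with submit or abandoned with discard, both funneling into user_ringbuf_commit(). A hedged produce-side sketch; struct my_msg is hypothetical:

#include <errno.h>
#include <string.h>
#include <bpf/libbpf.h>

struct my_msg {
	int kind;
	char text[32];
};

static int send_msg(struct user_ring_buffer *rb, int kind, const char *text)
{
	struct my_msg *m;

	/* NULL with errno = ENOSPC if the ring is currently full */
	m = user_ring_buffer__reserve(rb, sizeof(*m));
	if (!m)
		return -errno;

	if (strlen(text) >= sizeof(m->text)) {
		/* hand the space back without publishing the record */
		user_ring_buffer__discard(rb, m);
		return -E2BIG;
	}
	m->kind = kind;
	strcpy(m->text, text);

	/* clear the record's busy bit so the kernel side can consume it */
	user_ring_buffer__submit(rb, m);
	return 0;
}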
592 void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
612 sample = user_ring_buffer__reserve(rb, size);
629 cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
645 return user_ring_buffer__reserve(rb, size);
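user_ring_buffer__reserve_blocking() wraps the non-blocking reserve in an epoll_wait() loop on the single registered event, retrying as the kernel drains the ring until the sample fits or timeout_ms (-1 meaning wait forever) is spent, with one final reserve attempt after the deadline. For example:

#include <errno.h>
#include <string.h>
#include <bpf/libbpf.h>

static int send_blocking(struct user_ring_buffer *rb,
			 const void *payload, __u32 size)
{
	/* wait up to two seconds for the kernel to free enough space */
	void *slot = user_ring_buffer__reserve_blocking(rb, size, 2000);

	if (!slot)
		return -errno;	/* e.g. ENOSPC if space never freed up */

	memcpy(slot, payload, size);
	user_ring_buffer__submit(rb, slot);
	return 0;
}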