Lines matching defs:rb (libbpf ringbuf.c; rb is the struct ring_buffer manager)

42 static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
45 munmap(r->consumer_pos, rb->page_size);
49 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
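
The two munmap() calls above undo the per-ring mappings created in ring_buffer__add(): the consumer position is a single writable page, and the producer mapping covers the producer page plus twice the data area (r->mask + 1 is the data size). A minimal sketch of that pairing, using a hypothetical demo_ring struct that mirrors libbpf's internal struct ring:

    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical mirror of libbpf's internal per-ring bookkeeping:
     * consumer_pos and producer_pos are separate mappings, and mask is
     * data_size - 1 for a power-of-two data size.
     */
    struct demo_ring {
        void *consumer_pos;     /* one read-write page */
        void *producer_pos;     /* read-only page + 2 * data area */
        unsigned long mask;     /* data_size - 1 */
    };

    /* Unmap with exactly the lengths used at mmap() time: page_size for
     * the consumer page, page_size + 2 * data_size for producer + data.
     */
    static void demo_unmap_ring(struct demo_ring *r)
    {
        size_t page_size = (size_t)getpagesize();

        if (r->consumer_pos) {
            munmap(r->consumer_pos, page_size);
            r->consumer_pos = NULL;
        }
        if (r->producer_pos) {
            munmap(r->producer_pos, page_size + 2 * (r->mask + 1));
            r->producer_pos = NULL;
        }
    }
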
55 int ring_buffer__add(struct ring_buffer *rb, int map_fd,
82 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
85 rb->rings = tmp;
87 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
90 rb->events = tmp;
92 r = &rb->rings[rb->ring_cnt];
101 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
114 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
119 tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
122 ringbuf_unmap_ring(rb, r);
128 r->data = tmp + rb->page_size;
130 e = &rb->events[rb->ring_cnt];
134 e->data.fd = rb->ring_cnt;
135 if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
137 ringbuf_unmap_ring(rb, r);
143 rb->ring_cnt++;
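
ring_buffer__add() grows the rb->rings and rb->events arrays, maps the consumer page read-write at offset 0, maps the producer page plus the data area read-only at offset page_size, and registers the ring with the manager's epoll instance, storing the ring index in data.fd so ring_buffer__poll() can find it later. The data area is mapped at twice info.max_entries so a sample that wraps past the end of the ring can still be read as one contiguous block. A standalone sketch of just the mapping layout; demo_map_ring() is hypothetical, and the fd is assumed to refer to a BPF_MAP_TYPE_RINGBUF map:

    #include <errno.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    /* demo_map_ring() is a hypothetical helper mirroring the layout used
     * above: consumer page at offset 0 (writable), producer page + data at
     * offset page_size (read-only), data area mapped twice its size.
     */
    static int demo_map_ring(int map_fd, void **consumer, void **producer,
                             void **data)
    {
        struct bpf_map_info info;
        __u32 len = sizeof(info);
        size_t page_size = (size_t)getpagesize();
        size_t mmap_sz;
        void *tmp;
        int err;

        memset(&info, 0, sizeof(info));
        if (bpf_obj_get_info_by_fd(map_fd, &info, &len))
            return -errno;

        /* Consumer counter page: the only part user space may write. */
        tmp = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   map_fd, 0);
        if (tmp == MAP_FAILED)
            return -errno;
        *consumer = tmp;

        /* Producer page plus the data area mapped twice, so a sample that
         * wraps around the end of the ring stays contiguous in memory.
         */
        mmap_sz = page_size + 2 * (size_t)info.max_entries;
        tmp = mmap(NULL, mmap_sz, PROT_READ, MAP_SHARED, map_fd, page_size);
        if (tmp == MAP_FAILED) {
            err = -errno;
            munmap(*consumer, page_size);
            return err;
        }
        *producer = tmp;
        *data = (char *)tmp + page_size;
        return 0;
    }
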
147 void ring_buffer__free(struct ring_buffer *rb)
151 if (!rb)
154 for (i = 0; i < rb->ring_cnt; ++i)
155 ringbuf_unmap_ring(rb, &rb->rings[i]);
156 if (rb->epoll_fd >= 0)
157 close(rb->epoll_fd);
159 free(rb->events);
160 free(rb->rings);
161 free(rb);
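
ring_buffer__free() unmaps every registered ring, closes the epoll fd, and releases the arrays and the manager itself; the early return on !rb (line 151) makes it safe to call with NULL. A caller-side sketch of the single-cleanup-path pattern this enables; demo_once() and handle_sample() are hypothetical:

    #include <stddef.h>
    #include <bpf/libbpf.h>

    /* handle_sample() is a placeholder callback; return 0 to keep consuming. */
    static int handle_sample(void *ctx, void *data, size_t size)
    {
        return 0;
    }

    /* Because ring_buffer__free(NULL) returns immediately (the !rb check on
     * line 151), error paths can funnel through one unconditional cleanup
     * label. map_fd is assumed to be a BPF_MAP_TYPE_RINGBUF map fd.
     */
    static int demo_once(int map_fd)
    {
        struct ring_buffer *rb = NULL;
        int err;

        rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
        if (!rb) {
            err = -1;
            goto out;
        }

        err = ring_buffer__poll(rb, 100 /* ms */);
    out:
        ring_buffer__free(rb);
        return err;
    }
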
168 struct ring_buffer *rb;
174 rb = calloc(1, sizeof(*rb));
175 if (!rb)
178 rb->page_size = getpagesize();
180 rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
181 if (rb->epoll_fd < 0) {
187 err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
191 return rb;
194 ring_buffer__free(rb);
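
Lines 168-194 appear to be the constructor, ring_buffer__new(): allocate the manager, record the page size, create an epoll instance, register the first map via ring_buffer__add(), and tear everything down through ring_buffer__free() on failure. A usage sketch; process_event(), demo_setup(), events_fd and aux_fd are placeholders, and the fds are assumed to come from an already-loaded BPF object:

    #include <stddef.h>
    #include <bpf/libbpf.h>

    /* process_event() is a hypothetical callback; libbpf invokes it once per
     * submitted sample, with data/size covering the reserved record.
     */
    static int process_event(void *ctx, void *data, size_t size)
    {
        /* a real callback would cast data to the struct shared with BPF */
        return 0;
    }

    /* events_fd and aux_fd are assumed to be fds of BPF_MAP_TYPE_RINGBUF
     * maps, e.g. obtained from a loaded skeleton or bpf_object.
     */
    static struct ring_buffer *demo_setup(int events_fd, int aux_fd)
    {
        struct ring_buffer *rb;

        /* first map is registered by the constructor itself */
        rb = ring_buffer__new(events_fd, process_event, NULL, NULL);
        if (!rb)
            return NULL;

        /* further ringbuf maps share the same manager and epoll instance */
        if (ring_buffer__add(rb, aux_fd, process_event, NULL)) {
            ring_buffer__free(rb);
            return NULL;
        }
        return rb;
    }
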
257 int ring_buffer__consume(struct ring_buffer *rb)
262 for (i = 0; i < rb->ring_cnt; i++) {
263 struct ring *ring = &rb->rings[i];
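
ring_buffer__consume() loops over every registered ring unconditionally and processes whatever is pending, without touching the epoll fd, which suits busy-polling or draining rings already known to be non-empty. A sketch of that pattern; demo_drain() and the exiting flag are hypothetical:

    #include <stdbool.h>
    #include <bpf/libbpf.h>

    /* demo_drain() busy-polls: each consume() pass walks every ring and
     * returns how many samples were handled, or a negative error code.
     * exiting would typically be set from a signal handler.
     */
    static int demo_drain(struct ring_buffer *rb, volatile bool *exiting)
    {
        int n;

        while (!*exiting) {
            n = ring_buffer__consume(rb);
            if (n < 0)
                return n;   /* sample callback error or internal failure */
            /* n == 0 means nothing was pending; a real loop might back
             * off briefly here instead of spinning. */
        }
        return 0;
    }
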
279 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
284 cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
289 __u32 ring_id = rb->events[i].data.fd;
290 struct ring *ring = &rb->rings[ring_id];
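
ring_buffer__poll() blocks in epoll_wait() for up to timeout_ms and then processes only the rings that signaled readiness, looked up through the ring index stored in data.fd at add time. A sketch of a typical event loop, assuming current libbpf return conventions (0 on timeout, a positive sample count on success, a negative errno such as -EINTR on failure); demo_loop() and exiting are hypothetical:

    #include <errno.h>
    #include <stdbool.h>
    #include <bpf/libbpf.h>

    /* demo_loop() sleeps in epoll_wait() via ring_buffer__poll() for up to
     * 100 ms per iteration and consumes only rings that became readable.
     * exiting is a hypothetical flag set by a SIGINT/SIGTERM handler.
     */
    static int demo_loop(struct ring_buffer *rb, volatile bool *exiting)
    {
        int n;

        while (!*exiting) {
            n = ring_buffer__poll(rb, 100 /* timeout, ms */);
            if (n == -EINTR)        /* interrupted by a signal */
                continue;
            if (n < 0)
                return n;           /* callback error or epoll failure */
            /* n == 0: timeout expired with no data; n > 0: number of
             * samples passed to the callback. */
        }
        return 0;
    }
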