Lines Matching defs:buf
(The matches below are buffer-handling definitions and uses from the OMAP3 ISP statistics code, ispstat.c; the leading number on each line is its line number in the source file.)
61 struct ispstat_buffer *buf,
68 dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
69 dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
74 struct ispstat_buffer *buf,
81 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
86 struct ispstat_buffer *buf,
93 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
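The fragments above (lines 61-93) belong to a helper that syncs only the two MAGIC_SIZE guard regions instead of the whole buffer, taking the sync routine as a function pointer so one body serves both the for-cpu and for-device wrappers. A minimal sketch of that pattern, assuming the stock dma_sync_single_range_for_cpu()/..._for_device() API; the function name here is illustrative:

    #include <linux/dma-mapping.h>

    /*
     * Sync only the guard words: one region at the head of the buffer and
     * one just past buf_size. The tail offset is split into its page part
     * (buf_size & PAGE_MASK) and in-page part (buf_size & ~PAGE_MASK)
     * because dma_sync_single_range_*() takes a base address plus offset.
     */
    static void sync_magic_words(struct device *dev, dma_addr_t dma_addr,
                                 u32 buf_size, enum dma_data_direction dir,
                                 void (*dma_sync)(struct device *, dma_addr_t,
                                                  unsigned long, size_t,
                                                  enum dma_data_direction))
    {
            dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
            dma_sync(dev, dma_addr + (buf_size & PAGE_MASK),
                     buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
    }

The calls at lines 81 and 93 suggest the two wrappers simply pass the matching dma_sync_single_range_*() routine as the callback.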
98 struct ispstat_buffer *buf)
101 buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
106 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
109 for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
121 for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
131 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
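Lines 98-131 come from the corresponding check: after syncing the guard regions for the CPU, both are scanned byte by byte, and any byte that no longer reads MAGIC_NUM means the hardware wrote outside its expected window. A sketch of the scan (the w/end cursor names follow the listing; the u8 element type and the error code are assumptions):

    /* Return 0 when both guard regions still hold MAGIC_NUM bytes. */
    static int check_magic_words(const u8 *virt_addr, u32 buf_size)
    {
            const u8 *w, *end;

            for (w = virt_addr, end = w + MAGIC_SIZE; w < end; w++)
                    if (*w != MAGIC_NUM)
                            return -EINVAL;

            for (w = virt_addr + buf_size, end = w + MAGIC_SIZE; w < end; w++)
                    if (*w != MAGIC_NUM)
                            return -EINVAL;

            return 0;
    }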
138 struct ispstat_buffer *buf)
143 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
147 * buf->buf_size is set only after the buffer is queued. For now the
151 memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
152 memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
154 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
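Lines 138-154 write those guard words in the first place. The comment at line 147 is the key detail: buf->buf_size is only valid once the buffer has been queued, so the size used to place the tail word must come from the current configuration instead. A sketch of the sequence; the DMA_BIDIRECTIONAL direction on the closing sync is an assumption, since the listing truncates that argument:

    static void insert_magic_words(struct ispstat *stat,
                                   struct ispstat_buffer *buf, u32 buf_size)
    {
            /* Give the CPU ownership of the guard regions before writing. */
            isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size,
                                            DMA_FROM_DEVICE);

            memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
            memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);

            /* Hand the regions back to the device. */
            isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
                                               DMA_BIDIRECTIONAL);
    }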
159 struct ispstat_buffer *buf)
164 dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
165 buf->sgt.nents, DMA_FROM_DEVICE);
169 struct ispstat_buffer *buf)
174 dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
175 buf->sgt.nents, DMA_FROM_DEVICE);
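Lines 159-175 sync the whole payload rather than just the guard words, going through the scatterlist view in buf->sgt instead of the single-range calls. One direction as a sketch; the device-direction wrapper at line 164 mirrors it with dma_sync_sg_for_device():

    /* Make the full statistics buffer visible to the CPU before reading. */
    static void stat_buf_sync_for_cpu(struct device *dev,
                                      struct ispstat_buffer *buf)
    {
            dma_sync_sg_for_cpu(dev, buf->sgt.sgl, buf->sgt.nents,
                                DMA_FROM_DEVICE);
    }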
183 stat->buf[i].empty = 1;
193 struct ispstat_buffer *curr = &stat->buf[i];
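Lines 183 and 193 are from the ring bookkeeping: one helper marks every slot empty, and isp_stat_buf_find_oldest() walks the same stat->buf[] array. The selection rule below (lowest frame_number among non-empty slots) is a guess for illustration only; the listing shows just the iteration variable:

    /* Hypothetical sketch; the driver's real criterion may differ. */
    static struct ispstat_buffer *find_oldest_buf(struct ispstat *stat,
                                                  unsigned int nbufs)
    {
            struct ispstat_buffer *oldest = NULL;
            unsigned int i;

            for (i = 0; i < nbufs; i++) {
                    struct ispstat_buffer *curr = &stat->buf[i];

                    if (curr->empty)
                            continue;
                    if (!oldest || curr->frame_number < oldest->frame_number)
                            oldest = curr;
            }

            return oldest;
    }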
282 struct ispstat_buffer *buf;
287 buf = isp_stat_buf_find_oldest(stat);
288 if (!buf) {
294 if (isp_stat_buf_check_magic(stat, buf)) {
299 buf->empty = 1;
306 stat->locked_buf = buf;
310 if (buf->buf_size > data->buf_size) {
318 isp_stat_buf_sync_for_cpu(stat, buf);
320 rval = copy_to_user(data->buf,
321 buf->virt_addr,
322 buf->buf_size);
328 buf = ERR_PTR(-EFAULT);
332 return buf;
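Lines 282-332 are the consumer path: take the oldest filled buffer, reject it if a guard word was clobbered, verify the caller's buffer is large enough, sync for the CPU, then copy_to_user(), with failures reported through ERR_PTR(). A condensed sketch; the locking and the stat->locked_buf bookkeeping are elided, and every error code except -EFAULT is an assumption:

    #include <linux/err.h>
    #include <linux/uaccess.h>

    static struct ispstat_buffer *stat_buf_get(struct ispstat *stat,
                                               struct omap3isp_stat_data *data)
    {
            struct ispstat_buffer *buf = isp_stat_buf_find_oldest(stat);

            if (!buf)
                    return ERR_PTR(-EBUSY);        /* nothing ready yet */

            if (isp_stat_buf_check_magic(stat, buf)) {
                    buf->empty = 1;                /* corrupted: recycle */
                    return ERR_PTR(-EIO);
            }

            if (buf->buf_size > data->buf_size)
                    return ERR_PTR(-EINVAL);       /* user buffer too small */

            isp_stat_buf_sync_for_cpu(stat, buf);
            if (copy_to_user(data->buf, buf->virt_addr, buf->buf_size))
                    return ERR_PTR(-EFAULT);

            return buf;
    }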
342 struct ispstat_buffer *buf = &stat->buf[i];
344 if (!buf->virt_addr)
347 sg_free_table(&buf->sgt);
349 dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
350 buf->dma_addr);
352 buf->dma_addr = 0;
353 buf->virt_addr = NULL;
354 buf->empty = 1;
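Lines 342-354 free in reverse order of allocation: the scatterlist view goes first, then the coherent allocation, and the descriptor is reset so a NULL virt_addr marks the slot unallocated, which is exactly the early-out test at line 344. Per buffer (the helper name is illustrative; the listing shows this body inside a loop over stat->buf[]):

    static void stat_buf_free_one(struct device *dev,
                                  struct ispstat_buffer *buf,
                                  unsigned int alloc_size)
    {
            if (!buf->virt_addr)
                    return;         /* never allocated, nothing to undo */

            sg_free_table(&buf->sgt);
            dma_free_coherent(dev, alloc_size, buf->virt_addr, buf->dma_addr);

            buf->dma_addr = 0;
            buf->virt_addr = NULL;
            buf->empty = 1;
    }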
365 struct ispstat_buffer *buf,
370 buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
372 if (!buf->virt_addr)
375 ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
378 dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
379 buf->virt_addr = NULL;
380 buf->dma_addr = 0;
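Lines 365-380 are the matching allocation: a coherent buffer plus a scatterlist view of it from dma_get_sgtable(), with the allocation rolled back if the table cannot be built. The GFP_KERNEL flag below is an assumption, as the listing truncates the dma_alloc_coherent() call:

    static int stat_buf_alloc_one(struct device *dev,
                                  struct ispstat_buffer *buf,
                                  unsigned int size)
    {
            int ret;

            buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                                GFP_KERNEL);
            if (!buf->virt_addr)
                    return -ENOMEM;

            /* The sg_table feeds the dma_sync_sg_*() wrappers above. */
            ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr,
                                  buf->dma_addr, size);
            if (ret < 0) {
                    dma_free_coherent(dev, size, buf->virt_addr,
                                      buf->dma_addr);
                    buf->virt_addr = NULL;
                    buf->dma_addr = 0;
            }

            return ret;
    }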
435 struct ispstat_buffer *buf = &stat->buf[i];
438 ret = isp_stat_bufs_alloc_one(dev, buf, size);
447 buf->empty = 1;
451 stat->subdev.name, i, &buf->dma_addr, buf->virt_addr);
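Lines 435-451 wrap the per-buffer allocator in a loop, marking each fresh buffer empty and logging its DMA and CPU addresses (the &buf->dma_addr argument at line 451 matches printk's %pad specifier for dma_addr_t). A sketch of the loop; the dev_dbg() wording and the leave-cleanup-to-the-caller error handling are assumptions:

    static int stat_bufs_alloc(struct device *dev, struct ispstat *stat,
                               unsigned int nbufs, unsigned int size)
    {
            unsigned int i;

            for (i = 0; i < nbufs; i++) {
                    struct ispstat_buffer *buf = &stat->buf[i];
                    int ret = isp_stat_bufs_alloc_one(dev, buf, size);

                    if (ret < 0)
                            return ret;

                    buf->empty = 1;
                    dev_dbg(dev, "%s: buffer %u allocated (%pad, %p)\n",
                            stat->subdev.name, i, &buf->dma_addr,
                            buf->virt_addr);
            }

            return 0;
    }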
484 struct ispstat_buffer *buf;
493 buf = isp_stat_buf_get(stat, data);
494 if (IS_ERR(buf)) {
496 return PTR_ERR(buf);
499 data->ts.tv_sec = buf->ts.tv_sec;
500 data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
501 data->config_counter = buf->config_counter;
502 data->frame_number = buf->frame_number;
503 data->buf_size = buf->buf_size;
505 buf->empty = 1;
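Lines 484-505 publish the metadata after a successful isp_stat_buf_get(): the kernel-side timestamp carries nanoseconds while the UAPI struct wants a timeval, hence the NSEC_PER_USEC scaling, and the buffer is marked empty once its contents have been handed over. The conversion, isolated into a hypothetical helper:

    /* buf->ts holds a timespec64-style nanosecond field. */
    static void stat_fill_data(struct omap3isp_stat_data *data,
                               const struct ispstat_buffer *buf)
    {
            data->ts.tv_sec = buf->ts.tv_sec;
            data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
            data->config_counter = buf->config_counter;
            data->frame_number = buf->frame_number;
            data->buf_size = buf->buf_size;
    }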
524 memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts));
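Line 524 is the 32-bit-time compat path. The time32 and native structs differ only in their leading timestamp, so after converting ts field by field, a single memcpy of sizeof(*data) - sizeof(data->ts) moves every remaining member, starting at ->buf, in one go. In context (assuming a native data64 local already filled by the 64-bit request path):

    /* Convert the timestamp explicitly, then bulk-copy the common tail. */
    data->ts.tv_sec = data64.ts.tv_sec;
    data->ts.tv_usec = data64.ts.tv_usec;
    memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts));

This only works because every field after ts has the same type and offset in both layouts; any padding difference would silently corrupt the copy.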
1057 stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
1058 if (!stat->buf)
1068 kfree(stat->buf);
1079 kfree(stat->buf);
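Lines 1057-1079 bracket the lifecycle: the descriptor array is kcalloc()ed at init, so it comes back zeroed and every slot starts with virt_addr == NULL (the "never allocated" state tested at line 344), and it is kfree()d on both the failure and teardown paths. A minimal sketch of the pairing, with illustrative names:

    #include <linux/slab.h>

    static int stat_init_bufs(struct ispstat *stat)
    {
            stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf),
                                GFP_KERNEL);
            return stat->buf ? 0 : -ENOMEM;
    }

    static void stat_free_bufs(struct ispstat *stat)
    {
            kfree(stat->buf);
            stat->buf = NULL;
    }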