Lines matching refs: chunk  (identifier cross-reference; the matches below are from the mlx4 ICM allocator, icm.c)
47 * per chunk. Note that the chunks are not necessarily in contiguous
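Only the lines containing "chunk" appear in this listing. For orientation, the fields referenced throughout it (chunk->sg, chunk->buf[i].{addr,dma_addr,size}, chunk->npages, chunk->nsg, chunk->coherent, icm->chunk_list) suggest bookkeeping structures roughly like the sketch below. This is a hedged reconstruction, not the verbatim header: the MLX4_ICM_CHUNK_LEN value, field ordering, and the mlx4_icm_buf name are assumptions.

#include <linux/list.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#define MLX4_ICM_CHUNK_LEN 7	/* placeholder value for illustration; the real constant is derived elsewhere in the driver */

/* One coherent slot, as implied by the chunk->buf[i].{addr,dma_addr,size} references. */
struct mlx4_icm_buf {
	void		*addr;
	size_t		 size;
	dma_addr_t	 dma_addr;
};

/*
 * One allocation chunk.  Per the comment at line 47, the memory behind a
 * chunk is not necessarily contiguous: it is tracked either as a
 * scatterlist (streaming DMA) or as an array of coherent buffers.
 */
struct mlx4_icm_chunk {
	struct list_head	list;		/* linked into mlx4_icm.chunk_list */
	int			npages;		/* slots used in sg[] / buf[] */
	int			nsg;		/* scatterlist entries actually DMA-mapped */
	bool			coherent;	/* selects which member below is live */
	union {
		struct scatterlist	sg[MLX4_ICM_CHUNK_LEN];
		struct mlx4_icm_buf	buf[MLX4_ICM_CHUNK_LEN];
	};
};

struct mlx4_icm {
	struct list_head	chunk_list;	/* list of mlx4_icm_chunk */
	int			refcount;	/* see the reference-taking comment at line 460 */
};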
55 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
59 if (chunk->nsg > 0)
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
63 for (i = 0; i < chunk->npages; ++i)
64 __free_pages(sg_page(&chunk->sg[i]),
65 get_order(chunk->sg[i].length));
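Lines 55-65 are the page-based teardown helper. Filling in the continuation lines that the refs listing omits, a plausible reconstruction is the following; the DMA_BIDIRECTIONAL direction is an assumption carried over from the dma_map_sg() calls that match later in this listing (lines 207-223).

/* Sketch: tear down a chunk whose slots were filled from the page allocator. */
static void mlx4_free_icm_pages(struct mlx4_dev *dev,
				struct mlx4_icm_chunk *chunk)
{
	int i;

	/* Undo the dma_map_sg() done when the chunk was filled, if any. */
	if (chunk->nsg > 0)
		dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
			     DMA_BIDIRECTIONAL);	/* direction assumed */

	/* Return each higher-order page block to the page allocator. */
	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->sg[i]),
			     get_order(chunk->sg[i].length));
}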
68 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
72 for (i = 0; i < chunk->npages; ++i)
74 chunk->buf[i].size,
75 chunk->buf[i].addr,
76 chunk->buf[i].dma_addr);
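Lines 68-76 are the coherent counterpart: each slot recorded in chunk->buf[] is handed back with dma_free_coherent(). A hedged reconstruction:

/* Sketch: release a chunk whose slots hold coherent DMA buffers. */
static void mlx4_free_icm_coherent(struct mlx4_dev *dev,
				   struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->buf[i].size,
				  chunk->buf[i].addr,
				  chunk->buf[i].dma_addr);
}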
81 struct mlx4_icm_chunk *chunk, *tmp;
86 list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
88 mlx4_free_icm_coherent(dev, chunk);
90 mlx4_free_icm_pages(dev, chunk);
92 kfree(chunk);
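Lines 81-92 show the free path walking chunk_list with the _safe iterator so each chunk descriptor can be kfree()d while iterating, dispatching on the coherent flag. Roughly as follows; the NULL check and the final kfree of the icm descriptor itself are assumptions about the omitted lines.

/* Sketch: free every chunk of an ICM area, then the area descriptor itself. */
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}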
136 struct mlx4_icm_chunk *chunk = NULL;
160 if (!chunk) {
161 chunk = kzalloc_node(sizeof(*chunk),
165 if (!chunk) {
166 chunk = kzalloc(sizeof(*chunk),
169 if (!chunk)
172 chunk->coherent = coherent;
175 sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
176 list_add_tail(&chunk->list, &icm->chunk_list);
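Lines 136-176 are the start of the allocation path: when there is no current chunk, a new descriptor is allocated preferentially on the device's NUMA node, falling back to any node, then marked coherent or not, given an initialized scatterlist, and appended to chunk_list. The same steps expressed as a stand-alone helper; the helper name, GFP handling, and numa-node parameter are illustrative assumptions.

/* Sketch: allocate and initialize one chunk descriptor (hypothetical helper). */
static struct mlx4_icm_chunk *mlx4_alloc_icm_chunk(struct mlx4_icm *icm,
						   gfp_t gfp_mask, int node,
						   int coherent)
{
	struct mlx4_icm_chunk *chunk;

	chunk = kzalloc_node(sizeof(*chunk), gfp_mask, node);
	if (!chunk) {
		/* Fall back to any node if the preferred node has no memory. */
		chunk = kzalloc(sizeof(*chunk), gfp_mask);
		if (!chunk)
			return NULL;
	}

	chunk->coherent = coherent;
	/* Guard assumed: sg[] and buf[] may share storage in the descriptor. */
	if (!coherent)
		sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
	list_add_tail(&chunk->list, &icm->chunk_list);

	return chunk;
}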
188 &chunk->buf[chunk->npages],
191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
202 ++chunk->npages;
205 ++chunk->nsg;
206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
207 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
208 chunk->sg, chunk->npages,
211 if (chunk->nsg <= 0)
215 if (chunk->npages == MLX4_ICM_CHUNK_LEN)
216 chunk = NULL;
221 if (!coherent && chunk) {
222 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
223 chunk->npages, DMA_BIDIRECTIONAL);
225 if (chunk->nsg <= 0)
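The remaining allocator matches (lines 188-225) fill the current chunk one slot at a time: a coherent allocation lands in chunk->buf[chunk->npages], a streaming one in chunk->sg[chunk->npages], npages is bumped, and for the non-coherent case dma_map_sg() is deferred until the chunk is full, plus one final mapping pass for a partially filled last chunk. A hedged sketch of that control flow; the mlx4_alloc_icm_pages()/mlx4_alloc_icm_coherent() signatures and the MLX4_ICM_ALLOC_SIZE start order are assumptions, and the order-reduction retry on allocation failure is omitted.

/* Sketch of the chunked allocation flow (simplified error handling). */
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order = get_order(MLX4_ICM_ALLOC_SIZE);	/* assumed per-slot size */
	int ret;

	icm = kzalloc(sizeof(*icm), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	INIT_LIST_HEAD(&icm->chunk_list);

	while (npages > 0) {
		if (!chunk) {
			chunk = mlx4_alloc_icm_chunk(icm, gfp_mask,
						     dev->numa_node, coherent);
			if (!chunk)
				goto fail;
		}

		/* Fill the next free slot of the current chunk. */
		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->buf[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
						   cur_order, gfp_mask,
						   dev->numa_node);
		if (ret)
			goto fail;

		++chunk->npages;

		if (coherent) {
			++chunk->nsg;	/* coherent buffers need no extra mapping */
		} else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			/* Chunk is full: map its whole scatterlist in one go. */
			chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
						chunk->sg, chunk->npages,
						DMA_BIDIRECTIONAL);
			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;	/* start a fresh chunk next iteration */

		npages -= 1 << cur_order;
	}

	/* Map the last, partially filled chunk. */
	if (!coherent && chunk) {
		chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
					chunk->npages, DMA_BIDIRECTIONAL);
		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}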
319 struct mlx4_icm_chunk *chunk;
335 list_for_each_entry(chunk, &icm->chunk_list, list) {
336 for (i = 0; i < chunk->npages; ++i) {
341 len = chunk->buf[i].size;
342 dma_addr = chunk->buf[i].dma_addr;
343 addr = chunk->buf[i].addr;
347 len = sg_dma_len(&chunk->sg[i]);
348 dma_addr = sg_dma_address(&chunk->sg[i]);
355 page = sg_page(&chunk->sg[i]);
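Lines 319-355 belong to the lookup path that maps a byte offset within an ICM area back to its backing memory by walking every chunk and every slot: coherent slots carry their own size/addr/dma_addr, streaming slots are queried through sg_dma_len()/sg_dma_address()/sg_page(). A simplified, hedged sketch of that walk; the real code tracks CPU and DMA offsets separately when the IOMMU coalesces entries, and, like this sketch, assumes lowmem pages.

/* Sketch: locate the memory backing byte 'offset' of an ICM area. */
static void *icm_find_addr(struct mlx4_icm *icm, u64 offset,
			   dma_addr_t *dma_out)
{
	struct mlx4_icm_chunk *chunk;
	u64 cur = 0;
	int i;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			dma_addr_t dma_addr;
			size_t len;
			void *addr;

			if (chunk->coherent) {
				/* Coherent slots record their own geometry. */
				len = chunk->buf[i].size;
				dma_addr = chunk->buf[i].dma_addr;
				addr = chunk->buf[i].addr;
			} else {
				/* Streaming slots: ask the mapped scatterlist. */
				len = sg_dma_len(&chunk->sg[i]);
				dma_addr = sg_dma_address(&chunk->sg[i]);
				addr = page_address(sg_page(&chunk->sg[i]));
			}

			if (offset < cur + len) {
				if (dma_out)
					*dma_out = dma_addr + (offset - cur);
				return addr + (offset - cur);
			}
			cur += len;
		}
	}

	return NULL;	/* offset lies beyond this ICM area */
}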
460 * Add a reference to this ICM chunk so that it never
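The last match, at line 460, is a comment from the table-initialization path; it presumably continues beyond what the listing shows to explain why the extra reference is taken (in the mainline driver, to keep ICM that backs reserved firmware objects from ever being freed). The pattern it describes is just a refcount bump on the backing mlx4_icm, roughly as below; the mlx4_icm_table layout, the icm[] indexing, and the helper name are assumptions.

/* Sketch of the extra-reference pattern behind the comment at line 460. */
static void mlx4_icm_pin_reserved(struct mlx4_icm_table *table, int i)
{
	/*
	 * Add a reference to this ICM chunk so that it never
	 * gets freed while reserved objects live in it.
	 */
	++table->icm[i]->refcount;
}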