Lines Matching defs:chunk (drivers/infiniband/hw/mthca/mthca_memfree.c)
48 * per chunk.
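Every matched line manipulates a per-chunk descriptor. Its definition is not among the matches, but from the way chunk->list, chunk->npages, chunk->nsg and chunk->mem[] are used below it presumably looks something like the sketch here; the MTHCA_ICM_CHUNK_LEN sizing (chosen so one descriptor fits a ~256-byte allocation) and the omission of the other mthca_icm bookkeeping fields are assumptions.

#include <linux/list.h>
#include <linux/scatterlist.h>

/*
 * Sketch of the per-chunk descriptor: a batch of page allocations
 * described by a short scatterlist.  MTHCA_ICM_CHUNK_LEN is assumed to
 * be chosen so the whole structure fits a ~256-byte allocation.
 */
#define MTHCA_ICM_CHUNK_LEN						\
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) /		\
	 sizeof(struct scatterlist))

struct mthca_icm_chunk {
	struct list_head   list;    /* links the chunk into icm->chunk_list   */
	int                npages;  /* mem[] entries holding allocated pages  */
	int                nsg;     /* mem[] entries currently mapped for DMA */
	struct scatterlist mem[MTHCA_ICM_CHUNK_LEN];
};

struct mthca_icm {
	struct list_head chunk_list; /* list of mthca_icm_chunk */
	/* refcount and other bookkeeping omitted from this sketch */
};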
64 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
68 if (chunk->nsg > 0)
69 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
73 __free_pages(sg_page(&chunk->mem[i]),
74 get_order(chunk->mem[i].length));
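Lines 64-74 are the non-coherent free path: if the chunk was ever handed to dma_map_sg() (nsg > 0), the whole scatterlist is unmapped in a single call, then every entry's higher-order page block is returned to the page allocator. A minimal reconstruction; the DMA direction and the wrapped continuation lines are not in the matches and are assumed.

static void mthca_free_icm_pages(struct mthca_dev *dev,
				 struct mthca_icm_chunk *chunk)
{
	int i;

	/* Undo the streaming mapping set up by dma_map_sg(), if any. */
	if (chunk->nsg > 0)
		dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
			     DMA_BIDIRECTIONAL);

	/* Free every higher-order page block backing this chunk. */
	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}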
77 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
81 for (i = 0; i < chunk->npages; ++i) {
82 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
83 lowmem_page_address(sg_page(&chunk->mem[i])),
84 sg_dma_address(&chunk->mem[i]));
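Lines 77-84 are the coherent counterpart. Coherent buffers cannot be batch-unmapped, so each scatterlist entry is released on its own, recovering the CPU address with lowmem_page_address() and handing back the bus address stored in the entry. Roughly:

static void mthca_free_icm_coherent(struct mthca_dev *dev,
				    struct mthca_icm_chunk *chunk)
{
	int i;

	/* Each entry came from a coherent allocation, so free them one by one. */
	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}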
90 struct mthca_icm_chunk *chunk, *tmp;
95 list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
97 mthca_free_icm_coherent(dev, chunk);
99 mthca_free_icm_pages(dev, chunk);
101 kfree(chunk);
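Lines 90-101 tie the two paths together: the chunk list is walked with the _safe iterator because each chunk is kfree()d as it is visited, and the caller's coherent flag selects which helper undoes the allocation. A sketch with the NULL check and the final kfree(icm) filled in (those lines are not among the matches):

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	/* _safe variant: each chunk is freed while we iterate. */
	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}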
141 struct mthca_icm_chunk *chunk = NULL;
158 if (!chunk) {
159 chunk = kmalloc(sizeof *chunk,
161 if (!chunk)
164 sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
165 chunk->npages = 0;
166 chunk->nsg = 0;
167 list_add_tail(&chunk->list, &icm->chunk_list);
175 &chunk->mem[chunk->npages],
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
182 ++chunk->npages;
185 ++chunk->nsg;
186 else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
187 chunk->nsg =
188 dma_map_sg(&dev->pdev->dev, chunk->mem,
189 chunk->npages,
192 if (chunk->nsg <= 0)
196 if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
197 chunk = NULL;
207 if (!coherent && chunk) {
208 chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
209 chunk->npages, DMA_BIDIRECTIONAL);
211 if (chunk->nsg <= 0)
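Lines 141-211 are the allocation side. Chunk descriptors are created lazily: whenever chunk is NULL, a fresh one is kmalloc'ed, its scatterlist initialized with sg_init_table(), and it is appended to icm->chunk_list; pages (or coherent buffers) are then added one scatterlist entry at a time. In the non-coherent case the scatterlist is handed to dma_map_sg() only when a chunk fills up (lines 186-192) and once more after the loop for the last, partially filled chunk (lines 207-211); coherent entries are already mapped, so only nsg is bumped (line 185). The sketch below reconstructs that flow; the function signature, the gfp-mask handling, the order-reduction retry and the helper mthca_alloc_icm_coherent() are inferred rather than shown in the matches.

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	INIT_LIST_HEAD(&icm->chunk_list);

	/* Try to allocate in big blocks first (256 KB assumed). */
	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			/* Start a new chunk and queue it on the ICM area. */
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		/* Don't allocate more than we still need. */
		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				/* Coherent buffers are mapped as they are made. */
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				/* Chunk is full: map its whole scatterlist. */
				chunk->nsg =
					dma_map_sg(&dev->pdev->dev, chunk->mem,
						   chunk->npages,
						   DMA_BIDIRECTIONAL);
				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;	/* force a new chunk next time */

			npages -= 1 << cur_order;
		} else {
			/* Allocation failed: retry with a smaller order. */
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	/* Map the final, partially filled chunk. */
	if (!coherent && chunk) {
		chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
					chunk->npages, DMA_BIDIRECTIONAL);
		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}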
281 struct mthca_icm_chunk *chunk;
297 list_for_each_entry(chunk, &icm->chunk_list, list) {
298 for (i = 0; i < chunk->npages; ++i) {
300 if (sg_dma_len(&chunk->mem[i]) > dma_offset)
301 *dma_handle = sg_dma_address(&chunk->mem[i]) +
303 dma_offset -= sg_dma_len(&chunk->mem[i]);
308 if (chunk->mem[i].length > offset) {
309 page = sg_page(&chunk->mem[i]);
312 offset -= chunk->mem[i].length;
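Lines 281-312 show the lookup that turns a byte offset within one ICM area into a CPU pointer and, optionally, a bus address. Two running offsets are decremented in parallel across every chunk and scatterlist entry: dma_offset against sg_dma_len()/sg_dma_address() on the DMA side, and offset against the entry length/sg_page() on the CPU side. DMA mapping may merge entries but never splits them, so by the time the CPU-side entry is found the DMA handle is already set. A sketch of just that walk (icm_find_obj is a hypothetical name; the real mthca_table_find also derives offset from the table's object index and holds the table mutex):

static void *icm_find_obj(struct mthca_icm *icm, int offset,
			  dma_addr_t *dma_handle)
{
	struct mthca_icm_chunk *chunk;
	int dma_offset = offset;
	int i;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			/* Track the bus address in parallel with the CPU walk. */
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}

			/* Found the scatterlist entry containing the object. */
			if (chunk->mem[i].length > offset)
				return lowmem_page_address(sg_page(&chunk->mem[i])) +
					offset;

			offset -= chunk->mem[i].length;
		}
	}

	return NULL;
}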
403 * Add a reference to this ICM chunk so that it never