Lines matching refs: batch (numbers are line numbers in the source file)

275 static void batch_clear(struct pfn_batch *batch)
277 batch->total_pfns = 0;
278 batch->end = 0;
279 batch->pfns[0] = 0;
280 batch->npfns[0] = 0;
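
Note: the definition of struct pfn_batch is outside the matched lines, but
the field accesses throughout this listing imply a run-length-encoded PFN
list along these lines (field names are taken from the listing itself; the
layout is an inference, not the verbatim kernel definition):

	struct pfn_batch {
		unsigned long *pfns;	/* start PFN of each contiguous run */
		u32 *npfns;		/* length of each run, in pages */
		unsigned int array_size; /* capacity of both arrays */
		unsigned int end;	/* number of runs currently stored */
		unsigned int total_pfns; /* sum of npfns[0..end-1] */
	};
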
285 * batch
287 static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
290 return batch_clear(batch);
293 WARN_ON(!batch->end ||
294 batch->npfns[batch->end - 1] < keep_pfns);
296 batch->total_pfns = keep_pfns;
297 batch->pfns[0] = batch->pfns[batch->end - 1] +
298 (batch->npfns[batch->end - 1] - keep_pfns);
299 batch->npfns[0] = keep_pfns;
300 batch->end = 1;
303 static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
305 if (!batch->total_pfns)
308 WARN_ON(batch->total_pfns != batch->npfns[0]);
309 skip_pfns = min(batch->total_pfns, skip_pfns);
310 batch->pfns[0] += skip_pfns;
311 batch->npfns[0] -= skip_pfns;
312 batch->total_pfns -= skip_pfns;
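
batch_clear_carry() keeps the final keep_pfns pages of the last run and
moves them to slot 0, so a partially consumed hugepage can be continued on
the next fill; batch_skip_carry() then trims pages off the front as they
are consumed. A minimal userspace re-implementation of the arithmetic (a
sketch, not the kernel code; the u32 types and WARN_ON checks are dropped):

	#include <stdio.h>

	struct pfn_batch {
		unsigned long pfns[4];
		unsigned int npfns[4];
		unsigned int end, total_pfns;
	};

	static void batch_clear_carry(struct pfn_batch *b, unsigned int keep_pfns)
	{
		/* Same arithmetic as lines 296-300: the kept tail of the
		 * last run becomes the sole run of the cleared batch. */
		b->total_pfns = keep_pfns;
		b->pfns[0] = b->pfns[b->end - 1] +
			     (b->npfns[b->end - 1] - keep_pfns);
		b->npfns[0] = keep_pfns;
		b->end = 1;
	}

	static void batch_skip_carry(struct pfn_batch *b, unsigned int skip_pfns)
	{
		/* Lines 305-312: trim consumed pages off run 0. */
		if (!b->total_pfns)
			return;
		if (skip_pfns > b->total_pfns)
			skip_pfns = b->total_pfns;
		b->pfns[0] += skip_pfns;
		b->npfns[0] -= skip_pfns;
		b->total_pfns -= skip_pfns;
	}

	int main(void)
	{
		/* One run covering PFNs 100..109. */
		struct pfn_batch b = { .pfns = { 100 }, .npfns = { 10 },
				       .end = 1, .total_pfns = 10 };

		batch_clear_carry(&b, 2);	/* keep the last 2 pages */
		printf("carry: pfn=%lu n=%u\n", b.pfns[0], b.npfns[0]);
		/* prints: carry: pfn=108 n=2 */

		batch_skip_carry(&b, 1);	/* consume one of them */
		printf("skip:  pfn=%lu n=%u\n", b.pfns[0], b.npfns[0]);
		/* prints: skip:  pfn=109 n=1 */
		return 0;
	}
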
315 static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
318 const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);
321 batch->pfns = temp_kmalloc(&size, backup, backup_len);
322 if (!batch->pfns)
326 batch->array_size = size / elmsz;
327 batch->npfns = (u32 *)(batch->pfns + batch->array_size);
328 batch_clear(batch);
332 static int batch_init(struct pfn_batch *batch, size_t max_pages)
334 return __batch_init(batch, max_pages, NULL, 0);
337 static void batch_init_backup(struct pfn_batch *batch, size_t max_pages,
340 __batch_init(batch, max_pages, backup, backup_len);
343 static void batch_destroy(struct pfn_batch *batch, void *backup)
345 if (batch->pfns != backup)
346 kfree(batch->pfns);
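
temp_kmalloc() is not among the matched lines. From the call sites, the
apparent contract is: allocate up to *size bytes, fall back to the
caller-supplied backup buffer (the on-stack arrays passed through
batch_init_backup() at lines 1284 and 1558), and write back the size
actually obtained; batch_destroy() then frees the memory only when it is
not the backup. A hedged userspace analogue of that inferred contract:

	#include <stdlib.h>

	/* Assumed behavior only; the real temp_kmalloc() is not shown. */
	static void *temp_alloc(size_t *size, void *backup, size_t backup_len)
	{
		void *p = malloc(*size);

		if (p)
			return p;
		if (backup) {
			*size = backup_len;	/* caller sees reduced capacity */
			return backup;
		}
		return NULL;
	}

	/* Mirrors batch_destroy() at lines 345-346. */
	static void temp_destroy(void *p, void *backup)
	{
		if (p != backup)
			free(p);
	}
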
350 static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
352 const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns));
354 if (batch->end &&
355 pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1] &&
356 batch->npfns[batch->end - 1] != MAX_NPFNS) {
357 batch->npfns[batch->end - 1]++;
358 batch->total_pfns++;
361 if (batch->end == batch->array_size)
363 batch->total_pfns++;
364 batch->pfns[batch->end] = pfn;
365 batch->npfns[batch->end] = 1;
366 batch->end++;
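
batch_add_pfn() is the central primitive: a PFN that extends the last run
(without overflowing its u32 counter) is coalesced in place; otherwise a
new run is started, and false signals a full batch so the caller can flush
and retry. A userspace transcription of lines 350-366 showing the
coalescing (kernel type helpers replaced with <stdint.h> equivalents):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct pfn_batch {
		unsigned long pfns[2];
		uint32_t npfns[2];
		unsigned int array_size, end, total_pfns;
	};

	static bool batch_add_pfn(struct pfn_batch *b, unsigned long pfn)
	{
		if (b->end &&
		    pfn == b->pfns[b->end - 1] + b->npfns[b->end - 1] &&
		    b->npfns[b->end - 1] != UINT32_MAX) {
			b->npfns[b->end - 1]++;	/* extend the last run */
			b->total_pfns++;
			return true;
		}
		if (b->end == b->array_size)
			return false;		/* full: caller must flush */
		b->total_pfns++;
		b->pfns[b->end] = pfn;		/* start a new run */
		b->npfns[b->end] = 1;
		b->end++;
		return true;
	}

	int main(void)
	{
		struct pfn_batch b = { .array_size = 2 };

		for (unsigned long pfn = 50; pfn < 55; pfn++)
			batch_add_pfn(&b, pfn);	/* one run: (50, 5) */
		batch_add_pfn(&b, 99);		/* discontiguous: new run */
		printf("runs=%u total=%u first=(%lu,%u)\n",
		       b.end, b.total_pfns, b.pfns[0], (unsigned)b.npfns[0]);
		/* prints: runs=2 total=6 first=(50,5) */
		return 0;
	}
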
371 * Fill the batch with pfns from the domain. When the batch is full, or it
373 * batch->total_pfns to determine the starting point for the next iteration.
375 static void batch_from_domain(struct pfn_batch *batch,
391 * batch.
394 if (!batch_add_pfn(batch, PHYS_PFN(phys)))
426 static void batch_from_domain_continue(struct pfn_batch *batch,
432 unsigned int array_size = batch->array_size;
434 batch->array_size = batch->end;
435 batch_from_domain(batch, domain, area, start_index, last_index);
436 batch->array_size = array_size;
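
The save/clamp/restore of array_size here is the whole trick: with
array_size clamped to end, batch_add_pfn() can never start a new run, so
the nested batch_from_domain() call keeps filling only while the PFNs stay
physically contiguous with the existing tail and stops at the first
discontiguity.
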
475 static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
492 while (cur < batch->end) {
494 next_iova + batch->npfns[cur] * PAGE_SIZE -
499 PFN_PHYS(batch->pfns[cur]) + page_offset,
503 PFN_PHYS(batch->pfns[cur]) + page_offset,
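
Lines 492-503 are the flush path: each run becomes one contiguous physical
span starting at PFN_PHYS(batch->pfns[cur]) (plus a page_offset that can
only be non-zero for the very first page) and ending either after
npfns[cur] whole pages or at the area's last IOVA. A hedged reconstruction
of the loop's shape, with map() standing in for the IOMMU mapping call and
error unwinding omitted:

	while (cur < batch->end) {
		next_iova = min(last_iova + 1,
				next_iova + batch->npfns[cur] * PAGE_SIZE -
					page_offset);
		rc = map(domain, iova,
			 PFN_PHYS(batch->pfns[cur]) + page_offset,
			 next_iova - iova);
		if (rc)
			break;
		iova = next_iova;
		page_offset = 0;	/* only the first page is offset */
		cur++;
	}
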
519 static void batch_from_xarray(struct pfn_batch *batch, struct xarray *xa,
532 if (!batch_add_pfn(batch, xa_to_value(entry)) ||
540 static void batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa,
553 if (!batch_add_pfn(batch, xa_to_value(entry)))
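
The two xarray fill variants differ in ownership: batch_from_xarray()
copies PFNs out while leaving the pinned_pfns entries in place, while
batch_from_xarray_clear() also erases each entry as it is transferred,
moving responsibility for unpinning into the batch (see its use at line
1525 below).
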
615 static void batch_from_pages(struct pfn_batch *batch, struct page **pages,
621 if (!batch_add_pfn(batch, page_to_pfn(*pages)))
625 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages,
631 if (batch->npfns[cur] > first_page_off)
633 first_page_off -= batch->npfns[cur];
639 batch->npfns[cur] - first_page_off);
642 pfn_to_page(batch->pfns[cur] + first_page_off),
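
batch_unpin() first walks whole runs to skip first_page_off pages (lines
631-633), then releases up to npages in per-run chunks; lines 639-642 show
the per-run step, unpinning min(npages, npfns[cur] - first_page_off) pages
starting at pfn_to_page(pfns[cur] + first_page_off).
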
666 static unsigned long batch_rw(struct pfn_batch *batch, void *data,
674 while (cur < batch->end) {
677 copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data,
684 if (npage == batch->npfns[cur]) {
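
batch_rw() walks the runs page by page, copying at most PAGE_SIZE - offset
bytes per page; only the first page can start at a non-zero offset. The
omitted control flow around lines 674-684 presumably looks like this
(hedged reconstruction, not the verbatim source):

	while (cur < batch->end) {
		unsigned long bytes = min(length, PAGE_SIZE - offset);

		copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data,
			       offset, bytes, flags);
		offset = 0;
		data += bytes;
		length -= bytes;
		copied += bytes;
		npage++;
		if (npage == batch->npfns[cur]) {
			npage = 0;	/* advance to the next run */
			cur++;
		}
		if (!length)
			break;
	}
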
939 struct pfn_batch batch;
953 * The batch can contain a mixture of pages that are still in use and pages that
970 batch_unpin(&pfns->batch, pages, span.start_hole - start,
988 batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
1005 &pfns->batch, area->storage_domain, area, start_index,
1017 batch_from_pages(&pfns->batch,
1033 batch_clear(&pfns->batch);
1037 unsigned int npfns = pfns->batch.total_pfns;
1047 if (WARN_ON(!pfns->batch.total_pfns))
1051 pfns->batch_start_index + pfns->batch.total_pfns;
1056 if (npfns == pfns->batch.total_pfns)
1074 rc = batch_init(&pfns->batch, last_index - start_index + 1);
1098 /* Any pages not transferred to the batch are just unpinned */
1117 batch_destroy(&pfns->batch, NULL);
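
Taken together, lines 939-1117 show the consumer side: a pfn_reader owns a
single batch, fills it from the cheapest available source, lets the caller
consume batch.total_pfns pages, then clears and refills. Schematically (a
comment-form outline, not compilable code; "consume" stands in for the
caller's work):

	/*
	 * batch_init(&pfns->batch, last_index - start_index + 1);  line 1074
	 * loop:
	 *	batch_from_xarray(...)  PFNs already pinned (line 988)
	 *	batch_from_domain(...)  PFNs held by storage_domain (line 1005)
	 *	batch_from_pages(...)   freshly pinned user pages (line 1017)
	 *	consume(pfns->batch.total_pfns pages)
	 *	batch_clear(&pfns->batch)                           line 1033
	 * batch_destroy(&pfns->batch, NULL);                       line 1117
	 */
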
1197 iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
1211 batch->total_pfns)
1213 batch->total_pfns !=
1215 batch_from_domain(batch, domain, area, start,
1217 batch_last_index = start_index + batch->total_pfns - 1;
1232 * batch.
1236 batch_from_domain_continue(batch, domain, area,
1243 start_index + batch->total_pfns - 1);
1244 *unmapped_end_index = start_index + batch->total_pfns;
1248 batch_unpin(batch, pages, 0,
1252 batch_clear_carry(batch,
1266 struct pfn_batch batch;
1281 * batch as they are moved to the front of the PFN list and continue on
1284 batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
1289 batch_skip_carry(&batch,
1293 iopt_area_unpin_domain(&batch, area, pages, domain,
1304 WARN_ON(batch.total_pfns);
1305 batch_destroy(&batch, backup);
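
Lines 1284/1305 (and again 1558/1573) show the backup idiom that pairs
with __batch_init(): teardown paths must not fail on allocation, so a
small on-stack array guarantees forward progress even when the heap
allocation fails, just in smaller steps. The shape of the idiom (the
array name and size here are illustrative, not taken from the source):

	unsigned long backup[32];	/* on-stack fallback */
	struct pfn_batch batch;

	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
	/* ... unpin in batch-sized chunks ... */
	batch_destroy(&batch, backup);	/* frees only if not the backup */
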
1372 rc = batch_to_domain(&pfns.batch, domain, area,
1433 rc = batch_to_domain(&pfns.batch, domain, area,
1519 static void iopt_pages_unpin_xarray(struct pfn_batch *batch,
1525 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index,
1527 batch_unpin(batch, pages, 0, batch->total_pfns);
1528 start_index += batch->total_pfns;
1529 batch_clear(batch);
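
Lines 1525-1529 sit inside a loop that advances start_index by however
many PFNs each fill produced; a plausible reconstruction of the omitted
loop header:

	while (start_index <= end_index) {
		batch_from_xarray_clear(batch, &pages->pinned_pfns,
					start_index, end_index);
		batch_unpin(batch, pages, 0, batch->total_pfns);
		start_index += batch->total_pfns;
		batch_clear(batch);
	}
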
1548 struct pfn_batch batch;
1558 batch_init_backup(&batch,
1563 iopt_pages_unpin_xarray(&batch, pages, span.start_hole,
1573 batch_destroy(&batch, backup);
1761 done = batch_rw(&pfns.batch, data, offset, length, flags);