Lines Matching refs:end

387 			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
388 			va_block->start, va_block->end, va_block->size);
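
Lines 387-388 are the tail of a dev_dbg() call in the driver's VA-list dump helper: each free block is logged with its start, end, and size as the free list is walked. A minimal userspace analogue of that dump (the struct layout and print_va_block() are illustrative, not the driver's API):

	#include <stdio.h>
	#include <stdint.h>

	struct va_block {
		uint64_t start, end, size;
	};

	/* Print one block in the same start/end/size shape as the dev_dbg above. */
	static void print_va_block(const struct va_block *b)
	{
		printf("va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
		       (unsigned long long)b->start,
		       (unsigned long long)b->end,
		       (unsigned long long)b->size);
	}
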
411 	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
412 		prev->end = va_block->end;
413 		prev->size = prev->end - prev->start;
420 	if (&next->node != va_list && va_block->end + 1 == next->start) {
422 		next->size = next->end - next->start;
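
Lines 411-422 are the two coalescing steps taken when a block is returned to the free list: if the previous block ends exactly one address before the new block starts, the new block is folded into it, and symmetrically for the next block. The `+ 1` adjacency tests imply that `end` is the last address covered by a block (inclusive), in which case the exact byte count is `end - start + 1`; the `end - start` seen above undercounts by one, and newer versions of the driver add the `+ 1`. A hedged userspace sketch of the forward merge (try_merge_prev() is hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	struct va_block {
		uint64_t start;	/* first address covered by the block */
		uint64_t end;	/* last address covered (inclusive) */
		uint64_t size;
	};

	/*
	 * Forward merge: absorb 'blk' into 'prev' when prev ends exactly one
	 * address before blk begins. Returns true if the merge happened.
	 */
	static bool try_merge_prev(struct va_block *prev, const struct va_block *blk)
	{
		if (prev->end + 1 != blk->start)
			return false;

		prev->end = blk->end;
		prev->size = prev->end - prev->start + 1;	/* inclusive end */
		return true;
	}
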
434  * @end : end virtual address
443 		struct list_head *va_list, u64 start, u64 end)
446 	u64 size = end - start;
453 				va_block->end)) {
455 				"block crossing ranges at start 0x%llx, end 0x%llx\n",
456 				va_block->start, va_block->end);
460 		if (va_block->end < start)
469 		va_block->end = end;
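
Lines 434-469 come from add_va_block_locked(): the incoming range's size is computed, every existing block is tested for overlap (the "block crossing ranges" error path), the last block ending below `start` is remembered as the insertion point, and the new range then either extends a neighbor or becomes a fresh node. The crossing test is a standard interval-overlap check; a minimal sketch (the name mirrors the driver's hl_mem_area_crosses_range(), but the body is a generic reconstruction):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * The candidate range [start, start + size - 1] crosses the existing
	 * block [blk_start, blk_end] (both ends inclusive) iff neither range
	 * lies entirely before the other.
	 */
	static bool mem_area_crosses_range(uint64_t start, uint64_t size,
					   uint64_t blk_start, uint64_t blk_end)
	{
		uint64_t end = start + size - 1;

		return start <= blk_end && blk_start <= end;
	}
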
490  * @end : end virtual address
496 		struct hl_va_range *va_range, u64 start, u64 end)
501 	rc = add_va_block_locked(hdev, &va_range->list, start, end);
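
Lines 490-501 show the usual unlocked/locked split: add_va_block() is a thin wrapper that takes the range lock and delegates to add_va_block_locked(), leaving the `_locked` variant available to callers that already hold the lock. The same pattern sketched with a POSIX mutex standing in for the kernel mutex (all names illustrative):

	#include <pthread.h>
	#include <stdint.h>

	struct va_range {
		pthread_mutex_t lock;
		/* ... free-block list ... */
	};

	/* Stub standing in for the real list insertion sketched earlier. */
	static int add_va_block_locked(struct va_range *range,
				       uint64_t start, uint64_t end)
	{
		(void)range; (void)start; (void)end;
		return 0;
	}

	/* Wrapper: serialize all free-list manipulation under the range lock. */
	static int add_va_block(struct va_range *range, uint64_t start, uint64_t end)
	{
		int rc;

		pthread_mutex_lock(&range->lock);
		rc = add_va_block_locked(range, start, end);
		pthread_mutex_unlock(&range->lock);

		return rc;
	}
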
546 			if (valid_start > va_block->end)
550 		valid_size = va_block->end - valid_start;
560 				((hint_addr + size) <= va_block->end)) {
586 		new_va_block->size = new_va_block->end - new_va_block->start;
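
Lines 546-586 are from the allocation-side scan: each free block's start is rounded up to the required alignment, blocks whose aligned start overshoots their own end are skipped, the usable size is computed, and a block that can hold the caller's hint address is preferred outright; once a block is chosen, the leftover piece is re-sized. A hedged first-fit sketch of that scan, assuming an inclusive `end` (pick_va() and its signature are hypothetical):

	#include <stddef.h>
	#include <stdint.h>

	struct va_block {
		uint64_t start, end;	/* end is inclusive */
	};

	/*
	 * Hint-aware first fit over an array of free blocks. 'align' must be
	 * a power of two; a zero return means no block fits.
	 */
	static uint64_t pick_va(const struct va_block *blocks, size_t n,
				uint64_t size, uint64_t align, uint64_t hint_addr)
	{
		uint64_t res = 0;

		for (size_t i = 0; i < n; i++) {
			/* Round the block start up to the required alignment. */
			uint64_t valid_start = (blocks[i].start + align - 1) &
					       ~(align - 1);

			if (valid_start > blocks[i].end)
				continue;	/* alignment pushed us past the block */

			if (blocks[i].end - valid_start + 1 < size)
				continue;	/* too small once aligned */

			/* A block that contains the caller's hint wins outright. */
			if (hint_addr && hint_addr >= valid_start &&
			    hint_addr + size - 1 <= blocks[i].end)
				return hint_addr;

			if (!res)
				res = valid_start;	/* remember the first fit */
		}

		return res;
	}
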
1106 	 * at the loop end rather than for each iteration
1350 	u64 start, end;
1381 	end = PAGE_ALIGN(addr + size);
1382 	npages = (end - start) >> PAGE_SHIFT;
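
Lines 1350-1382 are the classic page-bounding computation for an arbitrary user buffer: the start is truncated down to a page boundary, the end is rounded up with PAGE_ALIGN(), and the page count falls out of the difference. (The matching `start = addr & PAGE_MASK` line contains no "end", so it is absent from this listing, but it is the usual counterpart.) A self-contained worked example, assuming 4 KiB pages:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	/* Round up to the next page boundary, like the kernel's PAGE_ALIGN. */
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		uint64_t addr = 0x1234;	/* unaligned user address (example) */
		uint64_t size = 0x5000;	/* 20 KiB */

		uint64_t start = addr & PAGE_MASK;		/* 0x1000 */
		uint64_t end = PAGE_ALIGN(addr + size);		/* 0x7000 */
		uint64_t npages = (end - start) >> PAGE_SHIFT;	/* 6 */

		printf("start=0x%llx end=0x%llx npages=%llu\n",
		       (unsigned long long)start, (unsigned long long)end,
		       (unsigned long long)npages);
		return 0;
	}
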
1494  * @end: range end address
1501 		u64 start, u64 end)
1514 	if (end & (PAGE_SIZE - 1))
1515 		end &= PAGE_MASK;
1517 	if (start >= end) {
1522 	rc = add_va_block(hdev, va_range, start, end);
1530 	va_range->end_addr = end;
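
Lines 1494-1530 are from the range-init path: a misaligned `start` is rounded up to the next page, a misaligned `end` is truncated down, the shrunken range is sanity-checked (`start >= end` means nothing is left), the whole span is then seeded into the allocator as a single free block, and the final boundaries are recorded on the range. A reconstruction under those assumptions (va_range_init() here is a sketch, not the exact driver function):

	#include <errno.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096ULL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	struct va_range {
		uint64_t start_addr;
		uint64_t end_addr;
		/* ... free-block list ... */
	};

	/* Stub standing in for the driver's add_va_block(). */
	static int add_va_block(struct va_range *range, uint64_t start,
				uint64_t end)
	{
		(void)range; (void)start; (void)end;
		return 0;
	}

	static int va_range_init(struct va_range *range, uint64_t start, uint64_t end)
	{
		int rc;

		/* Shrink the range inward so both ends are page aligned. */
		if (start & (PAGE_SIZE - 1)) {
			start &= PAGE_MASK;
			start += PAGE_SIZE;
		}

		if (end & (PAGE_SIZE - 1))
			end &= PAGE_MASK;

		if (start >= end)
			return -EINVAL;	/* nothing left after alignment */

		/* Seed the allocator: one free block covering the whole range. */
		rc = add_va_block(range, start, end);
		if (rc)
			return rc;

		range->start_addr = start;
		range->end_addr = end;
		return 0;
	}
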
1558  * @host_range_end: host virtual addresses range end.
1561  * @host_huge_range_end: host virtual addresses range end for memory allocated
1564  * @dram_range_end: dram virtual addresses range end.
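
The final matches (1558-1564) are kernel-doc fragments from the per-context setup, which carves out three independent VA ranges: one for regular host mappings, one for host memory allocated with huge pages, and one for DRAM. A hypothetical grouping of those boundaries, purely for orientation:

	#include <stdint.h>

	struct va_span {
		uint64_t start_addr;
		uint64_t end_addr;
	};

	/* One start/end pair per range named in the kernel-doc above. */
	struct ctx_va_ranges {
		struct va_span host;		/* host_range_start .. host_range_end */
		struct va_span host_huge;	/* huge-page host allocations */
		struct va_span dram;		/* dram_range_start .. dram_range_end */
	};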