Lines matching refs: start
54 "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
59 "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
212 for (pfn = (range->start >> PAGE_SHIFT);
241 static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
252 xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
277 dmirror_do_update(dmirror, range->start, range->end);
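Lines 241-277 are the update path that walks the mirrored page table by pfn whenever a range changes. A self-contained sketch of the same idea, with a flat array standing in for the driver's xarray and all names invented for illustration:

/*
 * Illustrative only: a flat array stands in for the per-pfn mirror that the
 * driver keeps in an xarray; every entry covered by [start, end) is dropped.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define NPAGES     16UL

static void *mirror_pt[NPAGES];	/* hypothetical per-pfn mirror entries */

static void mirror_do_update(unsigned long start, unsigned long end)
{
	unsigned long pfn;

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++)
		mirror_pt[pfn] = NULL;
}

int main(void)
{
	mirror_do_update(0x2000, 0x5000);	/* clears pfns 2..4 */
	printf("entry 3 = %p\n", mirror_pt[3]);
	return 0;
}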
327 static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
347 for (addr = start; addr < end; addr = range.end) {
348 range.start = addr;
360 static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
366 ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
368 for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
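Lines 360-368 copy mirrored pages into a bounce buffer one page at a time, bailing out with -ENOENT at the first missing entry. A simplified, compilable stand-in for that loop (the bounce struct, page lookup, and sizes here are assumptions):

/*
 * Simplified stand-in for the bounce-buffer read loop: copy one page per pfn
 * in [start, end) into the staging buffer, stopping at the first
 * unpopulated entry so the caller can fault it in and retry.
 */
#include <errno.h>
#include <string.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define NPAGES     8UL

struct bounce {
	void *ptr;		/* staging buffer */
	unsigned long addr;	/* address the buffer mirrors */
	unsigned long size;
};

/* Fake per-pfn backing store standing in for the driver's xarray lookup. */
static unsigned char backing[NPAGES][PAGE_SIZE];

static void *mirror_lookup_page(unsigned long pfn)
{
	return pfn < NPAGES ? backing[pfn] : NULL;
}

static int mirror_do_read(unsigned long start, unsigned long end,
			  struct bounce *bounce)
{
	unsigned char *ptr;
	unsigned long pfn;

	/* Offset into the bounce buffer, rounded down to a page boundary. */
	ptr = (unsigned char *)bounce->ptr + ((start - bounce->addr) & PAGE_MASK);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		void *page = mirror_lookup_page(pfn);

		if (!page)
			return -ENOENT;
		memcpy(ptr, page, PAGE_SIZE);
		ptr += PAGE_SIZE;
	}
	return 0;
}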
389 unsigned long start, end;
393 start = cmd->addr;
394 end = start + size;
395 if (end < start)
398 ret = dmirror_bounce_init(&bounce, start, size);
404 ret = dmirror_do_read(dmirror, start, end, &bounce);
409 start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
410 ret = dmirror_fault(dmirror, start, end, false);
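Lines 389-410 show the ioctl-side pattern: compute end = start + size with an overflow check, attempt the read, and on -ENOENT fault in pages from the point of partial progress before retrying. A hedged sketch of that control flow; mirror_do_read() and mirror_fault() are assumed helpers, and progress is passed back through a plain counter rather than the driver's bounce structure:

/*
 * Sketch of the retry loop: validate the range, read what is present,
 * fault in the rest, and try again until the whole range has been copied.
 */
#include <errno.h>

#define PAGE_SHIFT 12UL

extern int mirror_do_read(unsigned long start, unsigned long end,
			  unsigned long *cpages);	/* pages copied by this call */
extern int mirror_fault(unsigned long start, unsigned long end, int write);

static int mirror_read_range(unsigned long addr, unsigned long size)
{
	unsigned long start = addr;
	unsigned long end = start + size;
	unsigned long cpages;
	int ret;

	if (end < start)	/* reject a range that wraps around the address space */
		return -EINVAL;

	for (;;) {
		cpages = 0;
		ret = mirror_do_read(start, end, &cpages);
		if (ret != -ENOENT)
			return ret;	/* done, or a hard error */

		/* Skip what was already copied, fault the rest, and retry. */
		start += cpages << PAGE_SHIFT;
		ret = mirror_fault(start, end, /*write=*/0);
		if (ret)
			return ret;
	}
}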
426 static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
432 ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
434 for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
455 unsigned long start, end;
459 start = cmd->addr;
460 end = start + size;
461 if (end < start)
464 ret = dmirror_bounce_init(&bounce, start, size);
475 ret = dmirror_do_write(dmirror, start, end, &bounce);
480 start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
481 ret = dmirror_fault(dmirror, start, end, true);
514 devmem->pagemap.range.start = res->start;
519 devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
522 devmem->pagemap.range.end = devmem->pagemap.range.start +
561 pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
592 release_mem_region(devmem->pagemap.range.start,
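Lines 514-592 size each device-memory chunk: range.start comes either from a requested resource or from the spm_addr_dev0/spm_addr_dev1 parameters (selected by the character-device minor), the end is start plus the chunk size, and the first pfn is start >> PAGE_SHIFT. A small arithmetic-only sketch; the 256MB chunk size and the example base address are assumptions, and the sketch uses an exclusive end where a kernel struct range stores an inclusive one:

/*
 * Arithmetic-only sketch of chunk setup: given a base address, compute the
 * chunk's [start, end) span and its first/last pfns.
 */
#include <stdio.h>

#define PAGE_SHIFT        12UL
#define DEVMEM_CHUNK_SIZE (256UL << 20)	/* assumed 256MB per chunk */

struct chunk_range {
	unsigned long start;
	unsigned long end;	/* exclusive in this sketch */
};

static struct chunk_range devmem_chunk_range(unsigned long base)
{
	struct chunk_range r = {
		.start = base,
		.end   = base + DEVMEM_CHUNK_SIZE,
	};
	return r;
}

int main(void)
{
	unsigned long spm_addr_dev0 = 0x40000000UL;	/* example SPM base */
	struct chunk_range r = devmem_chunk_range(spm_addr_dev0);
	unsigned long pfn_first = r.start >> PAGE_SHIFT;
	unsigned long pfn_last = (r.end >> PAGE_SHIFT) - 1;

	printf("chunk pfns %#lx..%#lx\n", pfn_first, pfn_last);
	return 0;
}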
647 for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
693 static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
698 for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
709 static int dmirror_atomic_map(unsigned long start, unsigned long end,
718 for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
742 unsigned long start = args->start;
751 for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
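Lines 693-751 repeat one loop shape: a pfn walk over [start, end) paired with an index into a per-page array handed in by the caller. The idiom in isolation, with invented types standing in for the driver's pages and entries:

/*
 * The recurring loop shape: pfn walks the mirrored range while i indexes a
 * parallel per-page array supplied by the caller.
 */
#define PAGE_SHIFT 12UL

typedef unsigned long page_entry_t;	/* stand-in for struct page * etc. */

static void map_range(unsigned long start, unsigned long end,
		      const page_entry_t *entries, page_entry_t *mirror_pt)
{
	unsigned long pfn, i;

	for (i = 0, pfn = start >> PAGE_SHIFT;
	     pfn < (end >> PAGE_SHIFT); pfn++, i++) {
		if (!entries[i])
			continue;		/* nothing provided for this page */
		mirror_pt[pfn] = entries[i];	/* record the mapping per pfn */
	}
}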
780 unsigned long start, end, addr;
788 start = cmd->addr;
789 end = start + size;
790 if (end < start)
798 for (addr = start; addr < end; addr = next) {
832 ret = dmirror_bounce_init(&bounce, start, size);
836 ret = dmirror_do_read(dmirror, start, end, &bounce);
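Lines 780-836, the later migrate paths around 903-997, and the snapshot path at 1168-1206 all walk a large range in fixed-size chunks, clamping next to end so each pass handles at most one on-stack array's worth of pages. A sketch of that chunking; the 64-page batch size and process_chunk() are assumptions:

/*
 * Chunked walk over [start, end): each iteration covers at most BATCH_PAGES
 * pages so the per-page scratch array can stay on the stack.
 */
#define PAGE_SHIFT  12UL
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define BATCH_PAGES 64UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Hypothetical per-chunk worker. */
extern int process_chunk(unsigned long addr, unsigned long next,
			 unsigned long *pfns, unsigned long npages);

static int walk_in_chunks(unsigned long start, unsigned long end)
{
	unsigned long pfns[BATCH_PAGES];
	unsigned long addr, next;
	int ret;

	for (addr = start; addr < end; addr = next) {
		next = min_ul(end, addr + (BATCH_PAGES << PAGE_SHIFT));
		ret = process_chunk(addr, next, pfns,
				    (next - addr) >> PAGE_SHIFT);
		if (ret)
			return ret;
	}
	return 0;
}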
854 unsigned long start = args->start;
858 for (addr = start; addr < end; addr += PAGE_SIZE,
903 unsigned long start, end, addr;
913 start = cmd->addr;
914 end = start + size;
915 if (end < start)
924 for (addr = start; addr < end; addr = next) {
937 args.start = addr;
963 unsigned long start, end, addr;
974 start = cmd->addr;
975 end = start + size;
976 if (end < start)
984 for (addr = start; addr < end; addr = next) {
997 args.start = addr;
1018 ret = dmirror_bounce_init(&bounce, start, size);
1022 ret = dmirror_do_read(dmirror, start, end, &bounce);
1123 range->start, range->end - range->start,
1154 n = (range->end - range->start) >> PAGE_SHIFT;
1168 unsigned long start, end;
1181 start = cmd->addr;
1182 end = start + size;
1183 if (end < start)
1195 for (addr = start; addr < end; addr = next) {
1199 range.start = addr;
1206 n = (range.end - range.start) >> PAGE_SHIFT;
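Lines 1123-1206 turn each snapshotted sub-range into a count of per-page entries, n = (end - start) >> PAGE_SHIFT, which works because both bounds are page-aligned, and advance the output buffer by n after every chunk. A short sketch of that packing; the one-byte-per-page format, batch size, and snapshot_chunk() helper are assumptions:

/*
 * Snapshot-style packing: each chunk contributes one attribute byte per page
 * in this sketch, and the output pointer advances by that count per pass.
 */
#include <string.h>

#define PAGE_SHIFT  12UL
#define BATCH_PAGES 64UL

/* Hypothetical helper: fill one attribute byte per page in [addr, next). */
extern void snapshot_chunk(unsigned long addr, unsigned long next,
			   unsigned char *perm);

static void snapshot_range(unsigned long start, unsigned long end,
			   unsigned char *out)
{
	unsigned char perm[BATCH_PAGES];
	unsigned long addr, next, n;

	for (addr = start; addr < end; addr = next) {
		next = addr + (BATCH_PAGES << PAGE_SHIFT);
		if (next > end)
			next = end;
		snapshot_chunk(addr, next, perm);
		n = (next - addr) >> PAGE_SHIFT;	/* pages in this chunk */
		memcpy(out, perm, n);
		out += n;
	}
}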
1222 unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
1286 release_mem_region(devmem->pagemap.range.start,
1441 args.start = vmf->address;
1442 args.end = args.start + PAGE_SIZE;
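Lines 1441-1442 build a one-page migration window around the faulting address. A small sketch that forms the same [start, start + PAGE_SIZE) window, aligning the address down defensively even though the driver's vmf->address should already be page-aligned:

/*
 * Build a single-page [start, end) window around a fault address.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct fault_window {
	unsigned long start;
	unsigned long end;
};

static struct fault_window one_page_window(unsigned long fault_addr)
{
	struct fault_window w;

	w.start = fault_addr & PAGE_MASK;	/* round down to the page boundary */
	w.end = w.start + PAGE_SIZE;		/* exclusive end, exactly one page */
	return w;
}

int main(void)
{
	struct fault_window w = one_page_window(0x12345678UL);

	printf("[%#lx, %#lx)\n", w.start, w.end);	/* [0x12345000, 0x12346000) */
	return 0;
}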