Lines Matching refs:ptr

Each entry below keeps its line number from the source file. The code appears to be an AVX2 memchr-style byte search: forward and reverse scans for one, two, or three needle bytes, plus the per-chunk search helpers they call.

35 ptr: *const u8,
41 let mut at = sub(ptr, start_ptr);
67 let mut ptr = start_ptr;
77 if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
81 ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
82 debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
83 while loop_size == LOOP_SIZE && ptr <= end_ptr.sub(loop_size) {
84 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
86 let a = _mm256_load_si256(ptr as *const __m256i);
87 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
88 let c = _mm256_load_si256(ptr.add(2 * VECTOR_SIZE) as *const __m256i);
89 let d = _mm256_load_si256(ptr.add(3 * VECTOR_SIZE) as *const __m256i);
99 return Some(matched(start_ptr, ptr, eqa, eqb, eqc, eqd));
101 ptr = ptr.add(loop_size);
103 while ptr <= end_ptr.sub(VECTOR_SIZE) {
104 debug_assert!(sub(end_ptr, ptr) >= VECTOR_SIZE);
106 if let Some(i) = forward_search1(start_ptr, end_ptr, ptr, vn1) {
109 ptr = ptr.add(VECTOR_SIZE);
111 if ptr < end_ptr {
112 debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
113 ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
114 debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
116 return forward_search1(start_ptr, end_ptr, ptr, vn1);
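
The cluster above (lines 67-116) is evidently the single-needle forward path: probe the first chunk unaligned (line 77), round the pointer up to a 32-byte boundary (line 81), scan 128 bytes per iteration with four aligned loads (lines 86-89), then drop to one-vector steps (103-109) and a final overlapping chunk (111-116). A minimal sketch of the per-chunk test all of those steps share, assuming AVX2 and using the hypothetical name chunk_find1 (this is not the file's own helper):

```rust
use std::arch::x86_64::*;

// Test one unaligned 32-byte chunk for a single needle byte. `vn1` is the
// needle broadcast to all 32 lanes, e.g. by _mm256_set1_epi8.
#[target_feature(enable = "avx2")]
unsafe fn chunk_find1(ptr: *const u8, vn1: __m256i) -> Option<usize> {
    let chunk = _mm256_loadu_si256(ptr as *const __m256i); // unaligned 32-byte load
    let eq = _mm256_cmpeq_epi8(chunk, vn1);                // 0xFF in each matching lane
    let mask = _mm256_movemask_epi8(eq) as u32;            // one bit per byte lane
    if mask == 0 {
        None
    } else {
        Some(mask.trailing_zeros() as usize) // lowest set bit = first match
    }
}
```

In the unrolled loop, line 99 passes all four equality vectors to matched, which suggests the four results are OR-ed together and tested with a single movemask, so the common no-match case costs one branch per 128 bytes.
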
128 ptr: *const u8,
134 let mut at = sub(ptr, start_ptr);
153 let mut ptr = start_ptr;
156 while ptr < end_ptr {
157 if *ptr == n1 || *ptr == n2 {
158 return Some(sub(ptr, start_ptr));
160 ptr = ptr.offset(1);
165 if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
169 ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
170 debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
171 while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
172 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
174 let a = _mm256_load_si256(ptr as *const __m256i);
175 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
184 return Some(matched(start_ptr, ptr, eqa1, eqa2, eqb1, eqb2));
186 ptr = ptr.add(loop_size);
188 while ptr <= end_ptr.sub(VECTOR_SIZE) {
189 if let Some(i) = forward_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
192 ptr = ptr.add(VECTOR_SIZE);
194 if ptr < end_ptr {
195 debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
196 ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
197 debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
199 return forward_search2(start_ptr, end_ptr, ptr, vn1, vn2);
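
Lines 153-199 repeat the same skeleton for two needle bytes: a scalar loop for haystacks shorter than one vector (156-160), then the vector path, whose LOOP_SIZE2 unroll loads two vectors per iteration (174-175) rather than four. The per-chunk test differs only in OR-ing two equality masks; a sketch under the same assumptions, with the hypothetical name chunk_find2:

```rust
use std::arch::x86_64::*;

// A byte matches if it equals either needle, so the two equality vectors are
// OR-ed before extracting the bitmask.
#[target_feature(enable = "avx2")]
unsafe fn chunk_find2(ptr: *const u8, vn1: __m256i, vn2: __m256i) -> Option<usize> {
    let chunk = _mm256_loadu_si256(ptr as *const __m256i);
    let eq1 = _mm256_cmpeq_epi8(chunk, vn1);
    let eq2 = _mm256_cmpeq_epi8(chunk, vn2);
    let mask = _mm256_movemask_epi8(_mm256_or_si256(eq1, eq2)) as u32;
    if mask == 0 {
        None
    } else {
        Some(mask.trailing_zeros() as usize)
    }
}
```
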
216 ptr: *const u8,
224 let mut at = sub(ptr, start_ptr);
246 let mut ptr = start_ptr;
249 while ptr < end_ptr {
250 if *ptr == n1 || *ptr == n2 || *ptr == n3 {
251 return Some(sub(ptr, start_ptr));
253 ptr = ptr.offset(1);
258 if let Some(i) = forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
262 ptr = ptr.add(VECTOR_SIZE - (start_ptr as usize & VECTOR_ALIGN));
263 debug_assert!(ptr > start_ptr && end_ptr.sub(VECTOR_SIZE) >= start_ptr);
264 while loop_size == LOOP_SIZE2 && ptr <= end_ptr.sub(loop_size) {
265 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
267 let a = _mm256_load_si256(ptr as *const __m256i);
268 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
282 start_ptr, ptr, eqa1, eqa2, eqa3, eqb1, eqb2, eqb3,
285 ptr = ptr.add(loop_size);
287 while ptr <= end_ptr.sub(VECTOR_SIZE) {
289 forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
293 ptr = ptr.add(VECTOR_SIZE);
295 if ptr < end_ptr {
296 debug_assert!(sub(end_ptr, ptr) < VECTOR_SIZE);
297 ptr = ptr.sub(VECTOR_SIZE - sub(end_ptr, ptr));
298 debug_assert_eq!(sub(end_ptr, ptr), VECTOR_SIZE);
300 return forward_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3);
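
The three-needle path (246-300) extends this with a third compare-and-OR; nothing else changes. What all three forward paths share is the round-up on lines 81, 169, and 262, which makes every load in the main loop 32-byte aligned, while the reverse paths round down instead (lines 329, 409, 482). A small demonstration of that arithmetic, assuming VECTOR_SIZE = 32 and VECTOR_ALIGN = VECTOR_SIZE - 1 as the masking implies:

```rust
const VECTOR_SIZE: usize = 32;
const VECTOR_ALIGN: usize = VECTOR_SIZE - 1; // mask for the low five address bits

// Forward: bytes to advance so the next load starts on a 32-byte boundary.
fn round_up_step(addr: usize) -> usize {
    VECTOR_SIZE - (addr & VECTOR_ALIGN)
}

// Reverse: highest 32-byte boundary at or below `addr`.
fn round_down(addr: usize) -> usize {
    addr & !VECTOR_ALIGN
}

fn main() {
    // An already-aligned start still advances a full vector; that is fine,
    // because the initial unaligned probe has just checked those 32 bytes.
    assert_eq!(round_up_step(0x1000), 32);
    assert_eq!(round_up_step(0x1003), 29);
    assert_eq!(round_down(0x103f), 0x1020);
}
```
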
312 let mut ptr = end_ptr;
315 while ptr > start_ptr {
316 ptr = ptr.offset(-1);
317 if *ptr == n1 {
318 return Some(sub(ptr, start_ptr));
324 ptr = ptr.sub(VECTOR_SIZE);
325 if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
329 ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
330 debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
331 while loop_size == LOOP_SIZE && ptr >= start_ptr.add(loop_size) {
332 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
334 ptr = ptr.sub(loop_size);
335 let a = _mm256_load_si256(ptr as *const __m256i);
336 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
337 let c = _mm256_load_si256(ptr.add(2 * VECTOR_SIZE) as *const __m256i);
338 let d = _mm256_load_si256(ptr.add(3 * VECTOR_SIZE) as *const __m256i);
347 let mut at = sub(ptr.add(3 * VECTOR_SIZE), start_ptr);
371 while ptr >= start_ptr.add(VECTOR_SIZE) {
372 ptr = ptr.sub(VECTOR_SIZE);
373 if let Some(i) = reverse_search1(start_ptr, end_ptr, ptr, vn1) {
377 if ptr > start_ptr {
378 debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
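
The reverse path (312-378) mirrors the forward one: a scalar loop for short inputs (315-318), an unaligned probe of the final chunk (324-325), a round-down to a 32-byte boundary (329), then aligned vector steps walking backwards (the pointer is decremented before each block of loads, line 334). Finding the last match in a chunk takes the highest set bit of the mask instead of the lowest; a sketch with the hypothetical name chunk_rfind1:

```rust
use std::arch::x86_64::*;

// Report the offset of the LAST matching byte in one 32-byte chunk.
#[target_feature(enable = "avx2")]
unsafe fn chunk_rfind1(ptr: *const u8, vn1: __m256i) -> Option<usize> {
    let chunk = _mm256_loadu_si256(ptr as *const __m256i);
    let mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(chunk, vn1)) as u32;
    if mask == 0 {
        None
    } else {
        // Bit i corresponds to byte i, so the last match is the highest set bit.
        Some(31 - mask.leading_zeros() as usize)
    }
}
```
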
392 let mut ptr = end_ptr;
395 while ptr > start_ptr {
396 ptr = ptr.offset(-1);
397 if *ptr == n1 || *ptr == n2 {
398 return Some(sub(ptr, start_ptr));
404 ptr = ptr.sub(VECTOR_SIZE);
405 if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
409 ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
410 debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
411 while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
412 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
414 ptr = ptr.sub(loop_size);
415 let a = _mm256_load_si256(ptr as *const __m256i);
416 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
425 let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
438 while ptr >= start_ptr.add(VECTOR_SIZE) {
439 ptr = ptr.sub(VECTOR_SIZE);
440 if let Some(i) = reverse_search2(start_ptr, end_ptr, ptr, vn1, vn2) {
444 if ptr > start_ptr {
445 debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
465 let mut ptr = end_ptr;
468 while ptr > start_ptr {
469 ptr = ptr.offset(-1);
470 if *ptr == n1 || *ptr == n2 || *ptr == n3 {
471 return Some(sub(ptr, start_ptr));
477 ptr = ptr.sub(VECTOR_SIZE);
478 if let Some(i) = reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3) {
482 ptr = (end_ptr as usize & !VECTOR_ALIGN) as *const u8;
483 debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
484 while loop_size == LOOP_SIZE2 && ptr >= start_ptr.add(loop_size) {
485 debug_assert_eq!(0, (ptr as usize) % VECTOR_SIZE);
487 ptr = ptr.sub(loop_size);
488 let a = _mm256_load_si256(ptr as *const __m256i);
489 let b = _mm256_load_si256(ptr.add(VECTOR_SIZE) as *const __m256i);
502 let mut at = sub(ptr.add(VECTOR_SIZE), start_ptr);
517 while ptr >= start_ptr.add(VECTOR_SIZE) {
518 ptr = ptr.sub(VECTOR_SIZE);
520 reverse_search3(start_ptr, end_ptr, ptr, vn1, vn2, vn3)
525 if ptr > start_ptr {
526 debug_assert!(sub(ptr, start_ptr) < VECTOR_SIZE);
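
One detail recurs in every epilogue (lines 111-114, 194-197, 296-298 forward; 377-378, 444-445, 525-526 reverse): when fewer than VECTOR_SIZE bytes remain, the pointer is adjusted so one final unaligned load ends exactly at the buffer boundary. A few already-scanned bytes get re-examined, but the tail never falls back to a scalar loop. The arithmetic in plain integers, assuming VECTOR_SIZE = 32:

```rust
const VECTOR_SIZE: usize = 32;

// Given `remaining` bytes left (0 < remaining < VECTOR_SIZE), how far to back
// up so that one last 32-byte read ends exactly at the end of the haystack.
fn tail_backup(remaining: usize) -> usize {
    debug_assert!(0 < remaining && remaining < VECTOR_SIZE);
    VECTOR_SIZE - remaining
}

fn main() {
    // 5 bytes remain: back up 27, so the final load covers the last 32 bytes,
    // re-scanning 27 of them. Valid only because the vector paths are entered
    // solely for haystacks of at least VECTOR_SIZE bytes.
    assert_eq!(tail_backup(5), 27);
    assert_eq!(tail_backup(31), 1);
}
```
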
536 ptr: *const u8,
540 debug_assert!(start_ptr <= ptr);
541 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
543 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
546 Some(sub(ptr, start_ptr) + forward_pos(mask))
556 ptr: *const u8,
561 debug_assert!(start_ptr <= ptr);
562 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
564 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
570 Some(sub(ptr, start_ptr) + forward_pos2(mask1, mask2))
580 ptr: *const u8,
586 debug_assert!(start_ptr <= ptr);
587 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
589 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
598 Some(sub(ptr, start_ptr) + forward_pos3(mask1, mask2, mask3))
608 ptr: *const u8,
612 debug_assert!(start_ptr <= ptr);
613 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
615 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
618 Some(sub(ptr, start_ptr) + reverse_pos(mask))
628 ptr: *const u8,
633 debug_assert!(start_ptr <= ptr);
634 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
636 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
642 Some(sub(ptr, start_ptr) + reverse_pos2(mask1, mask2))
652 ptr: *const u8,
658 debug_assert!(start_ptr <= ptr);
659 debug_assert!(ptr <= end_ptr.sub(VECTOR_SIZE));
661 let chunk = _mm256_loadu_si256(ptr as *const __m256i);
670 Some(sub(ptr, start_ptr) + reverse_pos3(mask1, mask2, mask3))
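
Lines 536-670 are the shared one-chunk probes (forward_search1/2/3 and reverse_search1/2/3): each asserts that a full vector fits between ptr and end_ptr (e.g. 540-541), performs a single unaligned load, and converts the movemask into an offset relative to start_ptr. To show how the pieces fit together, here is a deliberately simplified driver in the same style; find_byte is a hypothetical name, it skips the aligned four-vector unroll, and it assumes an x86_64 target:

```rust
use std::arch::x86_64::*;

const VECTOR_SIZE: usize = 32;

// Simplified shape of the forward search: scalar for short inputs, unaligned
// 32-byte chunks, then one overlapping chunk for the tail.
#[target_feature(enable = "avx2")]
unsafe fn find_byte(n1: u8, haystack: &[u8]) -> Option<usize> {
    if haystack.len() < VECTOR_SIZE {
        return haystack.iter().position(|&b| b == n1);
    }
    let vn1 = _mm256_set1_epi8(n1 as i8); // broadcast needle to all 32 lanes
    let start = haystack.as_ptr();
    let mut i = 0;
    while i + VECTOR_SIZE <= haystack.len() {
        let chunk = _mm256_loadu_si256(start.add(i) as *const __m256i);
        let mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(chunk, vn1)) as u32;
        if mask != 0 {
            return Some(i + mask.trailing_zeros() as usize);
        }
        i += VECTOR_SIZE;
    }
    if i < haystack.len() {
        // Overlapping final chunk, exactly as in the epilogues above.
        let j = haystack.len() - VECTOR_SIZE;
        let chunk = _mm256_loadu_si256(start.add(j) as *const __m256i);
        let mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(chunk, vn1)) as u32;
        if mask != 0 {
            return Some(j + mask.trailing_zeros() as usize);
        }
    }
    None
}

fn main() {
    if is_x86_feature_detected!("avx2") {
        let hay = b"the quick brown fox jumps over the lazy dog";
        assert_eq!(unsafe { find_byte(b'z', hay) }, Some(37));
        assert_eq!(unsafe { find_byte(b'!', hay) }, None);
    }
}
```
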