Lines matching defs:latency
42 * compute a DAG of the dependencies (RAW ordering with latency, WAW
43 * ordering with latency, WAR ordering), and make a list of the DAG heads.
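The comment at lines 42-43 names the core structure: a dependency DAG whose RAW and WAW edges carry latencies and whose WAR edges are ordering-only. Below is a minimal sketch of how such edges could be classified per register. It is not this file's dependency walk; the reg_state tracker, track_read/track_write, parent_count, and unblocked_time are invented for illustration, while latency, children, child_latency, and delay mirror field names visible later in this listing.

#include <vector>

/* Node fields named after ones visible later in this listing (latency,
 * children, child_latency, delay); parent_count and unblocked_time are
 * assumed bookkeeping fields used by the later sketches.
 */
struct node {
   int latency = 1;                 /* cycles until this node's result is usable */
   int delay = 0;                   /* critical-path length, computed later */
   int unblocked_time = 0;          /* earliest cycle this node could issue */
   int parent_count = 0;            /* unsatisfied incoming edges */
   std::vector<node *> children;    /* nodes that must be scheduled after this one */
   std::vector<int> child_latency;  /* latency carried by each outgoing edge */
};

/* Simplified edge insertion; a fuller version with edge deduplication,
 * as the MAX2 at line 1060 suggests, is sketched further down.
 */
static void add_dep(node *before, node *after, int latency)
{
   if (!before || !after || before == after)
      return;
   before->children.push_back(after);
   before->child_latency.push_back(latency);
   after->parent_count++;
}

/* Per-register tracking while walking instructions in program order. */
struct reg_state {
   node *last_write = nullptr;      /* most recent writer of the register */
   std::vector<node *> readers;     /* readers since that write */
};

static void track_read(reg_state &r, node *n)
{
   if (r.last_write)
      add_dep(r.last_write, n, r.last_write->latency);  /* RAW: wait for the result */
   r.readers.push_back(n);
}

static void track_write(reg_state &r, node *n)
{
   if (r.last_write)
      add_dep(r.last_write, n, r.last_write->latency);  /* WAW: ordered, with latency */
   for (node *reader : r.readers)
      add_dep(reader, n, 0);                            /* WAR: ordering only */
   r.last_write = n;
   r.readers.clear();
}

After such a walk, the nodes whose parent_count is still zero are the DAG heads mentioned in the comment.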
78 int latency;
87 * This is the sum of the instruction's latency plus the maximum delay of
127 this->latency = 1 * chans * math_latency;
130 this->latency = 2 * chans * math_latency;
136 this->latency = 3 * chans * math_latency;
141 this->latency = 4 * chans * math_latency;
144 this->latency = 8 * chans * math_latency;
148 /* minimum latency, max is 12 rounds. */
149 this->latency = 5 * chans * math_latency;
152 this->latency = 2;
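The matches at lines 127-152 all follow one pattern: on this older-hardware path, a math opcode's latency is a round count (1 to 8) times the channel count times a per-round math latency, with 2 as the fallback. A hedged illustration of that arithmetic; the chans and math_latency values below are assumptions chosen only to make the numbers concrete, not taken from this listing.

/* Illustrative only: the multipliers 1..8 are the round counts seen above;
 * chans and math_latency are assumed values, not read from the file.
 */
static int gen4_math_latency(int rounds)
{
   const int chans = 8;          /* assumed SIMD8 dispatch */
   const int math_latency = 22;  /* assumed cycles per round per channel */
   return rounds * chans * math_latency;
}
/* e.g. gen4_math_latency(1) == 176, gen4_math_latency(5) == 880,
 *      gen4_math_latency(8) == 1408
 */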
182 * higher latency.
184 latency = is_haswell ? 16 : 18;
208 * higher latency.
210 latency = 14;
229 latency = is_haswell ? 14 : 16;
240 latency = is_haswell ? 22 : 24;
262 * So the latency on our first texture load of the batchbuffer takes
276 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
297 latency = 200;
324 latency = 100;
357 latency = 200;
369 latency = 50;
374 latency = 14000;
380 latency = is_haswell ? 300 : 600;
391 latency = 100;
396 latency = 200;
408 latency = 600;
414 latency = 14000;
419 latency = 600;
435 latency = 200;
445 latency = 300;
466 * gives an average latency of 583 cycles per surface read,
470 latency = 600;
482 * gives an average latency of 13867 cycles per atomic op,
484 * pessimistic estimate, the actual latency in cases with few
489 latency = 14000;
510 latency = 300;
523 latency = 14000;
539 latency = 300;
561 latency = 1400;
575 latency = 200;
579 latency = 200;
595 latency = 14;
654 void add_dep(schedule_node *before, schedule_node *after, int latency);
980 this->latency = 1;
1007 n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
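Line 1007 is the critical-path recurrence: a node's delay is its own latency plus the largest delay among its children. A hedged recursive sketch of that recurrence, reusing the node struct from the first sketch; the memoization guard and the leaf base case are assumptions.

#include <algorithm>

static int compute_delay(node *n)
{
   if (n->delay)                        /* assumed memoization guard */
      return n->delay;

   if (n->children.empty()) {
      n->delay = n->latency;            /* assumed base case for leaf nodes */
   } else {
      for (size_t i = 0; i < n->children.size(); i++) {
         /* Same MAX2 recurrence as line 1007. */
         n->delay = std::max(n->delay,
                             n->latency + compute_delay(n->children[i]));
      }
   }
   return n->delay;
}

A scheduler can then favour the ready node with the largest delay, since it heads the longest remaining dependency chain.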
1047 * schedule it @latency cycles after @before, but no guarantees there.
1051 int latency)
1060 before->child_latency[i] = MAX2(before->child_latency[i], latency);
1079 before->child_latency[before->child_count] = latency;
1090 add_dep(before, after, before->latency);
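Lines 1047-1090 outline add_dep(): the @latency contract, a MAX2 update when an edge between the two nodes already exists (line 1060), the append of a new edge (line 1079), and a two-argument call that defaults to the producer's own latency (line 1090). A hedged reconstruction of that behaviour, replacing the simplified add_dep in the first sketch; it is not this file's exact implementation.

#include <algorithm>

/* @after can be scheduled @latency cycles after @before, with no guarantee. */
static void add_dep(node *before, node *after, int latency)
{
   if (!before || !after || before == after)
      return;

   for (size_t i = 0; i < before->children.size(); i++) {
      if (before->children[i] == after) {
         /* Edge already present: only raise its latency (line 1060). */
         before->child_latency[i] = std::max(before->child_latency[i], latency);
         return;
      }
   }
   /* New edge (line 1079). */
   before->children.push_back(after);
   before->child_latency.push_back(latency);
   after->parent_count++;
}

/* Two-argument form, defaulting to the producer's latency (line 1090). */
static void add_dep(node *before, node *after)
{
   add_dep(before, after, before->latency);
}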
1610 * latency.
1840 time + chosen->latency);
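The final match, line 1840, pushes a child's ready time out to the current time plus the chosen node's latency. A hedged sketch of the surrounding greedy list-scheduling loop, using the node struct from the first sketch; the "earliest unblocked" pick is a placeholder, not this file's actual choice heuristics.

#include <vector>
#include <algorithm>

static std::vector<node *> list_schedule(std::vector<node *> heads)
{
   std::vector<node *> order;
   int time = 0;

   while (!heads.empty()) {
      /* Placeholder choice: the ready node that unblocks soonest. */
      size_t best = 0;
      for (size_t i = 1; i < heads.size(); i++)
         if (heads[i]->unblocked_time < heads[best]->unblocked_time)
            best = i;

      node *chosen = heads[best];
      heads.erase(heads.begin() + best);
      order.push_back(chosen);

      /* Stall the clock until the chosen node's inputs are ready.  (A real
       * scheduler would also advance it by the instruction's issue time.)
       */
      time = std::max(time, chosen->unblocked_time);

      for (size_t i = 0; i < chosen->children.size(); i++) {
         node *child = chosen->children[i];
         /* As at line 1840: children can't start before time + latency. */
         child->unblocked_time = std::max(child->unblocked_time,
                                          time + chosen->latency);
         if (--child->parent_count == 0)
            heads.push_back(child);       /* all dependencies satisfied */
      }
   }
   return order;
}

This sketch only shows the latency bookkeeping; it deliberately leaves out concerns like register pressure that a production scheduler also weighs.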