
Searched refs:benchmark_name (Results 1 - 13 of 13) sorted by relevance

/third_party/skia/third_party/externals/angle2/scripts/
process_angle_perf_results.py 130 for benchmark_name, directories in benchmark_directory_map.items():
133 is_ref = '.reference' in benchmark_name
157 logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
162 logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
164 benchmark_enabled_map[benchmark_name] = True
180 for benchmark_name, directories in benchmark_directory_map.items():
186 name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
187 benchmark_logs_links[benchmark_name].append(uploaded_link)
234 benchmark_name = _get_benchmark_name(directory)
235 if benchmark_name i
[all...]
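
The ANGLE script above walks a map from benchmark name to per-shard result directories and treats names containing '.reference' as runs of the pinned reference build. A minimal C++ sketch of that grouping logic follows; the map contents and variable names are hypothetical stand-ins, only the '.reference' convention and the name-to-directories mapping come from the snippet.

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Hypothetical stand-in for benchmark_directory_map in the Python script:
  // benchmark name -> result directories produced by the shards.
  std::map<std::string, std::vector<std::string>> benchmark_directory_map = {
      {"angle_perftests", {"out/shard0", "out/shard1"}},
      {"angle_perftests.reference", {"out/ref_shard0"}},
  };

  std::map<std::string, bool> benchmark_enabled_map;
  for (const auto& [benchmark_name, directories] : benchmark_directory_map) {
    // '.reference' in the name marks the reference build, mirroring the
    // script's `is_ref = '.reference' in benchmark_name`.
    bool is_ref = benchmark_name.find(".reference") != std::string::npos;
    benchmark_enabled_map[benchmark_name] = !directories.empty();
    std::cout << benchmark_name << (is_ref ? " (reference)" : "") << ": "
              << directories.size() << " shard dir(s)\n";
  }
}
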
/third_party/node/deps/v8/tools/
perf-compare.py 175 def getBenchmark(self, benchmark_name):
176 benchmark_object = self.benchmarks_.get(benchmark_name)
178 benchmark_object = Benchmark(benchmark_name)
179 self.benchmarks_[benchmark_name] = benchmark_object
352 def StartBenchmark(self, benchmark_name):
354 self.Print(" <td class=\"name-column\">%s</td>" % benchmark_name)
428 benchmark_name = "/".join(trace["graphs"][1:])
435 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
450 for benchmark_name in benchmark_suite_object.SortedTestKeys():
451 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
[all...]
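
getBenchmark() in the V8 comparison tool is a get-or-create lookup: return the cached Benchmark object for a name, or construct and cache one on first use (the names themselves are slash-joined graph paths, per line 428). A minimal C++ sketch of the same pattern, with hypothetical class and method names standing in for the Python originals:

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-in for the Benchmark class in perf-compare.py.
class Benchmark {
 public:
  explicit Benchmark(std::string name) : name_(std::move(name)) {}
  const std::string& name() const { return name_; }
 private:
  std::string name_;
};

class BenchmarkSuite {
 public:
  // Get-or-create, mirroring getBenchmark(): return the existing entry for
  // benchmark_name, or construct and cache a new one on first lookup.
  Benchmark& GetBenchmark(const std::string& benchmark_name) {
    auto it = benchmarks_.find(benchmark_name);
    if (it == benchmarks_.end()) {
      it = benchmarks_
               .emplace(benchmark_name,
                        std::make_unique<Benchmark>(benchmark_name))
               .first;
    }
    return *it->second;
  }
 private:
  std::map<std::string, std::unique_ptr<Benchmark>> benchmarks_;
};

int main() {
  BenchmarkSuite suite;
  Benchmark& a = suite.GetBenchmark("Octane/Richards");
  Benchmark& b = suite.GetBenchmark("Octane/Richards");
  std::cout << (&a == &b) << "\n";  // prints 1: second lookup hits the cache
}
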
/third_party/protobuf/benchmarks/php/
PhpBenchmark.php 47 private $benchmark_name; variable
53 public function __construct($benchmark_name, $args, $total_bytes,
56 $this->benchmark_name = $benchmark_name; variable
73 call_user_func_array($this->benchmark_name, array(&$this->args));
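
The PHP benchmark stores the benchmark's callable name in $benchmark_name and invokes it later via call_user_func_array. A C++ analogue of that name-keyed dispatch is a registry of std::function objects; everything below (registry contents, toy benchmark bodies) is a hypothetical illustration of the pattern, not protobuf code.

#include <functional>
#include <iostream>
#include <map>
#include <string>

int main() {
  // Hypothetical registry mapping a benchmark name to its callable, standing
  // in for PHP's call_user_func_array($this->benchmark_name, ...).
  std::map<std::string, std::function<void(int&)>> registry;
  registry["parse"] = [](int& args) { args += 1; };    // toy benchmark body
  registry["serialize"] = [](int& args) { args *= 2; };

  std::string benchmark_name = "parse";
  int args = 41;
  // Look the callable up by its stored name and invoke it with the shared
  // argument, as PhpBenchmark.php does with its $benchmark_name member.
  registry.at(benchmark_name)(args);
  std::cout << benchmark_name << " -> " << args << "\n";  // parse -> 42
}
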
/third_party/python/Tools/importbench/
importbench.py 212 benchmark_name = benchmark.__doc__
213 old_result = max(prev_results[benchmark_name])
214 new_result = max(new_results[benchmark_name])
218 print(benchmark_name, ':', result)
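
importbench keys results by the benchmark's docstring and compares the best sample from each run via max(). A short C++ sketch of that best-of comparison, with made-up sample data and the assumption (suggested by the max() calls) that larger samples are better:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Hypothetical per-benchmark samples, keyed by name the way importbench.py
  // keys them (benchmark_name = benchmark.__doc__).
  std::map<std::string, std::vector<double>> prev_results = {
      {"import builtins", {12.1, 11.8, 12.4}}};
  std::map<std::string, std::vector<double>> new_results = {
      {"import builtins", {13.0, 13.6, 12.9}}};

  for (const auto& [benchmark_name, prev] : prev_results) {
    const auto& curr = new_results.at(benchmark_name);
    // Compare the best (max) sample from each run, as lines 213-214 do.
    double old_result = *std::max_element(prev.begin(), prev.end());
    double new_result = *std::max_element(curr.begin(), curr.end());
    std::cout << benchmark_name << " : " << new_result / old_result
              << "x of previous best\n";
  }
}
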
/third_party/benchmark/test/
skip_with_error_test.cc 36 BM_CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
register_benchmark_test.cc 33 BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
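
Both tests intercept reported runs and assert that run.benchmark_name() matches the registered name. BM_CHECK is internal to the library's test code, but the same check can be written against the public API with a reporter subclass; a minimal sketch, assuming google/benchmark is installed (link with -lbenchmark -lpthread):

#include <benchmark/benchmark.h>
#include <cassert>
#include <vector>

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {}
}
BENCHMARK(BM_Noop);

// Rough analogue of the tests' CheckRun() helper: capture each reported run
// and verify run.benchmark_name() is the name we registered.
class NameCheckingReporter : public benchmark::ConsoleReporter {
 public:
  void ReportRuns(const std::vector<Run>& runs) override {
    for (const Run& run : runs) {
      assert(run.benchmark_name() == "BM_Noop" && "unexpected benchmark name");
    }
    ConsoleReporter::ReportRuns(runs);
  }
};

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  NameCheckingReporter reporter;
  benchmark::RunSpecifiedBenchmarks(&reporter);
}
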
/third_party/benchmark/src/
statistics.cc 133 BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
csv_reporter.cc 111 Out << CsvEscape(run.benchmark_name()) << ","; in PrintRunData()
reporter.cc 98 std::string BenchmarkReporter::Run::benchmark_name() const { in benchmark_name() function in benchmark::BenchmarkReporter::Run
console_reporter.cc 136 result.benchmark_name().c_str()); in PrintRunData()
json_reporter.cc 224 out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
/third_party/benchmark/tools/gbench/
report.py 257 benchmark_name = partition[0][0]['name']
298 'name': benchmark_name,
/third_party/benchmark/include/benchmark/
benchmark.h 1759 std::string benchmark_name() const;
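
This declaration in BenchmarkReporter::Run is the accessor all the built-in reporters above call: the CSV, console, and JSON reporters each read run.benchmark_name() when emitting a row. A custom reporter can do the same; a minimal sketch against the public API (benchmark body and output format are illustrative, link with -lbenchmark -lpthread):

#include <benchmark/benchmark.h>
#include <cstdio>
#include <string>
#include <vector>

static void BM_StringCopy(benchmark::State& state) {
  std::string src(64, 'x');
  for (auto _ : state) {
    std::string copy(src);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_StringCopy);

class OneLineReporter : public benchmark::BenchmarkReporter {
 public:
  bool ReportContext(const Context&) override { return true; }
  void ReportRuns(const std::vector<Run>& runs) override {
    for (const Run& run : runs) {
      // benchmark_name() is the accessor declared at benchmark.h:1759;
      // per reporter.cc:98 it returns the full run name, including any
      // aggregate suffix.
      std::printf("%s,%g\n", run.benchmark_name().c_str(),
                  run.GetAdjustedRealTime());
    }
  }
};

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  OneLineReporter reporter;
  benchmark::RunSpecifiedBenchmarks(&reporter);
}
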
