/third_party/protobuf/benchmarks/java/src/main/java/com/google/protobuf/
  ProtoCaliperBenchmark.java
     13  import com.google.protobuf.benchmarks.Benchmarks.BenchmarkDataset;
     38  return com.google.protobuf.benchmarks.BenchmarkMessage1Proto3.GoogleMessage1
     46  return com.google.protobuf.benchmarks.BenchmarkMessage1Proto2.GoogleMessage1
     54  return com.google.protobuf.benchmarks.BenchmarkMessage2.GoogleMessage2.getDefaultInstance();
     61  com.google.protobuf.benchmarks.BenchmarkMessage38.registerAllExtensions(extensions);
     62  com.google.protobuf.benchmarks.BenchmarkMessage37.registerAllExtensions(extensions);
     63  com.google.protobuf.benchmarks.BenchmarkMessage36.registerAllExtensions(extensions);
     64  com.google.protobuf.benchmarks.BenchmarkMessage35.registerAllExtensions(extensions);
     65  com.google.protobuf.benchmarks.BenchmarkMessage34.registerAllExtensions(extensions);
     66  com.google.protobuf.benchmarks [all...]

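The Caliper benchmark above obtains each test message through its type's default instance (getDefaultInstance()). A minimal C++ analogue of that prototype pattern, assuming the generated benchmarks.pb.h is on the include path; the helper name is mine, not the benchmark's:

    #include <memory>
    #include "benchmarks.pb.h"

    // Sketch: the default instance is an immutable prototype; New()
    // yields a fresh mutable message of the same concrete type.
    std::unique_ptr<google::protobuf::Message> NewGoogleMessage2() {
      const google::protobuf::Message& prototype =
          benchmarks::proto2::GoogleMessage2::default_instance();
      return std::unique_ptr<google::protobuf::Message>(prototype.New());
    }
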
/third_party/protobuf/benchmarks/python/
  python_benchmark_messages.cc
      3  #include "benchmarks.pb.h"
     17  benchmarks::BenchmarkDataset().descriptor();
     18  benchmarks::proto3::GoogleMessage1().descriptor();
     19  benchmarks::proto2::GoogleMessage1().descriptor();
     20  benchmarks::proto2::GoogleMessage2().descriptor();
     21  benchmarks::google_message3::GoogleMessage3().descriptor();
     22  benchmarks::google_message4::GoogleMessage4().descriptor();

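Every match above sits inside the extension module's init function, initlibbenchmark_messages(), and each .descriptor() call exists purely for its side effect: constructing a temporary message forces the generated C++ type to be linked into the module and registered with the descriptor pool, so it can later be looked up by name. A generic sketch of the idiom; the header and type names here are hypothetical, not from this file:

    #include "my_messages.pb.h"  // hypothetical generated header

    // Sketch: touching a generated descriptor pins the message type into
    // the binary and registers it in the descriptor pool; the returned
    // Descriptor pointers are deliberately discarded.
    void ForceMessageRegistration() {
      mypkg::SomeMessage().descriptor();
      mypkg::OtherMessage().descriptor();
    }
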
/third_party/benchmark/tools/gbench/
  util.py
      1  """util.py - General utilities for running, loading, and processing benchmarks
    137  if 'benchmarks' in results:
    138  results['benchmarks'] = list(filter(benchmark_wanted,
    139  results['benchmarks']))
    144  benchmarks = result['benchmarks']
    147  benchmarks = sorted(
    148  benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1)
    149  benchmarks = sorted(
    150  benchmarks, ke [all...]

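The util.py matches above filter the parsed JSON results down to the wanted benchmarks, then sort runs by repetition_index, substituting -1 when the key is absent. Python's sorted() is stable, so the C++ counterpart is a stable_sort with a defaulted key; a sketch with an assumed record type (the real tool operates on JSON dicts):

    #include <algorithm>
    #include <optional>
    #include <string>
    #include <vector>

    struct Run {                         // assumed shape, not the tool's
      std::string name;
      std::optional<int> repetition_index;
    };

    // Stable sort keeps ties in their original order, matching sorted().
    void SortByRepetition(std::vector<Run>& runs) {
      std::stable_sort(runs.begin(), runs.end(),
                       [](const Run& a, const Run& b) {
                         return a.repetition_index.value_or(-1) <
                                b.repetition_index.value_or(-1);
                       });
    }
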
/third_party/protobuf/benchmarks/util/
  proto3_data_stripper.cc
      1  #include "benchmarks.pb.h"
     17  << "', please make sure you are running this command from the benchmarks"
     37  benchmarks::BenchmarkDataset dataset;
     43  if (dataset.message_name() == "benchmarks.proto3.GoogleMessage1") {
     44  message = new benchmarks::proto3::GoogleMessage1;
     45  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage1") {
     46  message = new benchmarks::proto2::GoogleMessage1;
     47  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage2") {
     48  message = new benchmarks::proto2::GoogleMessage2;
     50  "benchmarks [all...]
  gogo_data_scrubber.cc
      1  #include "benchmarks.pb.h"
     17  << "', please make sure you are running this command from the benchmarks"
     37  benchmarks::BenchmarkDataset dataset;
     43  if (dataset.message_name() == "benchmarks.proto3.GoogleMessage1") {
     44  message = new benchmarks::proto3::GoogleMessage1;
     45  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage1") {
     46  message = new benchmarks::proto2::GoogleMessage1;
     47  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage2") {
     48  message = new benchmarks::proto2::GoogleMessage2;
     50  "benchmarks [all...]

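Both utilities above pick the concrete message class by string-matching dataset.message_name(). A hedged sketch of that dispatch, returning an owning pointer instead of the raw new the tools use; the helper name is illustrative:

    #include <memory>
    #include <string>
    #include "benchmarks.pb.h"

    // Map a dataset's message_name onto a freshly allocated message.
    std::unique_ptr<google::protobuf::Message> NewMessageFor(
        const benchmarks::BenchmarkDataset& dataset) {
      const std::string& name = dataset.message_name();
      if (name == "benchmarks.proto3.GoogleMessage1")
        return std::make_unique<benchmarks::proto3::GoogleMessage1>();
      if (name == "benchmarks.proto2.GoogleMessage1")
        return std::make_unique<benchmarks::proto2::GoogleMessage1>();
      if (name == "benchmarks.proto2.GoogleMessage2")
        return std::make_unique<benchmarks::proto2::GoogleMessage2>();
      return nullptr;  // unknown dataset type
    }
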
/third_party/node/tools/
  test-v8.bat
     28  if not defined test_v8_intl goto test-v8-benchmarks
     33  :test-v8-benchmarks
     35  echo running 'python tools\run-tests.py %common_v8_test_options% benchmarks --slow-tests-cutoff 1000000 --json-test-results v8-benchmarks-tap.xml'
     36  call python tools\run-tests.py %common_v8_test_options% benchmarks --slow-tests-cutoff 1000000 --json-test-results ./v8-benchmarks-tap.xml
     37  call python ..\..\tools\v8-json-to-junit.py < v8-benchmarks-tap.xml > v8-benchmarks-tap.json

/third_party/node/benchmark/
  _cli.js
      7  const benchmarks = {};
     14  benchmarks[category] = fs.readdirSync(path.resolve(__dirname, category))
     87  CLI.prototype.benchmarks = function() {
     91  this.items = Object.keys(benchmarks);
     95  if (benchmarks[category] === undefined) {
     99  for (const scripts of benchmarks[category]) {
  _benchmark_progress.js
     28  constructor(queue, benchmarks) {
     30  this.benchmarks = benchmarks; // Filenames of scheduled benchmarks.
     33  // Time when starting to run benchmarks.
     36  this.runsPerFile = queue.length / benchmarks.length;
     87  const scheduledFiles = this.benchmarks.length;

/third_party/node/deps/v8/tools/
  try_perf.py
     55  parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
     82  if not options.benchmarks:
     83  print('Please specify the benchmarks to run as arguments.')
     86  for benchmark in options.benchmarks:
     90  'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
    107  benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
    108  cmd.append('-p \'testfilter=[%s]\'' % ','.join(benchmarks))

/third_party/protobuf/benchmarks/cpp/
  cpp_benchmark.cc
     34  #include "benchmarks.pb.h"
     45  using benchmarks::BenchmarkDataset;
    198  "this command from the benchmarks/ "
    220  if (dataset.message_name() == "benchmarks.proto3.GoogleMessage1") {
    221  RegisterBenchmarksForType<benchmarks::proto3::GoogleMessage1>(dataset);
    222  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage1") {
    223  RegisterBenchmarksForType<benchmarks::proto2::GoogleMessage1>(dataset);
    224  } else if (dataset.message_name() == "benchmarks.proto2.GoogleMessage2") {
    225  RegisterBenchmarksForType<benchmarks::proto2::GoogleMessage2>(dataset);
    227  "benchmarks [all...]

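Here the same message_name dispatch resolves to a template instantiation, RegisterBenchmarksForType<T>, which registers the per-dataset benchmarks. With google/benchmark, that kind of dynamic registration might look like the sketch below; the function and benchmark names are illustrative, not taken from cpp_benchmark.cc:

    #include <benchmark/benchmark.h>
    #include <string>

    // Sketch: time re-parsing one serialized payload into MessageT.
    template <class MessageT>
    void BM_Parse(benchmark::State& state, std::string payload) {
      MessageT msg;
      for (auto _ : state) {
        msg.ParseFromString(payload);
      }
      state.SetBytesProcessed(state.iterations() *
                              static_cast<int64_t>(payload.size()));
    }

    // Registered once per dataset, e.g. inside RegisterBenchmarksForType:
    //   benchmark::RegisterBenchmark(("parse_" + dataset.name()).c_str(),
    //                                BM_Parse<MessageT>, dataset.payload(0));
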
/third_party/benchmark/src/
  benchmark.cc
     60  // Print a list of benchmarks. This option overrides all other options.
     63  // A regular expression that specifies the set of benchmarks to execute. If
     64  // this flag is empty, or if this flag is the string \"all\", all benchmarks
     85  // taken into account. This e.g can be necessary for benchmarks of code which
     95  // If set, enable random interleaving of repetitions of all benchmarks.
    101  // repeated benchmarks. Affects all reporters.
    106  // repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects
    346  void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
    356  for (const BenchmarkInstance& benchmark : benchmarks) {
    386  // Vector of benchmarks t
    599  std::vector<internal::BenchmarkInstance> benchmarks; [all...]

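The comments matched above document the binary's flags: --benchmark_list_tests to print the benchmark list, --benchmark_filter as a regex over benchmark names (empty or "all" selects everything), plus random interleaving and aggregate-reporting options. For context, a minimal benchmark binary that routes those flags through the library; this is a generic sketch, not code from benchmark.cc:

    #include <benchmark/benchmark.h>

    // A trivial benchmark so the binary has something to list and filter.
    static void BM_Noop(benchmark::State& state) {
      int x = 0;
      for (auto _ : state) {
        benchmark::DoNotOptimize(x += 1);
      }
    }
    BENCHMARK(BM_Noop);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);  // consumes --benchmark_* flags
      if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
      benchmark::RunSpecifiedBenchmarks();  // honors --benchmark_filter
      return 0;
    }
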
/third_party/benchmark/src/
  benchmark_register.cc
     71  // Class for managing registered benchmarks. Note that each registered
     72  // benchmark identifies a family of related benchmarks to run.
     86  std::vector<BenchmarkInstance>* benchmarks,
    114  FindBenchmarks(std::string spec, std::vector<BenchmarkInstance>* benchmarks, std::ostream* ErrStream)
    160  // family size. this doesn't take into account any disabled benchmarks
    162  if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
    174  benchmarks->push_back(std::move(instance));
    197  FindBenchmarksInternal(const std::string& re, std::vector<BenchmarkInstance>* benchmarks, std::ostream* Err)
    200  return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err [all...]

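FindBenchmarks expands each registered family into concrete BenchmarkInstances and keeps the ones whose names match the spec; per the benchmark.cc comments above, an empty spec or "all" is normalized to "." (which is why the reserve fast path at line 162 tests for "."). A standalone sketch of that selection rule using std::regex (the library wraps its own Regex type instead):

    #include <regex>
    #include <string>

    // Sketch: empty spec or "all" selects every benchmark; anything else
    // is treated as a regular expression over benchmark names.
    bool SpecSelects(std::string spec, const std::string& benchmark_name) {
      if (spec.empty() || spec == "all") spec = ".";
      return std::regex_search(benchmark_name, std::regex(spec));
    }
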
/third_party/benchmark/src/
  benchmark_api_internal.h
     76  std::vector<BenchmarkInstance>* benchmarks,

/third_party/python/Tools/importbench/
  importbench.py
    172  benchmarks = (from_cache, builtin_mod,
    181  for b in benchmarks:
    183  benchmarks = [b]
    196  len(benchmarks) * seconds * repeat, __import__))
    198  for benchmark in benchmarks:
    211  for benchmark in benchmarks:

/third_party/json/
  Makefile
     43  @echo "run_benchmarks - build and run benchmarks"
     56  # benchmarks
     60  rm -fr cmake-build-benchmarks
     61  mkdir cmake-build-benchmarks
     62  cd cmake-build-benchmarks ; cmake ../tests/benchmarks -GNinja -DCMAKE_BUILD_TYPE=Release
     63  cd cmake-build-benchmarks ; ninja
     64  cd cmake-build-benchmarks ; ./json_benchmarks
    250  rm -fr benchmarks/files/numbers/*.json
    251  rm -fr cmake-build-benchmarks fuz [all...]

/third_party/skia/third_party/externals/abseil-cpp/absl/container/internal/
  raw_hash_set_probe_benchmark.cc
     41  absl::string_view benchmarks;
     44  return !benchmarks.empty() ? OutputStyle::kBenchmark : OutputStyle::kRegular;
    464  return benchmarks.empty() || benchmarks == "all"
    466  : new std::regex(std::string(benchmarks));
    514  benchmarks = next();
    517  benchmarks = arg;
    523  if (benchmarks.empty()) benchmarks="all";
    559  absl::PrintF(" \"benchmarks\" [all...]

/third_party/protobuf/benchmarks/protobuf.js/
  protobufjs_benchmark.js
     25  root.benchmarks.BenchmarkDataset.decode(fs.readFileSync(filename));
     53  benchmarks: {

/third_party/jerryscript/tools/runners/
  run-benchmarks.sh
     22  ./tools/perf.sh 5 $ENGINE ./tests/benchmarks/$1.js
     23  ./tools/rss-measure.sh $ENGINE ./tests/benchmarks/$1.js

/third_party/protobuf/benchmarks/js/
  js_benchmark.js
     35  proto.benchmarks.BenchmarkDataset.deserializeBinary(fs.readFileSync(filename));
     63  benchmarks: {

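Both JavaScript benchmarks (protobuf.js above and this one) load a serialized BenchmarkDataset straight from disk with fs.readFileSync before decoding it. A C++ equivalent of that load step, sketched with iostreams and simplified error handling:

    #include <fstream>
    #include <sstream>
    #include <string>
    #include "benchmarks.pb.h"

    // Sketch: read a file's bytes and parse them as a BenchmarkDataset.
    bool LoadDataset(const std::string& path,
                     benchmarks::BenchmarkDataset* dataset) {
      std::ifstream in(path, std::ios::binary);
      if (!in) return false;
      std::ostringstream contents;
      contents << in.rdbuf();
      return dataset->ParseFromString(contents.str());
    }
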
/third_party/littlefs/
  Makefile
    430  ## Run the benchmarks, -j enables parallel benchmarks
    435  ## List the benchmarks
    440  ## Summarize the benchmarks
    441  .PHONY: benchmarks
    442  benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded
    443  benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv
    451  ## Compare benchmarks against a previous run
    452  .PHONY: benchmarks-diff
    453  benchmarks [all...]

/third_party/pulseaudio/scripts/
  benchmark_memory_usage.sh
     55  BENCHMARKS_DIR=${SCRIPTS_DIR}/benchmarks
    122  error "Please provide a large wave file (~ $((MAX_CLIENTS*2))s) then redo the benchmarks"

/third_party/typescript/tests/baselines/reference/
  parserharness.js
    615  export var benchmarks: { new (): Benchmark; }[] = [];
    646  for (var i = 0; i < benchmarks.length; i++) {
    647  var b = new benchmarks[i]();
    682  benchmarks.push(BenchmarkClass);
   2609  Perf.benchmarks = [];
   2625  for (var i = 0; i < Perf.benchmarks.length; i++) {
   2626  var b = new Perf.benchmarks[i]();
   2655  Perf.benchmarks.push(BenchmarkClass);

/third_party/python/Lib/test/
  re_tests.py
     17  benchmarks = [

/third_party/node/
  Makefile
     35  TAP_V8_BENCHMARKS := --junitout $(PWD)/v8-benchmarks-tap.xml
     44  TAP_V8_BENCHMARKS_JSON := $(PWD)/v8-benchmarks-tap.json
    707  .PHONY: test-v8-benchmarks
    728  test-v8-benchmarks: v8
    731  benchmarks \
    738  test-v8-all: test-v8 test-v8-intl test-v8-benchmarks test-v8-updates
    741  test-v8 test-v8-intl test-v8-benchmarks test-v8-all:
   1317  $(warning Please use benchmark/run.js or benchmark/compare.js to run the benchmarks.)

/third_party/protobuf/
  tests.sh
     35  cd benchmarks && make cpp-benchmark && cd ..