#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Copyright (c) 2023 Huawei Device Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
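
# Benchmark driver for es2abc: compiles the pulled test cases a configurable
# number of times, records per-case timings and scores, writes plain-text and
# HTML reports, and can optionally build and run Hermes for comparison.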

import argparse
import math
import os
import time
from config import COMP, DEFAULT_RUNS, PARSE_ONLY, OPT_LEVEL, RUNS_NUM, DEFAULT_PARAMS, CUR_FILE_DIR, \
    DEFAULT_TESTCASES_DIR, SELECTED_PARAMETERS, JS_FILE_SUFFIX, TS_FILE_SUFFIX, REPORT_DIR, TEST_RESULT_FILE, \
    DEFAULT_OUTPUT_DIR, ABC_FILE_SUFFIX, DEFAULT_HERMES_PARAMS, DEFAULT_HERMES_PATH, HERMES_FILE_SUFFIX, \
    HERMES_CODE_PATH, ES2ABC, HERMES
from utils import traverse_dir, write_result, write_html, run_cmd_cwd, clear_folder_shutil, pull_cases, remove_dir, \
    pull_build_hermes


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--es2abc-tool',
                        required=True,
                        help="Path to the es2abc tool")
    parser.add_argument('--runs', required=False, type=int,
                        default=DEFAULT_RUNS,
                        help='Number of runs per test case')
    parser.add_argument('--opt-level', required=False, choices=['0', '1', '2'],
                        help="Specify the compiler optimization level")
    parser.add_argument('--parse-only', required=False,
                        default=False, action='store_true',
                        help="Only parse the input files during the test")
    parser.add_argument('--engine-comp', required=False,
                        default=False, action='store_true',
                        help="Also run the Hermes engine and compare against it")
    arguments = parser.parse_args()
    return arguments
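
# Example invocation (the script file name below is illustrative):
#   python3 run_benchmark.py --es2abc-tool /path/to/es2abc --runs 10 --engine-comp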


class Es2AbcBenchMarks:
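    """Run the es2abc (and optionally Hermes) compilation benchmarks and collect per-case results."""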
    def __init__(self, args):
        self.args = args
        self.results = []
        self.cmds = {}
        self.select_params = []

    def parse_args(self):
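        """Build the per-engine command lines and record the selected parameters."""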
        cmds = {ES2ABC: [self.args.es2abc_tool]}
        if self.args.parse_only and not self.args.engine_comp:
            cmds[ES2ABC].append('--parse-only')
            self.select_params.append(f'{PARSE_ONLY}:True')
        else:
            self.select_params.append(f'{PARSE_ONLY}:False')
        if self.args.opt_level and not self.args.engine_comp:
            # Pass the flag and its value as separate arguments, matching how the
            # other options are appended to the command list.
            cmds[ES2ABC].extend(['--opt-level', self.args.opt_level])
            self.select_params.append(f'{OPT_LEVEL}:{self.args.opt_level}')
        else:
            self.select_params.append(f'{OPT_LEVEL}:0')
        self.select_params.append(f'{RUNS_NUM}:{self.args.runs}')
        cmds[ES2ABC] += DEFAULT_PARAMS
        if self.args.engine_comp:
            cmds[HERMES] = [DEFAULT_HERMES_PATH] + DEFAULT_HERMES_PARAMS
            self.select_params.append(f'{COMP}:True')
        else:
            self.select_params.append(f'{COMP}:False')
        self.cmds = cmds
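        # Illustrative shape of the resulting command table (actual values come from config):
        #   {ES2ABC: ['<es2abc path>', ..., *DEFAULT_PARAMS],
        #    HERMES: [DEFAULT_HERMES_PATH, *DEFAULT_HERMES_PARAMS]}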

    def run(self):
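        """Run every test-case directory with each configured engine and write the reports."""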
        test_case_path = os.path.join(CUR_FILE_DIR, DEFAULT_TESTCASES_DIR)
        test_cases_files = traverse_dir(test_case_path)
        self.parse_args()
        html_data = {}
        selected_params = f'{SELECTED_PARAMETERS}: {"; ".join(self.select_params)}'
        write_result(selected_params)
        for dir_name, file_paths in test_cases_files.items():
            case_dir_name = os.path.basename(dir_name)
            case_info_html_data = {}
            case_info_path = os.path.join(REPORT_DIR, f'{case_dir_name}.html')
            print(f"---------------------------{case_dir_name}--------------------------------")
            for engine_type in self.cmds:
                print(f">>>engine {engine_type}")
                case_execution_time_sum, means = self.run_benchmark(file_paths, engine_type, case_dir_name,
                                                                    case_info_html_data)
                print(">>>Done.\n")
                case_execution_time_sum_avg = str(case_execution_time_sum)
                score_sum = self.args.runs / self.gen_score(means)
                test_result = [case_dir_name, case_execution_time_sum_avg, str(int(score_sum * 100)),
                               case_info_path, self.args.runs]
                html_data.setdefault(engine_type, []).append(test_result)
            write_html(case_info_html_data, case_info_path, selected_params, info=True)
            print("-------------------------------------------------------------------\n")
        write_html(html_data, os.path.join(REPORT_DIR, TEST_RESULT_FILE), selected_params, info=False)

    def run_benchmark(self, file_paths, engine_type, case_dir_name, case_info_html_data):
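        """Run every file in one test-case directory; return the summed average time (ms) and per-file means."""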
        case_execution_time_sum = 0
        means = []
        for file_path in file_paths:
            file_name = os.path.basename(file_path)
            if not (file_name.endswith(JS_FILE_SUFFIX) or file_name.endswith(TS_FILE_SUFFIX)):
                continue
            print(f'Running {file_name.replace(JS_FILE_SUFFIX, "").replace(TS_FILE_SUFFIX, "")}')
            case_execution_time, case_execution_times = self.run_single_benchmark(file_path, engine_type)
            case_execution_time_ms = str(int(case_execution_time * 1000))
            case_execution_time_sum += int(case_execution_time_ms)
            mean = self.gen_score(case_execution_times)
            means.append(mean)
            score = self.args.runs / mean
            score = str(int(score * 100))
            log_str = (f'engine {engine_type} case: {os.path.join(case_dir_name, file_name)} number of runs: '
                       f'{self.args.runs} avg time: {case_execution_time_ms}ms  Score:{score}\n')
            single_case_result = [os.path.join(case_dir_name, file_name), case_execution_time_ms, score,
                                  self.args.runs]
            case_info_html_data.setdefault(engine_type, []).append(single_case_result)
            write_result(log_str)
        return case_execution_time_sum, means

    def run_single_benchmark(self, file_path, engine_type):
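        """Compile one file `runs` times; return the average time per run (s) and the cumulative times."""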
        elapsed_sum = 0
        case_execution_times = []
        new_cmds = list(self.cmds[engine_type])
        output_file = os.path.join(DEFAULT_OUTPUT_DIR, os.path.basename(file_path).
                                   replace(JS_FILE_SUFFIX, ABC_FILE_SUFFIX).
                                   replace(TS_FILE_SUFFIX, ABC_FILE_SUFFIX))
        if engine_type == ES2ABC:
            new_cmds += [file_path, "--output", output_file]
        else:
            new_cmds += [output_file.replace(ABC_FILE_SUFFIX, HERMES_FILE_SUFFIX), file_path]
        for _ in range(self.args.runs):
            start = time.time()
            run_cmd_cwd(new_cmds)
            elapsed_sum += time.time() - start
            # Record the cumulative elapsed time after each run; gen_score later takes
            # the geometric mean of this series.
            case_execution_times.append(elapsed_sum)
        return elapsed_sum / self.args.runs, case_execution_times

    @staticmethod
    def gen_score(numbers):
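        """Return the geometric mean of `numbers`, i.e. exp(mean(ln(x)))."""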
        log = 0
        for num in numbers:
            log += math.log(num)
        mean = math.exp(log / len(numbers))
        return mean


def prepare_and_run(args):
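    """Reset the output, report, and test-case directories, pull the cases, and run the benchmark."""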
    clear_folder_shutil(DEFAULT_OUTPUT_DIR)
    clear_folder_shutil(REPORT_DIR)
    clear_folder_shutil(DEFAULT_TESTCASES_DIR)
    pull_cases_success = pull_cases()
    if pull_cases_success:
        abc_runner = Es2AbcBenchMarks(args)
        abc_runner.run()
        remove_dir(DEFAULT_OUTPUT_DIR)
    print("> > > Done.")


def main():
    args = parse_args()
    print("> > > benchmark running")
    if args.engine_comp:
        clear_folder_shutil(HERMES_CODE_PATH)
        print("Building Hermes...")
        build_result = pull_build_hermes()
        if not build_result:
            raise Exception('build hermes failed.')
    prepare_and_run(args)


if __name__ == "__main__":
    main()
