1#!/usr/bin/env python3 2# coding=utf-8 3 4# 5# Copyright (c) 2020 Huawei Device Co., Ltd. 6# Licensed under the Apache License, Version 2.0 (the "License"); 7# you may not use this file except in compliance with the License. 8# You may obtain a copy of the License at 9# 10# http://www.apache.org/licenses/LICENSE-2.0 11# 12# Unless required by applicable law or agreed to in writing, software 13# distributed under the License is distributed on an "AS IS" BASIS, 14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15# See the License for the specific language governing permissions and 16# limitations under the License. 17# 18 19import json 20import os 21import shutil 22import sys 23import stat 24 25FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL 26MODES = stat.S_IWUSR | stat.S_IRUSR 27 28SETTING_RED_STYLE = """\033[33;31m%s\033[0m""" 29 30 31def load_json_data(json_file_path): 32 json_data = {} 33 if os.path.isfile(json_file_path): 34 try: 35 with open(json_file_path, 'r') as file_read: 36 json_data = json.load(file_read) 37 if not json_data: 38 print("Loading file \"%s\" error" % json_file_path) 39 return {} 40 except(IOError, ValueError) as err_msg: 41 print("Error for load_json_data: ", json_file_path) 42 else: 43 print("Info: \"%s\" not exist." % json_file_path) 44 return json_data 45 46 47def get_file_list(find_path, postfix): 48 file_names = os.listdir(find_path) 49 file_list = [] 50 if len(file_names) > 0: 51 for name in file_names: 52 if name.find(postfix) != -1 and name[-len(postfix):] == postfix: 53 file_list.append(name) 54 return file_list 55 56 57def get_file_list_by_postfix(path, postfix, filter_jar=""): 58 file_list = [] 59 for dirs in os.walk(path): 60 files = get_file_list(find_path=dirs[0], postfix=postfix) 61 for file_path in files: 62 if "" != file_path and -1 == file_path.find(__file__): 63 pos = file_path.rfind(os.sep) 64 file_name = file_path[pos + 1:] 65 file_path = os.path.join(dirs[0], file_path) 66 if filter_jar != "" and file_name == filter_jar: 67 print(SETTING_RED_STYLE % ("Skipped %s" % file_path)) 68 continue 69 file_list.append(file_path) 70 return file_list 71 72 73class BenchmarkReport(object): 74 SUBSYSTEM_SUMMARY = "OHOS_SUBSYSTEM_SUMMARY" 75 ENABLE_LINK = "OHOS_ENABLE_PASSCASE_LINK" 76 REPORT_SUMMARY = "OHOS_REPORT_SUMMARY" 77 LEGEND_DATA = "OHOS_LEGEND_DATA" 78 XAXIS_DATA = "OHOS_XAXIS_DATA" 79 SERIES_DATA = "OHOS_TITLE_DATA" 80 TITLE_TEXT = "OHOS_TITLE_TEST" 81 YAXIS_FORMATTER = "OHOS_YAXIS_FORMATTER" 82 83 84 def __init__(self): 85 self.index = 0 86 self.filtered = ["detail", "id", "pm", "owner", 87 "Count", "ScoreUnit", "Variance"] 88 self.default_item = [] 89 self.max_index = 1000 90 self.sbs_mdl_summary_list = [] 91 self.benchmark_list = [] 92 self._init_default_item() 93 94 def generate_benchmark(self, args): 95 if args is None or len(args) <= 2: 96 print(SETTING_RED_STYLE % 97 "Error: source_dir and report_dir can't be empty") 98 return 99 100 src_path = sys.argv[1] 101 dest_path = os.path.abspath(sys.argv[2]) 102 103 print("source_dir: %s" % src_path) 104 print("report_dir: %s" % dest_path) 105 106 if not os.path.exists(src_path): 107 print("%s not exists" % src_path) 108 return 109 110 if os.path.exists(dest_path): 111 shutil.rmtree(dest_path) 112 113 self._get_benchmark_result_data(src_path) 114 self._generate_benchmark_summary_report(os.path.abspath(dest_path)) 115 self._generate_all_benchmark_detail(os.path.abspath(dest_path)) 116 117 def _init_default_item(self): 118 self.default_item.append("Subsystem") 119 self.default_item.append("Module") 120 self.default_item.append("Testsuit") 121 self.default_item.append("Benchmark") 122 self.default_item.append("Mode") 123 self.default_item.append("RunType") 124 self.default_item.append("TestTargetName") 125 self.default_item.append("TestTargetMethod") 126 self.default_item.append("Repetitions") 127 self.default_item.append("RepetitionIndex") 128 self.default_item.append("Threads") 129 self.default_item.append("Iterations") 130 self.default_item.append("Score") 131 self.default_item.append("CpuTime") 132 self.max_index = len(self.default_item) + 1000 133 134 def _remove_iterations(self, mdl_summary_list): 135 final_mdl_summary = [] 136 for item_info in mdl_summary_list: 137 copy_item = item_info.copy() 138 copy_item.pop("Iterations") 139 final_mdl_summary.append(copy_item) 140 return final_mdl_summary 141 142 def _get_benchmark_result_data(self, src_path): 143 self.benchmark_list = [] 144 self.sbs_mdl_summary_list = [] 145 system_summary_dic = {} 146 json_files = get_file_list_by_postfix(src_path, ".json") 147 print("json_files %s" % json_files) 148 for json_file in json_files: 149 pos = json_file.find(src_path) 150 subsystem_root = json_file[pos + len(src_path):] 151 dir_list = subsystem_root.split(os.sep) 152 sbs_name = dir_list[1] 153 module_name = dir_list[2] 154 testsuit_name = dir_list[len(dir_list) - 2] 155 156 print(SETTING_RED_STYLE % ( 157 "subsystem_root: %s \n\n" 158 "subsystem_name: %s \n\n" 159 "module_name: %s \n\n" 160 "testsuit_name: %s \n\n" % 161 (subsystem_root, str(sbs_name), 162 str(module_name), str(testsuit_name)))) 163 164 mdl_summary_list = self._get_subsystem_cxx_benchmark(sbs_name, 165 module_name, testsuit_name, json_file) 166 self.benchmark_list.extend(mdl_summary_list) 167 168 if sbs_name in system_summary_dic.keys() \ 169 and testsuit_name in system_summary_dic[sbs_name].keys(): 170 subsystem_summary_dic = \ 171 system_summary_dic[sbs_name][testsuit_name] 172 subsystem_summary_dic["children"] += \ 173 self._remove_iterations(mdl_summary_list) 174 else: 175 self.index += 1 176 subsystem_summary_dic = dict() 177 subsystem_summary_dic["id"] = self.index 178 subsystem_summary_dic["Subsystem"] = sbs_name 179 subsystem_summary_dic["Testsuit"] = testsuit_name 180 subsystem_summary_dic["Module"] = "---" 181 subsystem_summary_dic["Detail"] = "" 182 subsystem_summary_dic["TestTargetName"] = "---" 183 subsystem_summary_dic["TestTargetMethod"] = "---" 184 subsystem_summary_dic["RunType"] = "---" 185 subsystem_summary_dic["Benchmark"] = "---" 186 subsystem_summary_dic["Mode"] = "---" 187 subsystem_summary_dic["Count"] = "---" 188 subsystem_summary_dic["Score"] = "---" 189 subsystem_summary_dic["ScoreUnit"] = "---" 190 subsystem_summary_dic["children"] = [] 191 subsystem_summary_dic["children"] += \ 192 self._remove_iterations(mdl_summary_list) 193 self.sbs_mdl_summary_list.append(subsystem_summary_dic) 194 system_summary_dic[sbs_name] = {} 195 system_summary_dic[sbs_name][testsuit_name] = \ 196 subsystem_summary_dic 197 subsystem_summary_dic["pm"] = "unknown" 198 subsystem_summary_dic["owner"] = "unknown" 199 200 def _get_subsystem_cxx_benchmark(self, sbs_name, module_name, 201 testsuit_name, json_file): 202 sbs_mdl_summary_list = list() 203 json_data_dic = load_json_data(json_file) 204 for json_data in json_data_dic.get("benchmarks", []): 205 self.index += 1 206 sbs_mdl_summary = dict() 207 sbs_mdl_summary["id"] = self.index 208 sbs_mdl_summary["Subsystem"] = sbs_name 209 sbs_mdl_summary["Module"] = module_name 210 sbs_mdl_summary["Testsuit"] = testsuit_name 211 sbs_mdl_summary["pm"] = "unknown" 212 sbs_mdl_summary["owner"] = "unknown" 213 214 benchmark_name = json_data.get("name", "").replace("/", "_"). \ 215 replace(":", "_") 216 test_target = benchmark_name.split("_")[0] 217 sbs_mdl_summary["TestTargetName"] = test_target 218 sbs_mdl_summary["TestTargetMethod"] = "%s()" % test_target 219 sbs_mdl_summary["RunType"] = str(json_data.get("run_type", "")) 220 sbs_mdl_summary["Mode"] = \ 221 str(json_data.get("aggregate_name", "normal")) 222 sbs_mdl_summary["Benchmark"] = benchmark_name 223 sbs_mdl_summary["Repetitions"] = json_data.get("repetitions", 0) 224 sbs_mdl_summary["RepetitionIndex"] = \ 225 json_data.get("repetition_index", 0) 226 sbs_mdl_summary["Threads"] = json_data.get("threads", 0) 227 sbs_mdl_summary["Iterations"] = json_data.get("iterations", 0) 228 229 score_unit = json_data.get("time_unit", "") 230 sbs_mdl_summary["ScoreUnit"] = score_unit 231 sbs_mdl_summary["CpuTime"] = "%.2e %s " % ( 232 json_data.get("cpu_time", 0), 233 score_unit 234 ) 235 sbs_mdl_summary["Score"] = "%.2e %s " % ( 236 json_data.get("real_time", 0), 237 score_unit 238 ) 239 sbs_mdl_summary["detail"] = "Link" 240 sbs_mdl_summary_list.append(sbs_mdl_summary) 241 return sbs_mdl_summary_list 242 243 def _generate_benchmark_summary_report(self, dest_dir_path): 244 tmpl_file_path = os.path.abspath(os.path.join( 245 os.path.dirname(__file__), 246 "..", "template", "benchmark_summary.html")) 247 if not os.path.exists(os.path.dirname(tmpl_file_path)): 248 print(SETTING_RED_STYLE % 249 ("Warning: %s not exists" % tmpl_file_path)) 250 return 251 252 out_report_file_path = os.path.join(dest_dir_path, "index.html") 253 if not os.path.exists(os.path.dirname(out_report_file_path)): 254 os.makedirs(os.path.dirname(out_report_file_path)) 255 256 if os.path.exists(tmpl_file_path): 257 try: 258 with open(os.path.abspath(tmpl_file_path), "r+") as file_read: 259 report_content = file_read.read() 260 file_read.close() 261 content_new = report_content 262 263 pos = content_new.find(BenchmarkReport.SUBSYSTEM_SUMMARY) 264 if pos >= 0: 265 content_new = \ 266 content_new[0:pos] + \ 267 str(self.sbs_mdl_summary_list) + \ 268 content_new[pos + 269 len(BenchmarkReport.SUBSYSTEM_SUMMARY): 270 len(content_new)] 271 272 try: 273 if os.path.exists(os.path.abspath(out_report_file_path)): 274 os.remove(os.path.abspath(out_report_file_path)) 275 with os.fdopen(os.open(os.path.abspath(out_report_file_path), 276 FLAGS, MODES), 'w') as output_fd: 277 content_new = str(content_new) 278 output_fd.write(content_new) 279 except IOError as err_msg: 280 print("Error5 for open failed:", out_report_file_path) 281 except IOError as err_msg: 282 print("Error6 for open failed", tmpl_file_path) 283 284 def _generate_all_benchmark_detail(self, dest_dir_parh): 285 for benchmark_info in self.benchmark_list: 286 self._generate_benchmark_detail(benchmark_info, 287 os.path.abspath(dest_dir_parh)) 288 289 def _is_filtered_id(self, item_key): 290 if item_key in self.filtered: 291 return True 292 return False 293 294 def _get_index_id(self, item_key): 295 pos = self.default_item.index(item_key) 296 if pos != -1: 297 return pos + 1 298 else: 299 self.max_index -= 1 300 return self.max_index 301 302 def _generate_benchmark_detail(self, benchmark_info, dest_dir_path): 303 report_tmpl_file_path = os.path.abspath( 304 os.path.join(os.path.dirname(__file__), 305 "..", "template", "benchmark_detail.html")) 306 if not os.path.exists(os.path.dirname(report_tmpl_file_path)): 307 print(SETTING_RED_STYLE % 308 ("Warning: %s not exists" % report_tmpl_file_path)) 309 return 310 311 out_report_file_path = os.path.join(os.path.abspath(dest_dir_path), 312 str(benchmark_info["Subsystem"]), 313 str(benchmark_info["Module"]), 314 str(benchmark_info["Testsuit"]), 315 str(benchmark_info["Benchmark"]) 316 + "_" 317 + str(benchmark_info["Mode"]) 318 + "_detail.html") 319 if not os.path.exists(os.path.dirname(out_report_file_path)): 320 os.makedirs(os.path.dirname(out_report_file_path)) 321 322 detail_info = self._get_detail_info(benchmark_info) 323 324 if os.path.exists(report_tmpl_file_path): 325 try: 326 with open(os.path.abspath(report_tmpl_file_path), "r+") \ 327 as file_read: 328 report_content = file_read.read() 329 file_read.close() 330 content_new = report_content 331 content_new = \ 332 self._update_report_summary(content_new, detail_info) 333 334 try: 335 if os.path.exists(os.path.abspath(out_report_file_path)): 336 os.remove(os.path.abspath(out_report_file_path)) 337 with os.fdopen(os.open(os.path.abspath(out_report_file_path), 338 FLAGS, MODES), 'w') as output_fd: 339 output_fd.write(content_new) 340 except IOError as err_msg: 341 print("Error5 for open failed", out_report_file_path) 342 except IOError as err_msg: 343 print("Error6 for open failed", report_tmpl_file_path) 344 345 def _get_detail_info(self, benchmark_info): 346 detail_info = [] 347 self.max_index = 1000 348 for item_key, item_value in benchmark_info.items(): 349 if self._is_filtered_id(item_key): 350 continue 351 352 item_info = {"item": item_key, 353 "id": self._get_index_id(item_key), 354 "content": item_value.decode("UTF-8") 355 if isinstance(item_value, bytes) else item_value} 356 detail_info.append(item_info) 357 detail_info = sorted(detail_info, key=lambda s: s["id"]) 358 dest_detail_info = [] 359 index = 1 360 for item in detail_info: 361 item["id"] = index 362 dest_detail_info.append(item) 363 index += 1 364 return dest_detail_info 365 366 def _update_report_summary(self, content_new, detail_info): 367 pos = content_new.find(BenchmarkReport.REPORT_SUMMARY) 368 if pos >= 0: 369 content_new = \ 370 content_new[0:pos] + \ 371 str(detail_info) + \ 372 content_new[pos + 373 len(BenchmarkReport.REPORT_SUMMARY): 374 len(content_new)] 375 return content_new 376 377if __name__ == '__main__': 378 print("****************** Benchmark Report Starting ******************") 379 BenchmarkReport().generate_benchmark(sys.argv) 380 print("****************** Benchmark Report Finished ******************")