Commit
Added HTML reporting feature: switch --report-html FILENAME to singletest.py, so users can now export test results to an external HTML file. This feature can be used to display HTML reports on CI dashboards.
PrzemekWirkus committed Sep 25, 2014
1 parent 95519c1 commit 9fe239d
Showing 4 changed files with 213 additions and 11 deletions.
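For context, the feature is driven entirely from the command line. A hypothetical invocation (the -i/-M switches for the test-spec and MUTs files are assumptions about singletest.py's existing CLI, not shown in this diff):

    python workspace_tools/singletest.py -i test_spec.json -M muts_all.json --report-html report.html

The resulting report.html is self-contained (inline CSS and JavaScript, see test_exporters.py below), so a CI job can archive or publish it directly.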
2 changes: 1 addition & 1 deletion workspace_tools/build_api.py
@@ -428,7 +428,7 @@ def static_analysis_scan_lib(lib_id, target, toolchain, cppcheck_cmd, cppcheck_m
lib.dependencies, options,
verbose=verbose, clean=clean, macros=macros, notify=notify, jobs=jobs)
else:
- print 'Library "%s" is not yet supported on target %s with toolchain %s' % (lib_id, target.name, toolchain)
+ print 'Library "%s" is not yet supported on target %s with toolchain %s'% (lib_id, target.name, toolchain)


def static_analysis_scan_library(src_paths, build_path, target, toolchain_name, cppcheck_cmd, cppcheck_msg_format,
1 change: 1 addition & 0 deletions workspace_tools/singletest.py
@@ -171,6 +171,7 @@ def get_version():
_clean=opts.clean,
_opts_db_url=opts.db_url,
_opts_log_file_name=opts.log_file_name,
_opts_report_html_file_name=opts.report_html_file_name,
_test_spec=test_spec,
_opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
_opts_goanna_for_tests=opts.goanna_for_tests,
62 changes: 52 additions & 10 deletions workspace_tools/test_api.py
Original file line number Diff line number Diff line change
@@ -85,8 +85,9 @@ def __init__(self, single_test):
def run(self):
start = time()
# Execute tests depending on options and filter applied
- test_summary, shuffle_seed = self.single_test.execute()
+ test_summary, shuffle_seed, test_summary_ext = self.single_test.execute()
elapsed_time = time() - start

# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
@@ -139,6 +140,7 @@ def __init__(self,
_clean=False,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
@@ -184,6 +186,7 @@ def __init__(self,
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
@@ -267,6 +270,9 @@ def execute(self):

# Here we store test results
test_summary = []
# Here we store test results in an extended data structure
test_summary_ext = {}

# Generate seed for shuffle if seed is not provided on the command line
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
@@ -413,17 +419,28 @@ def execute(self):
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
# read MUTs, test specification and perform tests
- single_test_result = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+ single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

# Append test results to global test summary
if single_test_result is not None:
test_summary.append(single_test_result)

# Prepare extended test results data structure (used to generate detailed test reports)
if toolchain not in test_summary_ext:
    test_summary_ext[toolchain] = {}    # test_summary_ext : toolchain
if target not in test_summary_ext[toolchain]:
    test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
if test_id not in test_summary_ext[toolchain][target]:
    test_summary_ext[toolchain][target][test_id] = detailed_test_results    # test_summary_ext : toolchain : target : test_id


if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect()

- return test_summary, self.shuffle_random_seed
+ return test_summary, self.shuffle_random_seed, test_summary_ext

def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
@@ -710,10 +727,11 @@ def handle(self, test_spec, target_name, toolchain_name, test_loops=1):

# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for a single test run multiple times (looped)
detailed_test_results = {} # { Loop_number: { results ... } }

for test_index in range(test_loops):
# Choose one method of copying files to the mbed virtual drive
#_copy_res, _err_msg, _copy_method = self.file_copy_method_selector(image_path, disk, self.opts_copy_method, image_dest=image_dest)

_copy_res, _err_msg, _copy_method = self.image_copy_method_selector(target_name, image_path, disk, selected_copy_method,
images_config, image_dest)

Expand Down Expand Up @@ -745,6 +763,19 @@ def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
# Store test result
test_all_result.append(single_test_result)
elapsed_time = time() - start_host_exec_time

detailed_test_results[test_index] = {
"single_test_result" : single_test_result,
"single_test_output" : single_test_output,
"target_name" : target_name,
"toolchain_name" : toolchain_name,
"test_id" : test_id,
"test_description" : test_description,
"elapsed_time" : elapsed_time,
"duration" : duration,
"copy_method" : _copy_method,
}

print self.print_test_result(single_test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration)
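Because detailed_test_results is keyed by loop number, aggregating over looped runs is straightforward. A hypothetical helper (not part of this commit) that computes the pass ratio for one test across its loops:

    def loop_pass_ratio(detailed_test_results):
        # detailed_test_results: { loop_number: {'single_test_result': 'OK' | 'FAIL' | ..., ...} }
        results = [r['single_test_result'] for r in detailed_test_results.values()]
        return results.count('OK') / float(len(results)) if results else 0.0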

@@ -771,7 +802,7 @@ def handle(self, test_spec, target_name, toolchain_name, test_loops=1):

return (self.shape_global_test_loop_result(test_all_result), target_name, toolchain_name,
test_id, test_description, round(elapsed_time, 2),
- duration, self.shape_test_loop_ok_result_count(test_all_result))
+ duration, self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results

def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
@@ -1206,8 +1237,15 @@ def singletest_in_cli_mode(single_test):
"""
start = time()
# Execute tests depending on options and filter applied
- test_summary, shuffle_seed = single_test.execute()
+ test_summary, shuffle_seed, test_summary_ext = single_test.execute()
elapsed_time = time() - start

if single_test.opts_report_html_file_name:
# Export results as an HTML report to a separate file
from workspace_tools.test_exporters import exporter_html
with open(single_test.opts_report_html_file_name, 'w') as f:
f.write(exporter_html(test_summary_ext))

# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
@@ -1300,7 +1338,7 @@ def __init__(self, store_log=True):
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file

- def log_line(self, LogType, log_line):
+ def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
@@ -1339,8 +1377,8 @@ def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
- with open(self.log_file_name, 'a') as file:
-     file.write(log_line_str + line_delim)
+ with open(self.log_file_name, 'a') as f:
+     f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
@@ -1536,6 +1574,10 @@ def get_default_test_options_parser():
dest='log_file_name',
help='Log events to an external file (note: not all console entries may be visible in the log file)')

parser.add_option('', '--report-html',
dest='report_html_file_name',
help='Export test suite results to an external HTML report file')

parser.add_option('', '--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
159 changes: 159 additions & 0 deletions workspace_tools/test_exporters.py
@@ -0,0 +1,159 @@
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""

from workspace_tools.utils import construct_enum


ResultExporterType = construct_enum(JSON='Json_Exporter',
TEXT='Text_Exporter',
HTML='Html_Exporter')


def exporter_factory(result_exporter_type):
pass


def exporter_html(test_result_ext):
"""
Parameter 'test_result_ext' format:
u'uARM': { u'LPC1768': { 'MBED_2': { 0: { 'copy_method': 'shutils.copy()',
'duration': 20,
'elapsed_time': 1.7929999828338623,
'single_test_output': 'Host test instrumentation on ...\r\n',
'single_test_result': 'OK',
'target_name': u'LPC1768',
'test_description': 'stdio',
'test_id': u'MBED_2',
'toolchain_name': u'uARM'}}}}
"""
CSS_STYLE = """<style>
.name{
border: 1px solid;
border-radius: 25px;
width: 100px;
}
.tooltip{
position:absolute;
background-color: #F5DA81;
display:none;
}
</style>
"""

JAVASCRIPT = """
<script type="text/javascript">
function show (elem) {
elem.style.display = "block";
}
function hide (elem) {
elem.style.display = "";
}
</script>
"""

def get_tooltip_name(toolchain, target, test_id, loop_no):
return "target_test_%s_%s_%s_%d"% (toolchain.lower(), target.lower(), test_id.lower(), loop_no)

def get_result_div_sections(test, test_no):

RESULT_COLORS = {'OK' : 'LimeGreen',
'FAIL' : 'Orange',
'ERROR' : 'LightCoral',
}

tooltip_name = get_tooltip_name(test['toolchain_name'], test['target_name'], test['test_id'], test_no)
background_color = RESULT_COLORS[test['single_test_result'] if test['single_test_result'] in RESULT_COLORS else 'ERROR']
result_div_style = "background-color: %s"% background_color
result = """ <div class="name" style="%s" onmouseover="show(%s)" onmouseout="hide(%s)">
<center>%s</center>
<div class = "tooltip" id= "%s">
<b>%s</b> in <b>%.2f sec</b><br />
<hr />
<small>
%s
</small>
</div>
</div>
"""% (result_div_style,
tooltip_name,
tooltip_name,
test['single_test_result'],
tooltip_name,
test['test_description'],
test['elapsed_time'],
test['single_test_output'].replace('\n', '<br />'))
return result

def get_result_tree(test_results):
result = '<table>'
test_ids = sorted(test_results.keys())
for test_no in test_ids:
test = test_results[test_no]
result += """<tr>
<td valign="top">%s</td>
</tr>"""% get_result_div_sections(test, test_no)
result += '</table>'
return result

def get_all_unique_test_ids(test_result_ext):
result = []
toolchains = test_result_ext.keys()
for toolchain in toolchains:
targets = test_result_ext[toolchain].keys()
for target in targets:
tests = test_result_ext[toolchain][target].keys()
result.extend(tests)
return sorted(list(set(result)))

result = """<html>
<head>
<title>mbed SDK test suite test result report</title>
%s
%s
</head>
<body>
"""% (CSS_STYLE, JAVASCRIPT)

unique_test_ids = get_all_unique_test_ids(test_result_ext)

toolchains = sorted(test_result_ext.keys())
for toolchain in toolchains:
result += '<h2>%s</h2>'% toolchain
targets = sorted(test_result_ext[toolchain].keys())
result += '<table><tr>'
for target in targets:
result += '<td valign="middle">%s</td>'% (target)

tests = sorted(test_result_ext[toolchain][target].keys())
for test in unique_test_ids:
result += """<td align="center">%s</td>"""% test
result += """</tr>
<tr>
<td></td>
"""

for test in unique_test_ids:
test_result = get_result_tree(test_result_ext[toolchain][target][test]) if test in tests else ''
result += '<td>%s</td>'% (test_result)

result += '</tr>'
result += '</table>'
result += '</body></html>'
return result
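Since exporter_html is a plain function of the extended results dictionary, it can also be driven directly, e.g. from a custom script. A minimal sketch using hypothetical data in the format documented in the docstring above:

    from workspace_tools.test_exporters import exporter_html

    test_summary_ext = {
        'uARM': {
            'LPC1768': {
                'MBED_2': {
                    0: {'single_test_result': 'OK',
                        'single_test_output': 'Host test instrumentation on ...',
                        'target_name': 'LPC1768',
                        'toolchain_name': 'uARM',
                        'test_id': 'MBED_2',
                        'test_description': 'stdio',
                        'elapsed_time': 1.79,
                        'duration': 20,
                        'copy_method': 'shutil.copy()'},
                },
            },
        },
    }

    with open('report.html', 'w') as f:
        f.write(exporter_html(test_summary_ext))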
