Matsumura/implement test case aggregation (#42)
* add output function for overall test results

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* add evaluator results

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* split summary and evaluator results into distinct files

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* retrieve evaluator results only at the end of test cases

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* change file output destination to log directory

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* modify run_test.sh to determine output directory from ROS_LOG_DIR and pass it to run_test.py

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

* fix output directory to use execution location if ROS_LOG_DIR is unset

Signed-off-by: YumaMatsumura-kufusha <[email protected]>

---------

Signed-off-by: YumaMatsumura-kufusha <[email protected]>
YumaMatsumura-kufusha authored Jul 4, 2024
1 parent 975d570 commit c52073a
Showing 3 changed files with 55 additions and 8 deletions.
8 changes: 8 additions & 0 deletions cabot_navigation2/test/evaluator.py
@@ -94,9 +94,17 @@ def stop(self):
self._evaluation_timer = None
self.reset()

def get_evaluation_results(self):
results = []
for metric, func in zip(self._metrics_to_compute, self._metrics_func_list):
result = func(self.human_list, self.robot_list)
results.append({"name": metric, "value": result[0]})
return results

def reset(self):
self.robot_list = []
self.human_list = []
self.results = []
self._ready = False

def agents_callback(self, robot, human):
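The new get_evaluation_results() assumes each entry in self._metrics_func_list accepts (human_list, robot_list) and returns a sequence whose first element is the value to report; the metric functions themselves are outside this diff. A minimal hypothetical metric matching that assumed interface (not part of this commit):

def sample_count(human_list, robot_list):
    # Hypothetical metric: report how many robot samples were recorded.
    # Only the first element of the returned tuple is stored by
    # get_evaluation_results() as {"name": <metric>, "value": result[0]}.
    return (len(robot_list),)

# e.g. evaluator.get_evaluation_results() could then yield
# [{"name": "sample_count", "value": 1234}]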
41 changes: 39 additions & 2 deletions cabot_navigation2/test/run_test.py
@@ -22,9 +22,11 @@
# THE SOFTWARE.
###############################################################################

import os
import importlib
import pkgutil
import inspect
import csv
import sys
import math
import numpy
@@ -112,8 +114,9 @@ def wrap(*args, **kwargs):


class Tester:
def __init__(self, node):
def __init__(self, node, output_dir):
self.node = node
self.output_dir = output_dir
self.done = False
self.alive = True
self.config = {}
@@ -128,6 +131,8 @@ def __init__(self, node):
self.set_entity_state_client = self.node.create_client(SetEntityState, '/gazebo/set_entity_state')
self.test_func_name = None
self.result = {}
self.test_summary = {}
self.evaluator_summary = {}
# evaluation
self.evaluator = None

@@ -163,12 +168,22 @@ def test(self, module, test_pat, wait_ready=False):
logger.info(f"Testing {func}")
self.test_func_name = func
getattr(module, func)(self)
self.evaluator_summary[func] = self.evaluator.get_evaluation_results()
self.stop_evaluation() # automatically stop metric evaluation
success = self.print_result(self.result, func)
self.register_action_result(func, self.result)
self.cancel_subscription(func)
allSuccess = allSuccess and success

if func not in self.test_summary:
self.test_summary[func] = {'success': 0, 'failure': 0}
if success:
self.test_summary[func]['success'] += 1
else:
self.test_summary[func]['failure'] += 1

self.output_test_summary()

logger.info("Done all test")

if allSuccess:
@@ -198,6 +213,27 @@ def print_result(self, result, key):
logger.info("--------------------------")
return success

def output_test_summary(self):
test_summary_path = os.path.join(self.output_dir, 'test_summary.csv')
test_evaluation_path = os.path.join(self.output_dir, 'test_evaluation_results.csv')

with open(test_summary_path, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Test name", "Number of success", "Number of failure", "Success rate"])
for test_name, counts in self.test_summary.items():
success_count = counts['success']
fail_count = counts['failure']
total_count = success_count + fail_count
success_rate = success_count / total_count if total_count > 0 else 0
writer.writerow([test_name, success_count, fail_count, f"{success_rate:.2f}"])

with open(test_evaluation_path, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Test name", "evaluator", "value"])
for test_name, results in self.evaluator_summary.items():
for result in results:
writer.writerow([test_name, result["name"], result["value"]])

def register_action_result(self, target_function_name, case):
if target_function_name not in self.result:
self.result[target_function_name] = []
@@ -1029,6 +1065,7 @@ def main():
parser.add_option('-L', '--list-modules', action='store_true', help='list test modules')
parser.add_option('-l', '--list-functions', action='store_true', help='list test function')
parser.add_option('-w', '--wait-ready', action='store_true', help='wait ready')
parser.add_option('-o', '--output-dir', type=str, help='directory where the summary will be output')

(options, args) = parser.parse_args()

@@ -1067,7 +1104,7 @@ def main():
evaluator = Evaluator(node)
evaluator.set_logger(logger)

tester = Tester(node)
tester = Tester(node, options.output_dir)
tester.set_evaluator(evaluator)
try:
mod = importlib.import_module(options.module)
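For reference, a minimal sketch of reading back the two CSV files that output_test_summary() writes. It assumes the files sit under ROS_LOG_DIR (or the current directory), which is what run_test.sh passes via -o below, and it uses the column headers written above:

import csv
import os

# Assumed location: run_test.sh forwards ROS_LOG_DIR (or, if unset, the
# current directory) to run_test.py as -o/--output-dir.
log_dir = os.environ.get("ROS_LOG_DIR", os.getcwd())

with open(os.path.join(log_dir, "test_summary.csv"), newline="") as f:
    for row in csv.DictReader(f):
        print(row["Test name"], row["Success rate"])

with open(os.path.join(log_dir, "test_evaluation_results.csv"), newline="") as f:
    for row in csv.DictReader(f):
        print(row["Test name"], row["evaluator"], row["value"])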
14 changes: 8 additions & 6 deletions script/run_test.sh
@@ -84,27 +84,29 @@ if [[ $2 != "" ]]; then
test_func_option="-f $2"
fi

output_dir_option="-o ${ROS_LOG_DIR:-$pwd}"

blue "testing with $CABOT_SITE"

source $scriptdir/../install/setup.bash

while [[ 1 -eq 1 ]]; do
if [[ $list_modules -eq 1 ]]; then
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE} -L
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE} -L $output_dir_option
exit
fi

if [[ $list_functions -eq 1 ]]; then
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module -l
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module -l $output_dir_option
exit
fi

if [[ ! -z $debug ]]; then
echo "ros2 run --prefix 'gdb -ex run -ex bt -ex quit --args' cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug"
ros2 run --prefix 'gdb -ex run -ex bt -ex quit --args python3' cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug
echo "ros2 run --prefix 'gdb -ex run -ex bt -ex quit --args' cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug $output_dir_option"
ros2 run --prefix 'gdb -ex run -ex bt -ex quit --args python3' cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug $output_dir_option
else
echo "ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug"
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option
echo "ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $debug $output_dir_option"
ros2 run cabot_navigation2 run_test.py -m ${CABOT_SITE}.$module $test_func_option $wait_ready_option $output_dir_option
fi
result=$?
if [[ $result -le 1 ]]; then
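A note on the added output_dir_option="-o ${ROS_LOG_DIR:-$pwd}": $pwd presumably refers to a variable defined earlier in run_test.sh (not shown in this diff), since the shell's own variable for the current directory is $PWD. On the Python side, options.output_dir is None whenever -o is omitted, so a defensive fallback mirroring the shell default could look like this sketch (an assumption, not part of this commit):

import os

def resolve_output_dir(cli_output_dir):
    # Fall back the same way the shell wrapper does: explicit -o value first,
    # then ROS_LOG_DIR, then the current working directory.
    return cli_output_dir or os.environ.get("ROS_LOG_DIR") or os.getcwd()

# e.g. tester = Tester(node, resolve_output_dir(options.output_dir))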
