# Source code for kitcar_ml.utils.evaluation.test.test_simple_evaluator

import random

from kitcar_ml.utils.bounding_box import BoundingBox
from kitcar_ml.utils.evaluation.simple_evaluator import SimpleEvaluator
from kitcar_ml.utils.evaluation.test.basic_utility import create_bounding_boxes


def test_precision():
    """Check precision at the degenerate corner points.

    With no true and no false positives the precision is defined as 0;
    with one true positive and no false positives it is exactly 1.
    """
    print("Test Precision")
    ev = SimpleEvaluator()
    # (true_positives, false_positives) -> expected precision
    assert ev.precision(0, 0) == 0
    assert ev.precision(1, 0) == 1
def test_recall():
    """Check recall at the degenerate corner points.

    With no true positives but a false negative the recall is 0;
    with one true positive and no false negatives it is exactly 1.
    """
    print("Test Recall")
    ev = SimpleEvaluator()
    # (true_positives, false_negatives) -> expected recall
    assert ev.recall(0, 1) == 0
    assert ev.recall(1, 0) == 1
def test_m_ap():
    """Evaluate randomly generated boxes and compare mAP to the target accuracy.

    A target accuracy is drawn at random, boxes are generated to hit it
    (``create_bounding_boxes`` returns the accuracy it actually achieved),
    and the evaluator's average precision must match that value.
    """
    print("Test mAP")
    accuracy = round(random.uniform(0.1, 1), 1)
    # NOTE: `accuracy` is deliberately overwritten with the value the
    # fixture really produced, so the assert compares against ground truth.
    gt_boxes, det_boxes, accuracy = create_bounding_boxes(accuracy=accuracy)
    evaluator = SimpleEvaluator()
    evaluator(gt_boxes, det_boxes)
    assert (
        evaluator.average_precision == accuracy
    ), f"mAP should be calculated to {accuracy} but was {evaluator.average_precision}"
def test_edge_cases():
    """Exercise the evaluator on empty / mismatched ground-truth and detections.

    Covers: missed objects (precision and recall both 0), nothing to find and
    nothing found (both 1 by convention), a spurious detection paired with a
    missed object (both 0), and a spurious detection alone (precision 0,
    recall 1 since there was nothing to miss).
    """
    print("Test Edge Cases")
    evaluator = SimpleEvaluator()
    box = BoundingBox(0, 0, 1, 1, "test class 1")

    # No Detections: one object exists but nothing was detected.
    truths, preds = [[box], []], [[], []]
    evaluator(truths, preds)
    assert evaluator.average_precision == 0
    assert evaluator.average_recall == 0

    # Empty ground truth and empty detections count as a perfect result.
    truths, preds = [[], []], [[], []]
    evaluator(truths, preds)
    assert evaluator.average_precision == 1
    assert evaluator.average_recall == 1

    # Wrong Detections: the object is detected in the wrong frame.
    truths, preds = [[box], []], [[], [box]]
    evaluator(truths, preds)
    assert evaluator.average_precision == 0
    assert evaluator.average_recall == 0

    # A detection with no ground truth: nothing was missed, but the
    # detection is a false positive.
    truths, preds = [[]], [[box]]
    evaluator(truths, preds)
    assert evaluator.average_precision == 0
    assert evaluator.average_recall == 1
def main():
    """Run every simple-evaluator test in sequence."""
    print("Test Simple Evaluator")
    for check in (test_precision, test_recall, test_m_ap, test_edge_cases):
        check()


if __name__ == "__main__":
    main()