"""Tutorial for kitcar_ml.utils.evaluation: evaluate faked detections."""

from kitcar_ml.utils.bounding_box import BoundingBox
from kitcar_ml.utils.evaluation.interpolation_evaluator import InterpolationEvaluator


def fake_dataset():
    """Simulate the dataset and create the groundtruth.

    Returns:
        list: Ten per-image annotation lists, each holding one 10x10
        bounding box of class ``"1"`` with full confidence.
    """
    # Build an independent inner list per image. The previous
    # ``[[...]] * 10`` form stored ten references to the SAME inner list,
    # so mutating one image's annotations in place (e.g. ``bb[0].append``)
    # would silently change all ten images.
    return [
        [BoundingBox(x1=0, y1=0, x2=10, y2=10, class_label="1", confidence=1)]
        for _ in range(10)
    ]
def fake_prediction():
    """Simulate a model and create predictions."""
    predictions = fake_dataset()
    # Same geometry as the groundtruth, but with a lowered confidence.
    predictions[0] = [BoundingBox(0, 0, 10, 10, "1", 0.8)]
    # A clearly oversized box (16x14 instead of 10x10).
    predictions[1] = [BoundingBox(0, 0, 16, 14, "1", 0.7)]
    # Two boxes whose sizes are only slightly off (12x12 and 8x8).
    predictions[2] = [BoundingBox(0, 0, 12, 12, "1", 0.7)]
    predictions[3] = [BoundingBox(0, 0, 8, 8, "1", 0.7)]
    return predictions
if __name__ == "__main__":
    # Configure the evaluator with three IoU thresholds and without
    # every-point interpolation.
    thresholds = (0.3, 0.5, 0.8)
    evaluator = InterpolationEvaluator(
        thresholds, use_every_point_interpolation=False
    )

    # For this tutorial both the dataset and the model output are faked.
    gts = fake_dataset()
    preds = fake_prediction()

    # Evaluate, print a textual summary, then show the evaluator's plots.
    evaluator(gts, preds)
    print(evaluator)
    evaluator.plot_precision_recall_curves()