From 4d1383a9b84ef3db0e8bc5ce9e8aed495605bc6a Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 17 Jun 2024 15:12:57 +0100 Subject: [PATCH 01/52] adding config file, load from checkpoint --- .../config/inference_config.yaml | 11 ++ crabs/detection_tracking/inference_model.py | 119 ++++++++---------- crabs/detection_tracking/tracking_utils.py | 5 +- 3 files changed, 63 insertions(+), 72 deletions(-) create mode 100644 crabs/detection_tracking/config/inference_config.yaml diff --git a/crabs/detection_tracking/config/inference_config.yaml b/crabs/detection_tracking/config/inference_config.yaml new file mode 100644 index 00000000..a8fad266 --- /dev/null +++ b/crabs/detection_tracking/config/inference_config.yaml @@ -0,0 +1,11 @@ +iou_threshold: 0.1 +score_threshold: 0.1 +# Maximum number of frames to keep alive a track without associated detections. +max_age: 10 +# Minimum number of associated detections before track is initialised +min_hits: 1 +# save video inference +save_video: True +# Save predicted tracks in VIA csv format and export corresponding frames +# This is useful to prepare for manual labelling of tracks +save_csv_and_frames: False diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 15d396ec..7a3bd3a6 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -8,12 +8,15 @@ import numpy as np import torch import torchvision.transforms.v2 as transforms +import yaml # type: ignore from sort import Sort +from crabs.detection_tracking.models import FasterRCNN from crabs.detection_tracking.tracking_utils import ( evaluate_mota, get_ground_truth_data, save_frame_and_csv, + write_tracked_bbox_to_csv, ) from crabs.detection_tracking.visualization import ( draw_bbox, @@ -36,26 +39,27 @@ class DetectorInference: The command-line arguments provided. vid_path : str The path to the input video. - iou_threshold : float - The iou threshold for tracking. - score_threshold : float - The score confidence threshold for tracking. sort_tracker : Sort An instance of the sorting algorithm used for tracking. 
""" def __init__(self, args: argparse.Namespace) -> None: self.args = args + self.config_file = args.config_file self.vid_path = args.vid_path - self.score_threshold = args.score_threshold - self.iou_threshold = args.iou_threshold - self.sort_tracker = Sort( - max_age=args.max_age, - min_hits=args.min_hits, - iou_threshold=self.iou_threshold, - ) + self.video_file_root = f"{Path(self.vid_path).stem}" self.trained_model = self.load_trained_model() + self.load_config_yaml() + self.sort_tracker = Sort( + max_age=self.config["max_age"], + min_hits=self.config["min_hits"], + iou_threshold=self.config["iou_threshold"], + ) + + def load_config_yaml(self): + with open(self.config_file, "r") as f: + self.config = yaml.safe_load(f) def load_trained_model(self) -> torch.nn.Module: """ @@ -65,12 +69,11 @@ def load_trained_model(self) -> torch.nn.Module: ------- torch.nn.Module """ - model = torch.load( - self.args.model_dir, - map_location=torch.device(self.args.accelerator), - ) - model.eval() - return model + # Get trained model + trained_model = FasterRCNN.load_from_checkpoint(self.args.model_dir) + trained_model.eval() + trained_model.to(self.args.accelerator) + return trained_model def prep_sort(self, prediction: dict) -> np.ndarray: """ @@ -92,7 +95,7 @@ def prep_sort(self, prediction: dict) -> np.ndarray: pred_sort = [] for box, score, label in zip(pred_boxes, pred_scores, pred_labels): - if score > self.score_threshold: + if score > self.config["score_threshold"]: bbox = np.concatenate((box, [score])) pred_sort.append(bbox) @@ -108,7 +111,7 @@ def load_video(self) -> None: raise Exception("Error opening video file") # prepare output video writer if required - if self.args.save_video: + if self.config["save_video"]: # read input video parameters frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)) @@ -157,7 +160,6 @@ def evaluate_tracking( self, gt_boxes_list: list, tracked_boxes_list: list, - iou_threshold: float, ) -> list[float]: """ Evaluate tracking performance using the Multi-Object Tracking Accuracy (MOTA) metric. @@ -168,8 +170,6 @@ def evaluate_tracking( List of ground truth bounding boxes for each frame. tracked_boxes_list : list[list[float]] List of tracked bounding boxes for each frame. - iou_threshold : float - The IoU threshold used to determine matches between ground truth and tracked boxes. Returns ------- @@ -181,7 +181,10 @@ def evaluate_tracking( # prev_frame_ids = None for gt_boxes, tracked_boxes in zip(gt_boxes_list, tracked_boxes_list): mota = evaluate_mota( - gt_boxes, tracked_boxes, iou_threshold, prev_frame_ids + gt_boxes, + tracked_boxes, + self.config["iou_threshold"], + prev_frame_ids, ) mota_values.append(mota) # Update previous frame IDs for the next iteration @@ -211,7 +214,9 @@ def get_prediction(self, frame: np.ndarray) -> torch.Tensor: ) img = transform(frame).to(self.args.accelerator) img = img.unsqueeze(0) - return self.trained_model(img) + with torch.no_grad(): + prediction = self.trained_model(img) + return prediction def update_tracking(self, prediction: dict) -> list[list[float]]: """ @@ -250,17 +255,23 @@ def save_required_output( frame_number : int The frame number. 
""" - if self.args.save_csv_and_frames: + frame_name = f"{self.video_file_root}_frame_{frame_number:08d}.png" + if self.config["save_csv_and_frames"]: save_frame_and_csv( - self.video_file_root, + frame_name, self.tracking_output_dir, tracked_boxes, frame, frame_number, self.csv_writer, ) + else: + for bbox in tracked_boxes: + write_tracked_bbox_to_csv( + bbox, frame, frame_name, self.csv_writer + ) - if self.args.save_video: + if self.config["save_video"]: frame_copy = frame.copy() for bbox in tracked_boxes: xmin, ymin, xmax, ymax, id = bbox @@ -281,9 +292,7 @@ def run_inference(self): frame_number = 1 self.tracked_list = [] - # initialise csv writer if required - if self.args.save_csv_and_frames: - self.csv_writer, csv_file = self.prep_csv_writer() + self.csv_writer, csv_file = self.prep_csv_writer() # loop thru frames of clip while self.video.isOpened(): @@ -311,7 +320,7 @@ def run_inference(self): if self.args.gt_dir: gt_boxes_list = get_ground_truth_data(self.args.gt_dir) mota_values = self.evaluate_tracking( - gt_boxes_list, self.tracked_list, self.iou_threshold + gt_boxes_list, self.tracked_list, self.config["iou_threshold"] ) overall_mota = np.mean(mota_values) print("Overall MOTA:", overall_mota) @@ -320,10 +329,10 @@ def run_inference(self): self.video.release() # Close outputs - if self.args.save_video: + if self.config["save_video"]: self.out.release() - if args.save_csv_and_frames: + if self.config["save_csv_and_frames"]: csv_file.close() @@ -361,9 +370,15 @@ def main(args) -> None: help="location of images and coco annotation", ) parser.add_argument( - "--save_video", - action="store_true", - help="save video inference", + "--config_file", + type=str, + default=str( + Path(__file__).parent / "config" / "inference_config.yaml" + ), + help=( + "Location of YAML config to control training. " + "Default: crabs-exploration/crabs/detection_tracking/config/inference_config.yaml" + ), ) parser.add_argument( "--output_path", @@ -371,44 +386,12 @@ def main(args) -> None: default=os.getcwd(), help="location of output video", ) - parser.add_argument( - "--score_threshold", - type=float, - default=0.1, - help="threshold for prediction score", - ) - parser.add_argument( - "--iou_threshold", - type=float, - default=0.1, - help="threshold for prediction score", - ) - parser.add_argument( - "--max_age", - type=int, - default=10, - help="Maximum number of frames to keep alive a track without associated detections.", - ) - parser.add_argument( - "--min_hits", - type=int, - default=1, - help="Minimum number of associated detections before track is initialised.", - ) parser.add_argument( "--accelerator", type=str, default="gpu", help="accelerator for pytorch lightning", ) - parser.add_argument( - "--save_csv_and_frames", - action="store_true", - help=( - "Save predicted tracks in VIA csv format and export corresponding frames. " - "This is useful to prepare for manual labelling of tracks." 
- ), - ) parser.add_argument( "--max_frames_to_read", type=int, diff --git a/crabs/detection_tracking/tracking_utils.py b/crabs/detection_tracking/tracking_utils.py index bf236a3c..290c7f7e 100644 --- a/crabs/detection_tracking/tracking_utils.py +++ b/crabs/detection_tracking/tracking_utils.py @@ -312,7 +312,7 @@ def write_tracked_bbox_to_csv( def save_frame_and_csv( - video_file_root: str, + frame_name: str, tracking_output_dir: Path, tracked_boxes: list[list[float]], frame: np.ndarray, @@ -341,8 +341,6 @@ def save_frame_and_csv( ------- None """ - frame_name = f"{video_file_root}_frame_{frame_number:08d}.png" - for bbox in tracked_boxes: # Add bbox to csv write_tracked_bbox_to_csv(bbox, frame, frame_name, csv_writer) @@ -354,4 +352,3 @@ def save_frame_and_csv( logging.error( f"Didn't save {frame_name}, frame {frame_number}, Skipping." ) - logging.info(f"Frame {frame_number} saved at {frame_path}") From 94761b8c8d6adaf37c90d0097a130b8b25950a55 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 17 Jun 2024 15:43:12 +0100 Subject: [PATCH 02/52] adding inference to toml --- crabs/detection_tracking/inference_model.py | 8 +++----- pyproject.toml | 1 + 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 7a3bd3a6..8c565158 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -18,9 +18,7 @@ save_frame_and_csv, write_tracked_bbox_to_csv, ) -from crabs.detection_tracking.visualization import ( - draw_bbox, -) +from crabs.detection_tracking.visualization import draw_bbox class DetectorInference: @@ -358,10 +356,10 @@ def main(args) -> None: if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "--model_dir", + "--ckpt_path", type=str, required=True, - help="location of trained model", + help="location of checkpoint of the trained model", ) parser.add_argument( "--vid_path", diff --git a/pyproject.toml b/pyproject.toml index 9f40cc14..fa3d9495 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ extract-frames = "crabs.bboxes_labelling.extract_frames_to_label_w_sleap:app_wra combine-annotations = "crabs.bboxes_labelling.combine_and_format_annotations:app_wrapper" train-detector = "crabs.detection_tracking.train_model:app_wrapper" evaluate-detector = "crabs.detection_tracking.evaluate_model:app_wrapper" +inference-detector = "crabs.detection_tracking.inference_model:app_wrapper" # verify-videos-and-extract-samples # extract-additional-channels From e4f1bacbe0d2e58b8784ed13f22591e6b91e885f Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 10:48:19 +0100 Subject: [PATCH 03/52] adding bash script --- bash_scripts/run_inference.sh | 98 +++++++++++++++++++++++++ crabs/detection_tracking/train_model.py | 2 +- 2 files changed, 99 insertions(+), 1 deletion(-) create mode 100644 bash_scripts/run_inference.sh diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh new file mode 100644 index 00000000..48e0a28d --- /dev/null +++ b/bash_scripts/run_inference.sh @@ -0,0 +1,98 @@ +#!/bin/bash + + #SBATCH -p gpu # a100 # partition + #SBATCH --gres=gpu:1 # gpu:a100_2g.10gb # For any GPU: --gres=gpu:1. 
For a specific one: --gres=gpu:rtx5000 + #SBATCH -N 1 # number of nodes + #SBATCH --ntasks-per-node 8 # 2 # max number of tasks per node + #SBATCH --mem 32G # memory pool for all cores + #SBATCH -t 3-00:00 # time (D-HH:MM) + #SBATCH -o slurm.%A.%N.out + #SBATCH -e slurm.%A.%N.err + #SBATCH --mail-type=ALL + #SBATCH --mail-user=n.aznan@ucl.ac.uk + + # --------------------- + # Source bashrc + # ---------------------- + # Otherwise `which python` points to the miniconda module's Python + source ~/.bashrc + + # memory + # see https://pytorch.org/docs/stable/notes/cuda.html#environment-variables + PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True + + # ----------------------------- + # Error settings for bash + # ----------------------------- + # see https://wizardzines.com/comics/bash-errors/ + set -e # do not continue after errors + set -u # throw error if variable is unset + set -o pipefail # make the pipe fail if any part of it fails + + # --------------------- + # Define variables + # ---------------------- + + # mlflow + EXPERIMENT_NAME="Sept2023_inference" + MLFLOW_FOLDER=/ceph/zoo/users/sminano/ml-runs-all/ml-runs-scratch + + # video and inference config + VIDEO_PATH=/ceph/zoo/users/sminano/crabs_bboxes_labels/Sep2023_labelled + CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/detection_tracking/config/inference_config.yaml + + # checkpoint + CKPT_FILE=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + + # version of the codebase + GIT_BRANCH=nikkna/inference_cluster + + # ----------------------------- + # Create virtual environment + # ----------------------------- + module load miniconda + + # Define a environment for each job in the + # temporary directory of the compute node + ENV_NAME=crabs-dev-$SLURM_JOB_ID + ENV_PREFIX=$TMPDIR/$ENV_NAME + + # create environment + conda create \ + --prefix $ENV_PREFIX \ + -y \ + python=3.10 + + # activate environment + conda activate $ENV_PREFIX + + # install crabs package in virtual env + python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH + + + # log pip and python locations + echo $ENV_PREFIX + which python + which pip + + # print the version of crabs package (last number is the commit hash) + echo "Git branch: $GIT_BRANCH" + conda list crabs + echo "-----" + + # ------------------------------------ + # GPU specs + # ------------------------------------ + echo "Memory used per GPU before training" + echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader + echo "-----" + + + # ------------------- + # Run evaluation script + # ------------------- + inference-detector \ + --checkpoint_path $CKPT_PATH \ + --config_file $CONFIG_FILE \ + --accelerator gpu \ + \ No newline at end of file diff --git a/crabs/detection_tracking/train_model.py b/crabs/detection_tracking/train_model.py index 2f2abd8d..780b1840 100644 --- a/crabs/detection_tracking/train_model.py +++ b/crabs/detection_tracking/train_model.py @@ -278,4 +278,4 @@ def app_wrapper(): if __name__ == "__main__": - app_wrapper() + app_wrapper() \ No newline at end of file From 0b3ddd91876d7660ca3a700ece2aafa6abaeeb97 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 11:10:46 +0100 Subject: [PATCH 04/52] change variable --- bash_scripts/run_inference.sh | 1 + crabs/detection_tracking/inference_model.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/bash_scripts/run_inference.sh 
b/bash_scripts/run_inference.sh index 48e0a28d..58e6ebcd 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -93,6 +93,7 @@ # ------------------- inference-detector \ --checkpoint_path $CKPT_PATH \ + --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ --accelerator gpu \ \ No newline at end of file diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 8c565158..a86e2019 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -35,7 +35,7 @@ class DetectorInference: ---------- args : argparse.Namespace The command-line arguments provided. - vid_path : str + video_path : str The path to the input video. sort_tracker : Sort An instance of the sorting algorithm used for tracking. @@ -44,9 +44,9 @@ class DetectorInference: def __init__(self, args: argparse.Namespace) -> None: self.args = args self.config_file = args.config_file - self.vid_path = args.vid_path + self.video_path = args.video_path - self.video_file_root = f"{Path(self.vid_path).stem}" + self.video_file_root = f"{Path(self.video_path).stem}" self.trained_model = self.load_trained_model() self.load_config_yaml() self.sort_tracker = Sort( @@ -104,7 +104,7 @@ def load_video(self) -> None: Load the input video, and prepare the output video if required. """ # load input video - self.video = cv2.VideoCapture(self.vid_path) + self.video = cv2.VideoCapture(self.video_path) if not self.video.isOpened(): raise Exception("Error opening video file") @@ -362,7 +362,7 @@ def main(args) -> None: help="location of checkpoint of the trained model", ) parser.add_argument( - "--vid_path", + "--video_path", type=str, required=True, help="location of images and coco annotation", From 892914e06a4e02e4d03a56a579cf5194ded9d983 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 11:13:14 +0100 Subject: [PATCH 05/52] change variable --- crabs/detection_tracking/inference_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index a86e2019..86f0c160 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -68,7 +68,7 @@ def load_trained_model(self) -> torch.nn.Module: torch.nn.Module """ # Get trained model - trained_model = FasterRCNN.load_from_checkpoint(self.args.model_dir) + trained_model = FasterRCNN.load_from_checkpoint(self.args.checkpoint_path) trained_model.eval() trained_model.to(self.args.accelerator) return trained_model @@ -356,7 +356,7 @@ def main(args) -> None: if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "--ckpt_path", + "--checkpoint_path", type=str, required=True, help="location of checkpoint of the trained model", From 66c22be68c08f58433b8926e4ced9d8e93c7aac3 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 11:27:50 +0100 Subject: [PATCH 06/52] naming error --- bash_scripts/run_inference.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 58e6ebcd..38286a91 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -42,7 +42,7 @@ CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/detection_tracking/config/inference_config.yaml # checkpoint - 
CKPT_FILE=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + CKPT_Path=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt # version of the codebase GIT_BRANCH=nikkna/inference_cluster From 3fab7132a7ef3835021a7815c8f4b74be00bd92f Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 11:41:15 +0100 Subject: [PATCH 07/52] naming error --- bash_scripts/run_inference.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 38286a91..f593e828 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -42,7 +42,7 @@ CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/detection_tracking/config/inference_config.yaml # checkpoint - CKPT_Path=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt # version of the codebase GIT_BRANCH=nikkna/inference_cluster From 2b0d27384497a6f21649288a42b5e6c75f98021b Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 16:02:40 +0100 Subject: [PATCH 08/52] fixed import --- crabs/detection_tracking/inference_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 86f0c160..9bde2494 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -9,7 +9,7 @@ import torch import torchvision.transforms.v2 as transforms import yaml # type: ignore -from sort import Sort +from crabs.detection_tracking.sort import Sort from crabs.detection_tracking.models import FasterRCNN from crabs.detection_tracking.tracking_utils import ( From 85452afea8653f240e246bde2fe56b7548b2a788 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 16:12:34 +0100 Subject: [PATCH 09/52] cleaned up sort --- bash_scripts/run_inference.sh | 1 - crabs/detection_tracking/inference_model.py | 6 +- crabs/detection_tracking/sort.py | 146 -------------------- crabs/detection_tracking/train_model.py | 2 +- 4 files changed, 5 insertions(+), 150 deletions(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index f593e828..1f6bb40b 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -96,4 +96,3 @@ --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ --accelerator gpu \ - \ No newline at end of file diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 9bde2494..bfe79ecb 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -9,9 +9,9 @@ import torch import torchvision.transforms.v2 as transforms import yaml # type: ignore -from crabs.detection_tracking.sort import Sort from crabs.detection_tracking.models import FasterRCNN +from crabs.detection_tracking.sort import Sort from crabs.detection_tracking.tracking_utils import ( evaluate_mota, get_ground_truth_data, @@ -68,7 +68,9 @@ def load_trained_model(self) -> torch.nn.Module: torch.nn.Module """ # Get trained model - trained_model = FasterRCNN.load_from_checkpoint(self.args.checkpoint_path) + trained_model = FasterRCNN.load_from_checkpoint( + 
self.args.checkpoint_path + ) trained_model.eval() trained_model.to(self.args.accelerator) return trained_model diff --git a/crabs/detection_tracking/sort.py b/crabs/detection_tracking/sort.py index 9dac8b00..38f9a4b3 100644 --- a/crabs/detection_tracking/sort.py +++ b/crabs/detection_tracking/sort.py @@ -18,20 +18,8 @@ from __future__ import print_function -import argparse -import glob -import os -import time - -import matplotlib -import matplotlib.patches as patches -import matplotlib.pyplot as plt import numpy as np from filterpy.kalman import KalmanFilter -from skimage import io - -np.random.seed(0) -matplotlib.use("TkAgg") def linear_assignment(cost_matrix): @@ -310,137 +298,3 @@ def update(self, dets=np.empty((0, 5))): if len(ret) > 0: return np.concatenate(ret) return np.empty((0, 5)) - - -def parse_args(): - """Parse input arguments.""" - parser = argparse.ArgumentParser(description="SORT demo") - parser.add_argument( - "--display", - dest="display", - help="Display online tracker output (slow) [False]", - action="store_true", - ) - parser.add_argument( - "--seq_path", help="Path to detections.", type=str, default="data" - ) - parser.add_argument( - "--phase", help="Subdirectory in seq_path.", type=str, default="train" - ) - parser.add_argument( - "--max_age", - help="""Maximum number of frames to keep alive a track without - associated detections.""", - type=int, - default=1, - ) - parser.add_argument( - "--min_hits", - help="Minimum number of associated detections before track is initialised.", - type=int, - default=3, - ) - parser.add_argument( - "--iou_threshold", - help="Minimum IOU for match.", - type=float, - default=0.3, - ) - args = parser.parse_args() - return args - - -if __name__ == "__main__": - # all train - args = parse_args() - display = args.display - phase = args.phase - total_time = 0.0 - total_frames = 0 - colours = np.random.rand(32, 3) # used only for display - if display: - if not os.path.exists("mot_benchmark"): - print( - "\n\tERROR: mot_benchmark link not found!\n\n" - " Create a symbolic link to the MOT benchmark\n" - " (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n" - " $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n" - ) - - exit() - plt.ion() - fig = plt.figure() - ax1 = fig.add_subplot(111, aspect="equal") - - if not os.path.exists("output"): - os.makedirs("output") - pattern = os.path.join(args.seq_path, phase, "*", "det", "det.txt") - for seq_dets_fn in glob.glob(pattern): - mot_tracker = Sort( - max_age=args.max_age, - min_hits=args.min_hits, - iou_threshold=args.iou_threshold, - ) # create instance of the SORT tracker - seq_dets = np.loadtxt(seq_dets_fn, delimiter=",") - seq = seq_dets_fn[pattern.find("*") :].split(os.path.sep)[0] - - with open(os.path.join("output", "%s.txt" % (seq)), "w") as out_file: - print("Processing %s." 
% (seq)) - for frame in range(int(seq_dets[:, 0].max())): - frame += 1 # detection and frame numbers begin at 1 - dets = seq_dets[seq_dets[:, 0] == frame, 2:7] - dets[:, 2:4] += dets[ - :, 0:2 - ] # convert to [x1,y1,w,h] to [x1,y1,x2,y2] - total_frames += 1 - - if display: - fn = os.path.join( - "mot_benchmark", - phase, - seq, - "img1", - "%06d.jpg" % (frame), - ) - im = io.imread(fn) - ax1.imshow(im) - plt.title(seq + " Tracked Targets") - - start_time = time.time() - trackers = mot_tracker.update(dets) - cycle_time = time.time() - start_time - total_time += cycle_time - - for d in trackers: - print( - "%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1" - % (frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]), - file=out_file, - ) - if display: - d = d.astype(np.int32) - ax1.add_patch( - patches.Rectangle( - (d[0], d[1]), - d[2] - d[0], - d[3] - d[1], - fill=False, - lw=3, - ec=colours[d[4] % 32, :], - ) - ) - - if display: - fig.canvas.flush_events() - plt.draw() - ax1.cla() - - print( - "Total Tracking took: %.3f seconds for %d frames or %.1f FPS" - % (total_time, total_frames, total_frames / total_time) - ) - - if display: - print( - "Note: to get real runtime results run without the option: --display" - ) diff --git a/crabs/detection_tracking/train_model.py b/crabs/detection_tracking/train_model.py index 780b1840..2f2abd8d 100644 --- a/crabs/detection_tracking/train_model.py +++ b/crabs/detection_tracking/train_model.py @@ -278,4 +278,4 @@ def app_wrapper(): if __name__ == "__main__": - app_wrapper() \ No newline at end of file + app_wrapper() From f056b41be4fb1b3ce621f3620e952c1773853861 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 16:25:11 +0100 Subject: [PATCH 10/52] add app_wrapper --- crabs/detection_tracking/inference_model.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index bfe79ecb..05edf526 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -1,6 +1,7 @@ import argparse import csv import os +import sys from pathlib import Path from typing import Any, Optional, TextIO, Tuple @@ -355,7 +356,7 @@ def main(args) -> None: inference.run_inference() -if __name__ == "__main__": +def inference_parse_args(args): parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", @@ -406,3 +407,14 @@ def main(args) -> None: ) args = parser.parse_args() main(args) + + +def app_wrapper(): + torch.set_float32_matmul_precision("medium") + + train_args = inference_parse_args(sys.argv[1:]) + main(train_args) + + +if __name__ == "__main__": + app_wrapper() From 8780c36dbe1f9a71475295b48438bd77a085b680 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 16:36:25 +0100 Subject: [PATCH 11/52] changed accelerator --- crabs/detection_tracking/inference_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 05edf526..f8bf30f7 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -390,7 +390,7 @@ def inference_parse_args(args): parser.add_argument( "--accelerator", type=str, - default="gpu", + default="cuda", help="accelerator for pytorch lightning", ) parser.add_argument( From 56b74ff1ac4cae1cc40719402ef18fe0823ef005 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 20:00:49 +0100 Subject: 
[PATCH 12/52] bugs --- crabs/detection_tracking/inference_model.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index f8bf30f7..dcf265ae 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -73,7 +73,7 @@ def load_trained_model(self) -> torch.nn.Module: self.args.checkpoint_path ) trained_model.eval() - trained_model.to(self.args.accelerator) + # trained_model.to(self.args.accelerator) return trained_model def prep_sort(self, prediction: dict) -> np.ndarray: @@ -213,7 +213,8 @@ def get_prediction(self, frame: np.ndarray) -> torch.Tensor: transforms.ToDtype(torch.float32, scale=True), ] ) - img = transform(frame).to(self.args.accelerator) + img = transform(frame) + # .to(self.args.accelerator) img = img.unsqueeze(0) with torch.no_grad(): prediction = self.trained_model(img) @@ -405,15 +406,14 @@ def inference_parse_args(args): default=None, help="Location of json file containing ground truth annotations.", ) - args = parser.parse_args() - main(args) + return parser.parse_args(args) def app_wrapper(): torch.set_float32_matmul_precision("medium") - train_args = inference_parse_args(sys.argv[1:]) - main(train_args) + inference_args = inference_parse_args(sys.argv[1:]) + main(inference_args) if __name__ == "__main__": From a30b0dc95439212d4329fbb667296a977d2cce81 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 20:31:08 +0100 Subject: [PATCH 13/52] removed accelerator --- crabs/detection_tracking/inference_model.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index dcf265ae..643e0298 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -73,7 +73,6 @@ def load_trained_model(self) -> torch.nn.Module: self.args.checkpoint_path ) trained_model.eval() - # trained_model.to(self.args.accelerator) return trained_model def prep_sort(self, prediction: dict) -> np.ndarray: @@ -214,7 +213,6 @@ def get_prediction(self, frame: np.ndarray) -> torch.Tensor: ] ) img = transform(frame) - # .to(self.args.accelerator) img = img.unsqueeze(0) with torch.no_grad(): prediction = self.trained_model(img) @@ -388,12 +386,6 @@ def inference_parse_args(args): default=os.getcwd(), help="location of output video", ) - parser.add_argument( - "--accelerator", - type=str, - default="cuda", - help="accelerator for pytorch lightning", - ) parser.add_argument( "--max_frames_to_read", type=int, From 918674dbacc8ed2ea417757d6d298795790ae9cd Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 20:51:55 +0100 Subject: [PATCH 14/52] removed accelerator --- bash_scripts/run_inference.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 1f6bb40b..f79dac40 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -95,4 +95,3 @@ --checkpoint_path $CKPT_PATH \ --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ - --accelerator gpu \ From 2d6da1eb41c62e799df7beb42b9c9aca6f9ee98c Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 18 Jun 2024 21:24:13 +0100 Subject: [PATCH 15/52] wrong path --- bash_scripts/run_inference.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index f79dac40..9c135f2a 
100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -39,7 +39,7 @@ # video and inference config VIDEO_PATH=/ceph/zoo/users/sminano/crabs_bboxes_labels/Sep2023_labelled - CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/detection_tracking/config/inference_config.yaml + CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/detection_tracking/config/inference_config.yaml # checkpoint CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt From e458c6dd47a00ca56e5d28b1590f3de2b4a512bc Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 09:18:00 +0100 Subject: [PATCH 16/52] edit path --- bash_scripts/run_inference.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 9c135f2a..422a0a16 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -38,7 +38,7 @@ MLFLOW_FOLDER=/ceph/zoo/users/sminano/ml-runs-all/ml-runs-scratch # video and inference config - VIDEO_PATH=/ceph/zoo/users/sminano/crabs_bboxes_labels/Sep2023_labelled + VIDEO_PATH=/ceph/zoo/users/sminano/crabs_reencoded_videos/Sep2023_day1_reencoded/04.09.2023-04-Right_RE.mp4 CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/detection_tracking/config/inference_config.yaml # checkpoint @@ -87,7 +87,6 @@ echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader echo "-----" - # ------------------- # Run evaluation script # ------------------- @@ -95,3 +94,4 @@ --checkpoint_path $CKPT_PATH \ --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ + --max_frames_to_read 10 From 29cfea61de53eb8ac228e4ce907ee59c11e33ca1 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 10:13:44 +0100 Subject: [PATCH 17/52] adding batches --- bash_scripts/run_inference.sh | 8 +++- crabs/detection_tracking/inference_model.py | 44 +++++++++++++++------ 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 422a0a16..a87c8c43 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -4,7 +4,7 @@ #SBATCH --gres=gpu:1 # gpu:a100_2g.10gb # For any GPU: --gres=gpu:1. 
For a specific one: --gres=gpu:rtx5000 #SBATCH -N 1 # number of nodes #SBATCH --ntasks-per-node 8 # 2 # max number of tasks per node - #SBATCH --mem 32G # memory pool for all cores + #SBATCH --mem 64G # memory pool for all cores #SBATCH -t 3-00:00 # time (D-HH:MM) #SBATCH -o slurm.%A.%N.out #SBATCH -e slurm.%A.%N.err @@ -87,6 +87,12 @@ echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader echo "-----" + # Monitor memory usage + while true; do + echo "$(date): $(free -h)" >> memory_usage.log + sleep 60 + done & + # ------------------- # Run evaluation script # ------------------- diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 643e0298..c5346b4f 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -294,25 +294,43 @@ def run_inference(self): self.csv_writer, csv_file = self.prep_csv_writer() - # loop thru frames of clip + # Loop through frames of the video in batches + frames = [] while self.video.isOpened(): - # break if beyond end frame (mostly for debugging) - if self.args.max_frames_to_read: - if frame_number > self.args.max_frames_to_read: + # Break if beyond end frame (mostly for debugging) + if ( + self.args.max_frames_to_read + and frame_number > self.args.max_frames_to_read + ): + break + + # Read frames in batches + for _ in range(4): + ret, frame = self.video.read() + if not ret: + print("No frame read. Exiting...") break + frames.append((frame_number, frame)) + frame_number += 1 - # read frame - ret, frame = self.video.read() - if not ret: - print("No frame read. Exiting...") + if not frames: break - prediction = self.get_prediction(frame) + # Process the batch of frames + predictions = [self.get_prediction(frame) for _, frame in frames] + + for (frame_number, frame), prediction in zip(frames, predictions): + # Run tracking + self.prep_sort(prediction) + tracked_boxes = self.update_tracking(prediction) + self.save_required_output(tracked_boxes, frame, frame_number) + + # Explicitly delete frame and prediction to free up memory + del frame + del prediction - # run tracking - self.prep_sort(prediction) - tracked_boxes = self.update_tracking(prediction) - self.save_required_output(tracked_boxes, frame, frame_number) + # Clear the frames list for the next batch + frames.clear() # update frame frame_number += 1 From ec6886a4595777a8ab4362dbdf55d97a87d6a55c Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 14:31:54 +0100 Subject: [PATCH 18/52] debugging oom --- crabs/detection_tracking/inference_model.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index c5346b4f..55030ee0 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -297,6 +297,7 @@ def run_inference(self): # Loop through frames of the video in batches frames = [] while self.video.isOpened(): + print("frame number:", frame_number) # Break if beyond end frame (mostly for debugging) if ( self.args.max_frames_to_read @@ -305,7 +306,7 @@ def run_inference(self): break # Read frames in batches - for _ in range(4): + for _ in range(1): ret, frame = self.video.read() if not ret: print("No frame read. 
Exiting...") @@ -317,10 +318,12 @@ def run_inference(self): break # Process the batch of frames + print("predict") predictions = [self.get_prediction(frame) for _, frame in frames] for (frame_number, frame), prediction in zip(frames, predictions): # Run tracking + print("tracking") self.prep_sort(prediction) tracked_boxes = self.update_tracking(prediction) self.save_required_output(tracked_boxes, frame, frame_number) @@ -332,8 +335,8 @@ def run_inference(self): # Clear the frames list for the next batch frames.clear() - # update frame - frame_number += 1 + # # update frame + # frame_number += 1 if self.args.gt_dir: gt_boxes_list = get_ground_truth_data(self.args.gt_dir) From 83ed3424de164aaa915b042999e3bb236d864630 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 14:50:46 +0100 Subject: [PATCH 19/52] save video to false --- crabs/detection_tracking/config/inference_config.yaml | 2 +- crabs/detection_tracking/inference_model.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crabs/detection_tracking/config/inference_config.yaml b/crabs/detection_tracking/config/inference_config.yaml index a8fad266..a6905b40 100644 --- a/crabs/detection_tracking/config/inference_config.yaml +++ b/crabs/detection_tracking/config/inference_config.yaml @@ -5,7 +5,7 @@ max_age: 10 # Minimum number of associated detections before track is initialised min_hits: 1 # save video inference -save_video: True +save_video: False # Save predicted tracks in VIA csv format and export corresponding frames # This is useful to prepare for manual labelling of tracks save_csv_and_frames: False diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index 55030ee0..c0177c17 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -109,6 +109,7 @@ def load_video(self) -> None: self.video = cv2.VideoCapture(self.video_path) if not self.video.isOpened(): raise Exception("Error opening video file") + print("Finished loading the video") # prepare output video writer if required if self.config["save_video"]: From d3942ff47355913e71d9962ff9b8fc0bcf6da411 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 15:11:56 +0100 Subject: [PATCH 20/52] save video to false --- crabs/detection_tracking/inference_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index c0177c17..f195ff55 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -106,6 +106,7 @@ def load_video(self) -> None: Load the input video, and prepare the output video if required. 
""" # load input video + print("loading video") self.video = cv2.VideoCapture(self.video_path) if not self.video.isOpened(): raise Exception("Error opening video file") @@ -373,6 +374,7 @@ def main(args) -> None: """ inference = DetectorInference(args) + print("get args") inference.load_video() inference.run_inference() From 2900a9e7afc50bb0b468e41a53d47949650f8862 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 19 Jun 2024 16:10:43 +0100 Subject: [PATCH 21/52] adding device --- crabs/detection_tracking/inference_model.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index f195ff55..a88cbab6 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -22,6 +22,8 @@ from crabs.detection_tracking.visualization import draw_bbox +DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + class DetectorInference: """ A class for performing object detection or tracking inference on a video @@ -73,6 +75,7 @@ def load_trained_model(self) -> torch.nn.Module: self.args.checkpoint_path ) trained_model.eval() + trained_model.to(DEVICE) return trained_model def prep_sort(self, prediction: dict) -> np.ndarray: @@ -214,7 +217,7 @@ def get_prediction(self, frame: np.ndarray) -> torch.Tensor: transforms.ToDtype(torch.float32, scale=True), ] ) - img = transform(frame) + img = transform(frame).to(DEVICE) img = img.unsqueeze(0) with torch.no_grad(): prediction = self.trained_model(img) From 500d274716aa933b0f8b78d37dbaa039596cf1d2 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Thu, 20 Jun 2024 15:55:16 +0100 Subject: [PATCH 22/52] revert the batch out --- crabs/detection_tracking/inference_model.py | 45 +++++++-------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index a88cbab6..ad8da9e2 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -21,9 +21,9 @@ ) from crabs.detection_tracking.visualization import draw_bbox - DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + class DetectorInference: """ A class for performing object detection or tracking inference on a video @@ -136,6 +136,7 @@ def prep_csv_writer(self) -> Tuple[Any, TextIO]: self.tracking_output_dir = ( crabs_tracks_label_dir / self.video_file_root ) + print(self.tracking_output_dir, flush=True) # Create the subdirectory for the specific video file root self.tracking_output_dir.mkdir(parents=True, exist_ok=True) @@ -300,9 +301,8 @@ def run_inference(self): self.csv_writer, csv_file = self.prep_csv_writer() # Loop through frames of the video in batches - frames = [] while self.video.isOpened(): - print("frame number:", frame_number) + print("frame number:", frame_number, flush=True) # Break if beyond end frame (mostly for debugging) if ( self.args.max_frames_to_read @@ -310,38 +310,21 @@ def run_inference(self): ): break - # Read frames in batches - for _ in range(1): - ret, frame = self.video.read() - if not ret: - print("No frame read. Exiting...") - break - frames.append((frame_number, frame)) - frame_number += 1 - - if not frames: + # read frame + ret, frame = self.video.read() + if not ret: + print("No frame read. 
Exiting...") break - # Process the batch of frames - print("predict") - predictions = [self.get_prediction(frame) for _, frame in frames] - - for (frame_number, frame), prediction in zip(frames, predictions): - # Run tracking - print("tracking") - self.prep_sort(prediction) - tracked_boxes = self.update_tracking(prediction) - self.save_required_output(tracked_boxes, frame, frame_number) - - # Explicitly delete frame and prediction to free up memory - del frame - del prediction + prediction = self.get_prediction(frame) - # Clear the frames list for the next batch - frames.clear() + # run tracking + self.prep_sort(prediction) + tracked_boxes = self.update_tracking(prediction) + self.save_required_output(tracked_boxes, frame, frame_number) - # # update frame - # frame_number += 1 + # update frame + frame_number += 1 if self.args.gt_dir: gt_boxes_list = get_ground_truth_data(self.args.gt_dir) From 7260ca8dfba4c02e2eb97d16e0881f11d02fff05 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Thu, 20 Jun 2024 16:39:18 +0100 Subject: [PATCH 23/52] modify bash script --- bash_scripts/run_inference.sh | 84 ++++++++++++++++------------------- 1 file changed, 39 insertions(+), 45 deletions(-) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index a87c8c43..47489497 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -1,7 +1,7 @@ #!/bin/bash #SBATCH -p gpu # a100 # partition - #SBATCH --gres=gpu:1 # gpu:a100_2g.10gb # For any GPU: --gres=gpu:1. For a specific one: --gres=gpu:rtx5000 + #SBATCH --gres=gpu:1 #SBATCH -N 1 # number of nodes #SBATCH --ntasks-per-node 8 # 2 # max number of tasks per node #SBATCH --mem 64G # memory pool for all cores @@ -48,50 +48,44 @@ GIT_BRANCH=nikkna/inference_cluster # ----------------------------- - # Create virtual environment - # ----------------------------- - module load miniconda - - # Define a environment for each job in the - # temporary directory of the compute node - ENV_NAME=crabs-dev-$SLURM_JOB_ID - ENV_PREFIX=$TMPDIR/$ENV_NAME - - # create environment - conda create \ - --prefix $ENV_PREFIX \ - -y \ - python=3.10 - - # activate environment - conda activate $ENV_PREFIX - - # install crabs package in virtual env - python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH - - - # log pip and python locations - echo $ENV_PREFIX - which python - which pip - - # print the version of crabs package (last number is the commit hash) - echo "Git branch: $GIT_BRANCH" - conda list crabs - echo "-----" - - # ------------------------------------ - # GPU specs - # ------------------------------------ - echo "Memory used per GPU before training" - echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader - echo "-----" - - # Monitor memory usage - while true; do - echo "$(date): $(free -h)" >> memory_usage.log - sleep 60 - done & +# Create virtual environment +# ----------------------------- +module load miniconda + +# Define a environment for each job in the +# temporary directory of the compute node +ENV_NAME=crabs-dev-$SLURM_JOB_ID +ENV_PREFIX=$TMPDIR/$ENV_NAME + +# create environment +conda create \ + --prefix $ENV_PREFIX \ + -y \ + python=3.10 + +# activate environment +conda activate $ENV_PREFIX + +# install crabs package in virtual env +python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH + + +# log pip and python locations +echo $ENV_PREFIX +which python +which pip + +# print the 
version of crabs package (last number is the commit hash) +echo "Git branch: $GIT_BRANCH" +conda list crabs +echo "-----" + +# ------------------------------------ +# GPU specs +# ------------------------------------ +echo "Memory used per GPU before training" +echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader +echo "-----" # ------------------- # Run evaluation script From def687ac14780490f850819181ffdc1c951fff09 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Fri, 21 Jun 2024 11:24:38 +0100 Subject: [PATCH 24/52] add guide --- bash_scripts/run_inference.sh | 3 + .../config/inference_config.yaml | 4 +- crabs/detection_tracking/inference_model.py | 32 ++-- guides/InferenceModelHPC.md | 155 ++++++++++++++++++ 4 files changed, 178 insertions(+), 16 deletions(-) create mode 100644 guides/InferenceModelHPC.md diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh index 47489497..4e09a296 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_inference.sh @@ -44,6 +44,8 @@ # checkpoint CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + #output directory + OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output # version of the codebase GIT_BRANCH=nikkna/inference_cluster @@ -94,4 +96,5 @@ echo "-----" --checkpoint_path $CKPT_PATH \ --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ + --output_dir $OUTPUT_DIR \ --max_frames_to_read 10 diff --git a/crabs/detection_tracking/config/inference_config.yaml b/crabs/detection_tracking/config/inference_config.yaml index a6905b40..720fb1c1 100644 --- a/crabs/detection_tracking/config/inference_config.yaml +++ b/crabs/detection_tracking/config/inference_config.yaml @@ -5,7 +5,7 @@ max_age: 10 # Minimum number of associated detections before track is initialised min_hits: 1 # save video inference -save_video: False +save_video: True # Save predicted tracks in VIA csv format and export corresponding frames # This is useful to prepare for manual labelling of tracks -save_csv_and_frames: False +save_csv_and_frames: True diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py index ad8da9e2..e21154ab 100644 --- a/crabs/detection_tracking/inference_model.py +++ b/crabs/detection_tracking/inference_model.py @@ -108,22 +108,25 @@ def load_video(self) -> None: """ Load the input video, and prepare the output video if required. 
""" - # load input video - print("loading video") self.video = cv2.VideoCapture(self.video_path) if not self.video.isOpened(): raise Exception("Error opening video file") - print("Finished loading the video") - # prepare output video writer if required + # create directory to save output + os.makedirs(self.args.output_dir, exist_ok=True) + if self.config["save_video"]: - # read input video parameters frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)) cap_fps = self.video.get(cv2.CAP_PROP_FPS) - output_file = f"{self.video_file_root}_output_video.mp4" + + output_file = os.path.join( + self.args.output_dir, + f"{os.path.basename(self.video_file_root)}_output_video.mp4", + ) + output_codec = cv2.VideoWriter_fourcc(*"H264") - self.out = cv2.VideoWriter( + self.video_output = cv2.VideoWriter( output_file, output_codec, cap_fps, (frame_width, frame_height) ) @@ -132,7 +135,9 @@ def prep_csv_writer(self) -> Tuple[Any, TextIO]: Prepare csv writer to output tracking results """ - crabs_tracks_label_dir = Path("crabs_tracks_label") + crabs_tracks_label_dir = ( + Path(self.args.output_dir) / "crabs_tracks_label" + ) self.tracking_output_dir = ( crabs_tracks_label_dir / self.video_file_root ) @@ -288,7 +293,7 @@ def save_required_output( (0, 0, 255), f"id : {int(id)}", ) - self.out.write(frame_copy) + self.video_output.write(frame_copy) def run_inference(self): """ @@ -339,7 +344,7 @@ def run_inference(self): # Close outputs if self.config["save_video"]: - self.out.release() + self.video_output.release() if self.config["save_csv_and_frames"]: csv_file.close() @@ -360,7 +365,6 @@ def main(args) -> None: """ inference = DetectorInference(args) - print("get args") inference.load_video() inference.run_inference() @@ -391,10 +395,10 @@ def inference_parse_args(args): ), ) parser.add_argument( - "--output_path", + "--output_dir", type=str, - default=os.getcwd(), - help="location of output video", + default="crabs_track_output", + help="Directory to save the track output", ) parser.add_argument( "--max_frames_to_read", diff --git a/guides/InferenceModelHPC.md b/guides/InferenceModelHPC.md new file mode 100644 index 00000000..ba5c79d8 --- /dev/null +++ b/guides/InferenceModelHPC.md @@ -0,0 +1,155 @@ +# Evaluate a trained detector model in the cluster + +1. **Preparatory steps** + + - If you are not connected to the SWC network: connect to the SWC VPN. + +1. **Connect to the SWC HPC cluster** + + ``` + ssh @ssh.swc.ucl.ac.uk + ssh hpc-gw1 + ``` + + It may ask for your password twice. To set up SSH keys for the SWC cluster, see [this guide](https://howto.neuroinformatics.dev/programming/SSH-SWC-cluster.html#ssh-keys). + +1. **Download the training script from the 🦀 repository** + + To do so, run any of the following commands. They will download a bash script for training (`run_evaluate_single.sh` or `run_evaluate_array.sh`) to the current working directory. + + The download the version of these files in the `main` branch of the [🦀 repository](https://github.com/SainsburyWellcomeCentre/crabs-exploration), run one of the following commands. + + - To train a single job: download the `run_inference.sh` file + + ``` + curl https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/main/bash_scripts/run_inference.sh > run_inference.sh + ``` + + These bash scripts will launch a SLURM job that: + + - gets the 🦀 package from git, + - installs it in the compute node, + - and runs a training job. 
+
+> [!TIP]
+> To retrieve a version of these files that is different from the files at the tip of `main`, edit the remote file path in the curl command:
+>
+> - For example, to download the version of the file at the tip of a branch called `<BRANCH-NAME>`, edit the path above to replace `main` with `<BRANCH-NAME>`:
+>   ```
+>   https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/<BRANCH-NAME>/bash_scripts/run_inference.sh
+>   ```
+> - To download the version of the file at a specific commit, replace `main` with `blob/<COMMIT-HASH>`:
+>   ```
+>   https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/blob/<COMMIT-HASH>/bash_scripts/run_inference.sh
+>   ```
+
+4. **Edit the bash script!**
+
+   For inference, we need to ensure the correct trained model is used. All the parameters used in any training run are logged in `mlflow`.
+
+   We can see the performance of each training session by inspecting the `metrics` tab in the `mlflow UI`, where the `training loss`, `validation precision` and `validation recall` are plotted. The trained model's `checkpoint path` is logged in the `parameters` section under the `overview` tab.
+
+   When launching an inference job, we may want to edit the following variables in the bash script:
+
+   - The `CKPT_PATH`
+   - The `VIDEO_PATH`
+   - The `OUTPUT_DIR`
+
+   Less frequently, one may need to edit:
+
+   - the `CONFIG_FILE`: usually we point to the inference config at `crabs/detection_tracking/config/inference_config.yaml`, which we can edit to change the tracking parameters;
+   - the `GIT_BRANCH`, if we want to use a specific version of the 🦀 package. Usually we will run the version of the 🦀 package in `main`.
+
+5. **Other Inference options**
+
+   By default, inference saves the tracking output as a csv file. There are other outputs we can enable in the config file:
+
+   - `save_video`
+   - `save_csv_and_frames`
+
+   Additionally, if we have ground truth for the video we used, we can pass it to evaluate the tracking:
+
+   - `GT_DIR`
+
+   ```
+   inference-detector \
+   --checkpoint_path $CKPT_PATH \
+   --video_path $VIDEO_PATH \
+   --config_file $CONFIG_FILE \
+   --max_frames_to_read $GT_DIR
+
+   ```
+
+6. **Run the inference job using the SLURM scheduler**
+
+   To launch a job, use the `sbatch` command with the relevant inference script:
+
+   ```
+   sbatch <path-to-script>
+   ```
+
+7. **Check the status of the inference job**
+
+   To do this, we can:
+
+   - Check the SLURM logs: these should be created automatically in the directory from which the `sbatch` command is run.
+   - Run supporting SLURM commands (see [below](#some-useful-slurm-commands)).
+   - Check the MLFlow logs. To do this, first create or activate an existing conda environment with `mlflow` installed, and then run the `mlflow` command from the login node.
+
+     - Create and activate a conda environment.
+       ```
+       module load miniconda
+       conda create -n mlflow-env python=3.10 mlflow -y
+       conda activate mlflow-env
+       ```
+     - Run `mlflow` to visualise the results logged to the `ml-runs` folder.
+
+       - If using the "scratch" folder:
+
+         ```
+         mlflow ui --backend-store-uri file:////ceph/zoo/users/sminano/ml-runs-all/ml-runs-scratch
+         ```
+
+       - If using the selected runs folder:
+
+         ```
+         mlflow ui --backend-store-uri file:////ceph/zoo/users/sminano/ml-runs-all/ml-runs
+         ```
+
+### Some useful SLURM commands
+
+To check the status of your jobs in the queue
+
+```
+squeue -u <SWC-USERNAME>
+```
+
+To show details of the latest jobs (including completed or cancelled jobs)
+
+```
+sacct -X -u <SWC-USERNAME>
+```
+
+To specify columns to display, use `--format` (e.g., `Elapsed`)
+
+```
+sacct -X --format="JobID, JobName, Partition, Account, State, Elapsed" -u <SWC-USERNAME>
+```
+
+To check specific jobs by ID
+
+```
+sacct -X -j 3813494,3813184
+```
+
+To check the time limit of the jobs submitted by a user (for example, `sminano`)
+
+```
+squeue -u sminano --format="%i %P %j %u %T %l %C %S"
+```
+
+To cancel a job
+
+```
+scancel <JOBID>
+```

From 1a5d85370b27f6eb0188353809afed649c4d5ff1 Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 21 Jun 2024 12:39:21 +0100
Subject: [PATCH 25/52] debugging

---
 crabs/detection_tracking/inference_model.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py
index e21154ab..e5d85f21 100644
--- a/crabs/detection_tracking/inference_model.py
+++ b/crabs/detection_tracking/inference_model.py
@@ -114,6 +114,8 @@ def load_video(self) -> None:
 
         # create directory to save output
From be6cff9644db63479c14fe12ebd548004d41d1ac Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 21 Jun 2024 13:45:03 +0100
Subject: [PATCH 27/52] cleaned up

---
 crabs/detection_tracking/inference_model.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py
index c48690b5..edeeb2ea 100644
--- a/crabs/detection_tracking/inference_model.py
+++ b/crabs/detection_tracking/inference_model.py
@@ -114,8 +114,6 @@ def load_video(self) -> None:

         # create directory to save output
         os.makedirs(self.args.output_dir, exist_ok=True)
-        print("output_dir:", self.args.output_dir)
-        print("save_video:", self.config["save_video"])

         if self.config["save_video"]:
             frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -124,12 +124,10 @@ def load_video(self) -> None:
                 self.args.output_dir,
                 f"{os.path.basename(self.video_file_root)}_output_video.mp4",
             )
-            print(output_file)
-            output_codec = cv2.VideoWriter_fourcc('m','p','4','v')
+            output_codec = cv2.VideoWriter_fourcc(*"avc1")
             self.video_output = cv2.VideoWriter(
                 output_file, output_codec, cap_fps, (frame_width, frame_height)
             )
-            print(self.video_output)

 def prep_csv_writer(self) -> Tuple[Any, TextIO]:
     """
@@ -144,7 +140,6 @@ def prep_csv_writer(self) -> Tuple[Any, TextIO]:
         self.tracking_output_dir = (
             crabs_tracks_label_dir / self.video_file_root
         )
-        print(self.tracking_output_dir, flush=True)

         # Create the subdirectory for the specific video file root
         self.tracking_output_dir.mkdir(parents=True, exist_ok=True)
@@ -310,7 +305,6 @@ def run_inference(self):

         # Loop through frames of the video in batches
         while self.video.isOpened():
-            print("frame number:", frame_number, flush=True)
             # Break if beyond end frame (mostly for debugging)
             if (
                 self.args.max_frames_to_read
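[Editor's note] Patch 28 below wires a ground-truth CSV (GT_DIR) into the evaluation. For reference, the MOTA score it feeds (defined in the evaluate_mota docstring removed later in patch 31) reduces to simple arithmetic; a tiny worked example with made-up counts:

```
# MOTA = 1 - (missed detections + false positives + identity switches) / total ground truth
missed, false_positives, id_switches, total_gt = 2, 1, 1, 40

mota = 1 - (missed + false_positives + id_switches) / total_gt
print(f"MOTA = {mota:.2f}")  # 0.90; a score of 1.0 means perfect tracking
```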
From 7117511db8d029a9e376632505249c19e2eec608 Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 21 Jun 2024 13:54:49 +0100
Subject: [PATCH 28/52] adding gt_dir

---
 bash_scripts/run_inference.sh                          | 10 +++++++---
 crabs/detection_tracking/config/inference_config.yaml  |  4 ++--
 guides/InferenceModelHPC.md                            |  2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_inference.sh
index 4e09a296..3df6da15 100644
--- a/bash_scripts/run_inference.sh
+++ b/bash_scripts/run_inference.sh
@@ -38,14 +38,18 @@
 MLFLOW_FOLDER=/ceph/zoo/users/sminano/ml-runs-all/ml-runs-scratch

 # video and inference config
-    VIDEO_PATH=/ceph/zoo/users/sminano/crabs_reencoded_videos/Sep2023_day1_reencoded/04.09.2023-04-Right_RE.mp4
+    VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_output_video.mp4
     CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/detection_tracking/config/inference_config.yaml

     # checkpoint
     CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt

-    #output directory
+    # output directory
     OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output
+
+    # ground truth is available
+    GT_DIR=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv
+
     # version of the codebase
     GIT_BRANCH=nikkna/inference_cluster

@@ -97,4 +101,4 @@ echo "-----"
     --video_path $VIDEO_PATH \
     --config_file $CONFIG_FILE \
     --output_dir $OUTPUT_DIR \
-    --max_frames_to_read 10
+    --gt_dir $GT_DIR

diff --git a/crabs/detection_tracking/config/inference_config.yaml b/crabs/detection_tracking/config/inference_config.yaml
index 720fb1c1..a6905b40 100644
--- a/crabs/detection_tracking/config/inference_config.yaml
+++ b/crabs/detection_tracking/config/inference_config.yaml
@@ -5,7 +5,7 @@ max_age: 10
 # Minimum number of associated detections before track is initialised
 min_hits: 1
 # save video inference
-save_video: True
+save_video: False
 # Save predicted tracks in VIA csv format and export corresponding frames
 # This is useful to prepare for manual labelling of tracks
-save_csv_and_frames: True
+save_csv_and_frames: False

diff --git a/guides/InferenceModelHPC.md b/guides/InferenceModelHPC.md
index ba5c79d8..ce82652d 100644
--- a/guides/InferenceModelHPC.md
+++ b/guides/InferenceModelHPC.md
@@ -76,7 +76,7 @@
     --checkpoint_path $CKPT_PATH \
     --video_path $VIDEO_PATH \
     --config_file $CONFIG_FILE \
-    --max_frames_to_read $GT_DIR
+    --gt_dir $GT_DIR

 ```

From 45cd8bdb4c9678e90f075a0ebabf3f6330e285c4 Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 21 Jun 2024 14:04:37 +0100
Subject: [PATCH 29/52] codec revert

---
 crabs/detection_tracking/inference_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py
index edeeb2ea..68bd00ec 100644
--- a/crabs/detection_tracking/inference_model.py
+++ b/crabs/detection_tracking/inference_model.py
@@ -124,7 +124,7 @@ def load_video(self) -> None:
                 self.args.output_dir,
                 f"{os.path.basename(self.video_file_root)}_output_video.mp4",
             )
-            output_codec = cv2.VideoWriter_fourcc(*"avc1")
+            output_codec = cv2.VideoWriter_fourcc("m", "p", "4", "v")
             self.video_output = cv2.VideoWriter(
                 output_file, output_codec, cap_fps, (frame_width, frame_height)
             )

From 6077a7e213a75da9ed5a36c85be80b773e63332e Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 21 Jun 2024 20:19:34 +0100
Subject: [PATCH 30/52] adding some logging

---
 crabs/detection_tracking/inference_model.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py
index 68bd00ec..c815f84f 100644
--- a/crabs/detection_tracking/inference_model.py
+++ b/crabs/detection_tracking/inference_model.py
@@ -1,5 +1,6 @@
 import argparse
 import csv
+import logging
 import os
 import sys
 from pathlib import Path
@@ -329,12 +330,13 @@ def run_inference(self):
             frame_number += 1

         if self.args.gt_dir:
+            logging.info("evaluate the tracking")
             gt_boxes_list = get_ground_truth_data(self.args.gt_dir)
             mota_values = self.evaluate_tracking(
                 gt_boxes_list, self.tracked_list, self.config["iou_threshold"]
             )
             overall_mota = np.mean(mota_values)
-            print("Overall MOTA:", overall_mota)
+            logging.info("Overall MOTA:", overall_mota)

         # Close input video
         self.video.release()

From a11420057fd1d9127be4f530abdfd5210fb94fdd Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Fri, 28 Jun 2024 20:37:02 +0100
Subject: [PATCH 31/52] cleaned up rebase

---
 .../config/inference_config.yaml            |  11 -
 crabs/detection_tracking/inference_model.py | 426 ------------------
 crabs/detection_tracking/sort.py            | 301 -------------
 crabs/detection_tracking/tracking_utils.py  | 355 ---------------
 4 files changed, 1093 deletions(-)
 delete mode 100644 crabs/detection_tracking/config/inference_config.yaml
 delete mode 100644 crabs/detection_tracking/inference_model.py
 delete mode 100644 crabs/detection_tracking/sort.py
 delete mode 100644 crabs/detection_tracking/tracking_utils.py

diff --git a/crabs/detection_tracking/config/inference_config.yaml b/crabs/detection_tracking/config/inference_config.yaml
deleted file mode 100644
index a6905b40..00000000
--- a/crabs/detection_tracking/config/inference_config.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-iou_threshold: 0.1
-score_threshold: 0.1
-# Maximum number of frames to keep alive a track without associated detections.
-max_age: 10 -# Minimum number of associated detections before track is initialised -min_hits: 1 -# save video inference -save_video: False -# Save predicted tracks in VIA csv format and export corresponding frames -# This is useful to prepare for manual labelling of tracks -save_csv_and_frames: False diff --git a/crabs/detection_tracking/inference_model.py b/crabs/detection_tracking/inference_model.py deleted file mode 100644 index cf067a27..00000000 --- a/crabs/detection_tracking/inference_model.py +++ /dev/null @@ -1,426 +0,0 @@ -import argparse -import csv -import logging -import os -import sys -from pathlib import Path -from typing import Any, Optional, TextIO, Tuple - -import cv2 -import numpy as np -import torch -import torchvision.transforms.v2 as transforms -import yaml # type: ignore - -from crabs.detection_tracking.models import FasterRCNN -from crabs.detection_tracking.sort import Sort -from crabs.detection_tracking.tracking_utils import ( - evaluate_mota, - get_ground_truth_data, - save_frame_and_csv, - write_tracked_bbox_to_csv, -) -from crabs.detection_tracking.visualization import draw_bbox - -DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -class DetectorInference: - """ - A class for performing object detection or tracking inference on a video - using a trained model. - - Parameters - ---------- - args : argparse.Namespace) - Command-line arguments containing configuration settings. - - Attributes - ---------- - args : argparse.Namespace - The command-line arguments provided. - video_path : str - The path to the input video. - sort_tracker : Sort - An instance of the sorting algorithm used for tracking. - """ - - def __init__(self, args: argparse.Namespace) -> None: - self.args = args - self.config_file = args.config_file - self.video_path = args.video_path - - self.video_file_root = f"{Path(self.video_path).stem}" - self.trained_model = self.load_trained_model() - self.load_config_yaml() - self.sort_tracker = Sort( - max_age=self.config["max_age"], - min_hits=self.config["min_hits"], - iou_threshold=self.config["iou_threshold"], - ) - - def load_config_yaml(self): - with open(self.config_file, "r") as f: - self.config = yaml.safe_load(f) - - def load_trained_model(self) -> torch.nn.Module: - """ - Load the trained model. - - Returns - ------- - torch.nn.Module - """ - # Get trained model - trained_model = FasterRCNN.load_from_checkpoint( - self.args.checkpoint_path - ) - trained_model.eval() - trained_model.to(DEVICE) - return trained_model - - def prep_sort(self, prediction: dict) -> np.ndarray: - """ - Put predictions in format expected by SORT - - Parameters - ---------- - prediction : dict - The dictionary containing predicted bounding boxes, scores, and labels. - - Returns - ------- - np.ndarray: - An array containing sorted bounding boxes of detected objects. - """ - pred_boxes = prediction[0]["boxes"].detach().cpu().numpy() - pred_scores = prediction[0]["scores"].detach().cpu().numpy() - pred_labels = prediction[0]["labels"].detach().cpu().numpy() - - pred_sort = [] - for box, score, label in zip(pred_boxes, pred_scores, pred_labels): - if score > self.config["score_threshold"]: - bbox = np.concatenate((box, [score])) - pred_sort.append(bbox) - - return np.asarray(pred_sort) - - def load_video(self) -> None: - """ - Load the input video, and prepare the output video if required. 
- """ - self.video = cv2.VideoCapture(self.video_path) - if not self.video.isOpened(): - raise Exception("Error opening video file") - - # create directory to save output - os.makedirs(self.args.output_dir, exist_ok=True) - - if self.config["save_video"]: - frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)) - frame_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)) - cap_fps = self.video.get(cv2.CAP_PROP_FPS) - - output_file = os.path.join( - self.args.output_dir, - f"{os.path.basename(self.video_file_root)}_output_video.mp4", - ) - output_codec = cv2.VideoWriter_fourcc("m", "p", "4", "v") - self.video_output = cv2.VideoWriter( - output_file, output_codec, cap_fps, (frame_width, frame_height) - ) - - def prep_csv_writer(self) -> Tuple[Any, TextIO]: - """ - Prepare csv writer to output tracking results - """ - - crabs_tracks_label_dir = ( - Path(self.args.output_dir) / "crabs_tracks_label" - ) - self.tracking_output_dir = ( - crabs_tracks_label_dir / self.video_file_root - ) - # Create the subdirectory for the specific video file root - self.tracking_output_dir.mkdir(parents=True, exist_ok=True) - - csv_file = open( - f"{str(self.tracking_output_dir / self.video_file_root)}.csv", - "w", - ) - csv_writer = csv.writer(csv_file) - - # write header following VIA convention - # https://www.robots.ox.ac.uk/~vgg/software/via/docs/face_track_annotation.html - csv_writer.writerow( - ( - "filename", - "file_size", - "file_attributes", - "region_count", - "region_id", - "region_shape_attributes", - "region_attributes", - ) - ) - - return csv_writer, csv_file - - def evaluate_tracking( - self, - gt_boxes_list: list, - tracked_boxes_list: list, - ) -> list[float]: - """ - Evaluate tracking performance using the Multi-Object Tracking Accuracy (MOTA) metric. - - Parameters - ---------- - gt_boxes_list : list[list[float]] - List of ground truth bounding boxes for each frame. - tracked_boxes_list : list[list[float]] - List of tracked bounding boxes for each frame. - - Returns - ------- - list[float]: - The computed MOTA (Multi-Object Tracking Accuracy) score for the tracking performance. - """ - mota_values = [] - prev_frame_ids: Optional[list[list[int]]] = None - # prev_frame_ids = None - for gt_boxes, tracked_boxes in zip(gt_boxes_list, tracked_boxes_list): - mota = evaluate_mota( - gt_boxes, - tracked_boxes, - self.config["iou_threshold"], - prev_frame_ids, - ) - mota_values.append(mota) - # Update previous frame IDs for the next iteration - prev_frame_ids = [[box[-1] for box in tracked_boxes]] - - return mota_values - - def get_prediction(self, frame: np.ndarray) -> torch.Tensor: - """ - Get prediction from the trained model for a given frame. - - Parameters - ---------- - frame : np.ndarray - The input frame for which prediction is to be obtained. - - Returns - ------- - torch.Tensor: - The prediction tensor from the trained model. - """ - transform = transforms.Compose( - [ - transforms.ToImage(), - transforms.ToDtype(torch.float32, scale=True), - ] - ) - img = transform(frame).to(DEVICE) - img = img.unsqueeze(0) - with torch.no_grad(): - prediction = self.trained_model(img) - return prediction - - def update_tracking(self, prediction: dict) -> list[list[float]]: - """ - Update the tracking system with the latest prediction. - - Parameters - ---------- - prediction : dict - Dictionary containing predicted bounding boxes, scores, and labels. - - Returns - ------- - list[list[float]]: - list of tracked bounding boxes after updating the tracking system. 
- """ - pred_sort = self.prep_sort(prediction) - tracked_boxes = self.sort_tracker.update(pred_sort) - self.tracked_list.append(tracked_boxes) - return tracked_boxes - - def save_required_output( - self, - tracked_boxes: list[list[float]], - frame: np.ndarray, - frame_number: int, - ) -> None: - """ - Handle the output based argument options. - - Parameters - ---------- - tracked_boxes : list[list[float]] - list of tracked bounding boxes. - frame : np.ndarray - The current frame. - frame_number : int - The frame number. - """ - frame_name = f"{self.video_file_root}_frame_{frame_number:08d}.png" - if self.config["save_csv_and_frames"]: - save_frame_and_csv( - frame_name, - self.tracking_output_dir, - tracked_boxes, - frame, - frame_number, - self.csv_writer, - ) - else: - for bbox in tracked_boxes: - write_tracked_bbox_to_csv( - bbox, frame, frame_name, self.csv_writer - ) - - if self.config["save_video"]: - frame_copy = frame.copy() - for bbox in tracked_boxes: - xmin, ymin, xmax, ymax, id = bbox - draw_bbox( - frame_copy, - (xmin, ymin), - (xmax, ymax), - (0, 0, 255), - f"id : {int(id)}", - ) - self.video_output.write(frame_copy) - - def run_inference(self): - """ - Run object detection + tracking on the video frames. - """ - # initialisation - frame_number = 1 - self.tracked_list = [] - - self.csv_writer, csv_file = self.prep_csv_writer() - - # Loop through frames of the video in batches - while self.video.isOpened(): - # Break if beyond end frame (mostly for debugging) - if ( - self.args.max_frames_to_read - and frame_number > self.args.max_frames_to_read - ): - break - - # read frame - ret, frame = self.video.read() - if not ret: - print("No frame read. Exiting...") - break - - prediction = self.get_prediction(frame) - - # run tracking - self.prep_sort(prediction) - tracked_boxes = self.update_tracking(prediction) - self.save_required_output(tracked_boxes, frame, frame_number) - - # update frame - frame_number += 1 - - if self.args.gt_dir: - logging.info("evaluate the tracking") - gt_boxes_list = get_ground_truth_data(self.args.gt_dir) - mota_values = self.evaluate_tracking( - gt_boxes_list, self.tracked_list, self.config["iou_threshold"] - ) - overall_mota = np.mean(mota_values) - logging.info("Overall MOTA:", overall_mota) - - # Close input video - self.video.release() - - # Close outputs - if self.config["save_video"]: - self.video_output.release() - - if self.config["save_csv_and_frames"]: - csv_file.close() - - -def main(args) -> None: - """ - Main function to run the inference on video based on the trained model. - - Parameters - ---------- - args : argparse - Arguments or configuration settings for testing. - - Returns - ------- - None - """ - - inference = DetectorInference(args) - inference.load_video() - inference.run_inference() - - -def inference_parse_args(args): - parser = argparse.ArgumentParser() - parser.add_argument( - "--checkpoint_path", - type=str, - required=True, - help="location of checkpoint of the trained model", - ) - parser.add_argument( - "--video_path", - type=str, - required=True, - help="location of images and coco annotation", - ) - parser.add_argument( - "--config_file", - type=str, - default=str( - Path(__file__).parent / "config" / "inference_config.yaml" - ), - help=( - "Location of YAML config to control training. 
" - "Default: crabs-exploration/crabs/detection_tracking/config/inference_config.yaml" - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="crabs_track_output", - help="Directory to save the track output", - ) - parser.add_argument( - "--max_frames_to_read", - type=int, - default=None, - help="Maximum number of frames to read (mostly for debugging).", - ) - parser.add_argument( - "--gt_dir", - type=str, - default=None, - help="Location of json file containing ground truth annotations.", - ) - return parser.parse_args(args) - - -def app_wrapper(): - torch.set_float32_matmul_precision("medium") - - inference_args = inference_parse_args(sys.argv[1:]) - main(inference_args) - - -if __name__ == "__main__": - app_wrapper() - diff --git a/crabs/detection_tracking/sort.py b/crabs/detection_tracking/sort.py deleted file mode 100644 index 8f291b78..00000000 --- a/crabs/detection_tracking/sort.py +++ /dev/null @@ -1,301 +0,0 @@ -""" -SORT: A Simple, Online and Realtime Tracker -Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . -""" - -from __future__ import print_function - -import numpy as np -from filterpy.kalman import KalmanFilter - - -def linear_assignment(cost_matrix): - try: - import lap - - _, x, y = lap.lapjv(cost_matrix, extend_cost=True) - return np.array([[y[i], i] for i in x if i >= 0]) # - except ImportError: - from scipy.optimize import linear_sum_assignment - - x, y = linear_sum_assignment(cost_matrix) - return np.array(list(zip(x, y))) - - -def iou_batch(bb_test, bb_gt): - """ - From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2] - """ - bb_gt = np.expand_dims(bb_gt, 0) - bb_test = np.expand_dims(bb_test, 1) - - xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0]) - yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) - xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) - yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) - w = np.maximum(0.0, xx2 - xx1) - h = np.maximum(0.0, yy2 - yy1) - wh = w * h - o = wh / ( - (bb_test[..., 2] - bb_test[..., 0]) - * (bb_test[..., 3] - bb_test[..., 1]) - + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - - wh - ) - return o - - -def convert_bbox_to_z(bbox): - """ - Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form - [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is - the aspect ratio - """ - w = bbox[2] - bbox[0] - h = bbox[3] - bbox[1] - x = bbox[0] + w / 2.0 - y = bbox[1] + h / 2.0 - s = w * h # scale is just area - r = w / float(h) - return np.array([x, y, s, r]).reshape((4, 1)) - - -def convert_x_to_bbox(x, score=None): - """ - Takes a bounding box in the centre form [x,y,s,r] and returns it in the form - [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right - """ - w = np.sqrt(x[2] * x[3]) - h = x[2] / w - if score is None: - return np.array( - [x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0] - ).reshape((1, 4)) - else: - return 
np.array( - [ - x[0] - w / 2.0, - x[1] - h / 2.0, - x[0] + w / 2.0, - x[1] + h / 2.0, - score, - ] - ).reshape((1, 5)) - - -class KalmanBoxTracker(object): - """ - This class represents the internal state of individual tracked objects - observed as bbox. - """ - - count = 0 - - def __init__(self, bbox): - """ - Initialises a tracker using initial bounding box. - """ - # define constant velocity model - self.kf = KalmanFilter(dim_x=7, dim_z=4) - self.kf.F = np.array( - [ - [1, 0, 0, 0, 1, 0, 0], - [0, 1, 0, 0, 0, 1, 0], - [0, 0, 1, 0, 0, 0, 1], - [0, 0, 0, 1, 0, 0, 0], - [0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 0, 1], - ] - ) - self.kf.H = np.array( - [ - [1, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0], - [0, 0, 0, 1, 0, 0, 0], - ] - ) - - self.kf.R[2:, 2:] *= 10.0 - self.kf.P[ - 4:, 4: - ] *= 1000.0 # give high uncertainty to the unobservable initial velocities - self.kf.P *= 10.0 - self.kf.Q[-1, -1] *= 0.01 - self.kf.Q[4:, 4:] *= 0.01 - - self.kf.x[:4] = convert_bbox_to_z(bbox) - self.time_since_update = 0 - self.id = KalmanBoxTracker.count - KalmanBoxTracker.count += 1 - self.history = [] - self.hits = 0 - self.hit_streak = 0 - self.age = 0 - - def update(self, bbox): - """ - Updates the state vector with observed bbox. - """ - self.time_since_update = 0 - self.history = [] - self.hits += 1 - self.hit_streak += 1 - self.kf.update(convert_bbox_to_z(bbox)) - - def predict(self): - """ - Advances the state vector and returns the predicted bounding box estimate. - """ - if (self.kf.x[6] + self.kf.x[2]) <= 0: - self.kf.x[6] *= 0.0 - self.kf.predict() - self.age += 1 - if self.time_since_update > 0: - self.hit_streak = 0 - self.time_since_update += 1 - self.history.append(convert_x_to_bbox(self.kf.x)) - return self.history[-1] - - def get_state(self): - """ - Returns the current bounding box estimate. 
- """ - return convert_x_to_bbox(self.kf.x) - - -def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3): - """ - Assigns detections to tracked object (both represented as bounding boxes) - - Returns 3 lists of matches, unmatched_detections and unmatched_trackers - """ - if len(trackers) == 0: - return ( - np.empty((0, 2), dtype=int), - np.arange(len(detections)), - np.empty((0, 5), dtype=int), - ) - - iou_matrix = iou_batch(detections, trackers) - - if min(iou_matrix.shape) > 0: - a = (iou_matrix > iou_threshold).astype(np.int32) - if a.sum(1).max() == 1 and a.sum(0).max() == 1: - matched_indices = np.stack(np.where(a), axis=1) - else: - matched_indices = linear_assignment(-iou_matrix) - else: - matched_indices = np.empty(shape=(0, 2)) - - unmatched_detections = [] - for d, det in enumerate(detections): - if d not in matched_indices[:, 0]: - unmatched_detections.append(d) - unmatched_trackers = [] - for t, trk in enumerate(trackers): - if t not in matched_indices[:, 1]: - unmatched_trackers.append(t) - - # filter out matched with low IOU - matches = [] - for m in matched_indices: - if iou_matrix[m[0], m[1]] < iou_threshold: - unmatched_detections.append(m[0]) - unmatched_trackers.append(m[1]) - else: - matches.append(m.reshape(1, 2)) - if len(matches) == 0: - matches = np.empty((0, 2), dtype=int) - else: - matches = np.concatenate(matches, axis=0) - - return ( - matches, - np.array(unmatched_detections), - np.array(unmatched_trackers), - ) - - -class Sort(object): - def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3): - """ - Sets key parameters for SORT - """ - self.max_age = max_age - self.min_hits = min_hits - self.iou_threshold = iou_threshold - self.trackers = [] - self.frame_count = 0 - - def update(self, dets=np.empty((0, 5))): - """ - Params: - dets - a numpy array of detections in the format - [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...] - Requires: this method must be called once for each frame even with - empty detections (use np.empty((0, 5)) for frames without detections). - Returns the a similar array, where the last column is the object ID. - - NOTE: The number of objects returned may differ from the number of - detections provided. - """ - self.frame_count += 1 - # get predicted locations from existing trackers. 
- trks = np.zeros((len(self.trackers), 5)) - to_del = [] - ret = [] - for t, trk in enumerate(trks): - pos = self.trackers[t].predict()[0] - trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] - if np.any(np.isnan(pos)): - to_del.append(t) - trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) - for t in reversed(to_del): - self.trackers.pop(t) - ( - matched, - unmatched_dets, - unmatched_trks, - ) = associate_detections_to_trackers(dets, trks, self.iou_threshold) - - # update matched trackers with assigned detections - for m in matched: - self.trackers[m[1]].update(dets[m[0], :]) - - # create and initialise new trackers for unmatched detections - for i in unmatched_dets: - trk = KalmanBoxTracker(dets[i, :]) - self.trackers.append(trk) - i = len(self.trackers) - for trk in reversed(self.trackers): - d = trk.get_state()[0] - if (trk.time_since_update < 1) and ( - trk.hit_streak >= self.min_hits - or self.frame_count <= self.min_hits - ): - ret.append( - np.concatenate((d, [trk.id + 1])).reshape(1, -1) - ) # +1 as MOT benchmark requires positive - i -= 1 - # remove dead tracklet - if trk.time_since_update > self.max_age: - self.trackers.pop(i) - if len(ret) > 0: - return np.concatenate(ret) - return np.empty((0, 5)) - diff --git a/crabs/detection_tracking/tracking_utils.py b/crabs/detection_tracking/tracking_utils.py deleted file mode 100644 index b68cfa48..00000000 --- a/crabs/detection_tracking/tracking_utils.py +++ /dev/null @@ -1,355 +0,0 @@ -import csv -import json -import logging -from pathlib import Path -from typing import Any, Dict, Optional - -import cv2 -import numpy as np - - -def calculate_iou(box1: np.ndarray, box2: np.ndarray) -> float: - """ - Calculate IoU (Intersection over Union) of two bounding boxes. - - Parameters - ---------- - box1 (np.ndarray): - Coordinates [x1, y1, x2, y2] of the first bounding box. - Here, (x1, y1) represents the top-left corner, and (x2, y2) represents the bottom-right corner. - box2 (np.ndarray): - Coordinates [x1, y1, x2, y2] of the second bounding box. - Here, (x1, y1) represents the top-left corner, and (x2, y2) represents the bottom-right corner. - - Returns - ------- - float: - IoU value. - """ - x1_box1, y1_box1, x2_box1, y2_box1 = box1 - x1_box2, y1_box2, x2_box2, y2_box2 = box2 - - # Calculate intersection coordinates - x1_intersect = max(x1_box1, x1_box2) - y1_intersect = max(y1_box1, y1_box2) - x2_intersect = min(x2_box1, x2_box2) - y2_intersect = min(y2_box1, y2_box2) - - # Calculate area of intersection rectangle - intersect_width = max(0, x2_intersect - x1_intersect + 1) - intersect_height = max(0, y2_intersect - y1_intersect + 1) - intersect_area = intersect_width * intersect_height - - # Calculate area of individual bounding boxes - box1_area = (x2_box1 - x1_box1 + 1) * (y2_box1 - y1_box1 + 1) - box2_area = (x2_box2 - x1_box2 + 1) * (y2_box2 - y1_box2 + 1) - - iou = intersect_area / float(box1_area + box2_area - intersect_area) - - return iou - - -def count_identity_switches( - prev_frame_ids: Optional[list[list[int]]], - current_frame_ids: Optional[list[list[int]]], -) -> int: - """ - Count the number of identity switches between two sets of object IDs. - - Parameters - ---------- - prev_frame_ids : Optional[list[list[int]]] - List of object IDs in the previous frame. - current_frame_ids : Optional[list[list[int]]] - List of object IDs in the current frame. - - Returns - ------- - int - The number of identity switches between the two sets of object IDs. 
- """ - - if prev_frame_ids is None or current_frame_ids is None: - return 0 - - # Initialize count of identity switches - num_switches = 0 - - prev_ids = set(prev_frame_ids[0]) - current_ids = set(current_frame_ids[0]) - - # Calculate the number of switches by finding the difference in IDs - num_switches = len(prev_ids.symmetric_difference(current_ids)) - - return num_switches - - -def evaluate_mota( - gt_boxes: np.ndarray, - tracked_boxes: np.ndarray, - iou_threshold: float, - prev_frame_ids: Optional[list[list[int]]], -) -> float: - """ - Evaluate MOTA (Multiple Object Tracking Accuracy). - - MOTA is a metric used to evaluate the performance of object tracking algorithms. - - Parameters - ---------- - gt_boxes : np.ndarray - Ground truth bounding boxes of objects. - tracked_boxes : np.ndarray - Tracked bounding boxes of objects. - iou_threshold : float - Intersection over Union (IoU) threshold for considering a match. - prev_frame_ids : Optional[list[list[int]]] - IDs from the previous frame for identity switch detection. - - Returns - ------- - float - The computed MOTA (Multi-Object Tracking Accuracy) score for the tracking performance. - - Notes - ----- - MOTA is calculated using the following formula: - - MOTA = 1 - (Missed Detections + False Positives + Identity Switches) / Total Ground Truth - - - Missed Detections: Instances where the ground truth objects were not detected by the tracking algorithm. - - False Positives: Instances where the tracking algorithm produces a detection where there is no corresponding ground truth object. - - Identity Switches: Instances where the tracking algorithm assigns a different ID to an object compared to its ID in the previous frame. - - Total Ground Truth: The total number of ground truth objects in the scene. - - The MOTA score ranges from 0 to 1, with higher values indicating better tracking performance. - A MOTA score of 1 indicates perfect tracking, where there are no missed detections, false positives, or identity switches. - """ - total_gt = len(gt_boxes) - false_positive = 0 - - for i, tracked_box in enumerate(tracked_boxes): - best_iou = 0.0 - best_match = None - - for j, gt_box in enumerate(gt_boxes): - iou = calculate_iou(gt_box[:4], tracked_box[:4]) - if iou > iou_threshold and iou > best_iou: - best_iou = iou - best_match = j - if best_match is not None: - # successfully found a matching ground truth box for the tracked box. - # set the corresponding ground truth box to None. - gt_boxes[best_match] = None - else: - false_positive += 1 - - missed_detections = 0 - for box in gt_boxes: - if box is not None and not np.all(np.isnan(box)): - # if true ground truth box was not matched with any tracked box - missed_detections += 1 - - tracked_ids = [[box[-1] for box in tracked_boxes]] - - num_switches = count_identity_switches(prev_frame_ids, tracked_ids) - - mota = 1 - (missed_detections + false_positive + num_switches) / total_gt - return mota - - -def extract_bounding_box_info(row: list[str]) -> Dict[str, Any]: - """ - Extracts bounding box information from a row of data. - - Parameters - ---------- - row : list[str] - A list representing a row of data containing information about a bounding box. - - Returns - ------- - Dict[str, Any]: - A dictionary containing the extracted bounding box information. 
- """ - filename = row[0] - region_shape_attributes = json.loads(row[5]) - region_attributes = json.loads(row[6]) - - x = region_shape_attributes["x"] - y = region_shape_attributes["y"] - width = region_shape_attributes["width"] - height = region_shape_attributes["height"] - track_id = region_attributes["track"] - - frame_number = int(filename.split("_")[-1].split(".")[0]) - 1 - return { - "frame_number": frame_number, - "x": x, - "y": y, - "width": width, - "height": height, - "id": track_id, - } - - -def create_gt_list( - ground_truth_data: list[Dict[str, Any]], gt_boxes_list: list[np.ndarray] -) -> list[np.ndarray]: - """ - Creates a list of ground truth bounding boxes organized by frame number. - - Parameters - ---------- - ground_truth_data : list[Dict[str, Any]] - A list containing ground truth bounding box data organized by frame number. - gt_boxes_list : list[np.ndarray] - A list to store the ground truth bounding boxes for each frame. - - Returns - ------- - list[np.ndarray]: - A list containing ground truth bounding boxes organized by frame number. - """ - for data in ground_truth_data: - frame_number = data["frame_number"] - bbox = np.array( - [ - data["x"], - data["y"], - data["x"] + data["width"], - data["y"] + data["height"], - data["id"], - ], - dtype=np.float32, - ) - if gt_boxes_list[frame_number].size == 0: - gt_boxes_list[frame_number] = bbox.reshape( - 1, -1 - ) # Initialize as a 2D array - else: - gt_boxes_list[frame_number] = np.vstack( - [gt_boxes_list[frame_number], bbox] - ) - return gt_boxes_list - - -def get_ground_truth_data(gt_dir: str) -> list[np.ndarray]: - """ - Extract ground truth bounding box data from a CSV file. - - Parameters - ---------- - gt_dir : str - The path to the CSV file containing ground truth data. - - Returns - ------- - list[np.ndarray]: - A list containing ground truth bounding box data organized by frame number. - The numpy array represent the coordinates and ID of the bounding box in the order: - x, y, x + width, y + height, ID - """ - ground_truth_data = [] - max_frame_number = 0 - - # Open the CSV file and read its contents line by line - with open(gt_dir, "r") as csvfile: - csvreader = csv.reader(csvfile) - next(csvreader) # Skip the header row - for row in csvreader: - data = extract_bounding_box_info(row) - ground_truth_data.append(data) - max_frame_number = max(max_frame_number, data["frame_number"]) - - # Initialize a list to store the ground truth bounding boxes for each frame - gt_boxes_list = [np.array([]) for _ in range(max_frame_number + 1)] - - gt_boxes_list = create_gt_list(ground_truth_data, gt_boxes_list) - return gt_boxes_list - - -def write_tracked_bbox_to_csv( - bbox: np.ndarray, - frame: np.ndarray, - frame_name: str, - csv_writer: Any, -) -> None: - """ - Write bounding box annotation to a CSV file. - - Parameters - ---------- - bbox : np.ndarray - A numpy array containing the bounding box coordinates - (xmin, ymin, xmax, ymax, id). - frame : np.ndarray - The frame to which the bounding box belongs. - frame_name : str - The name of the frame. - csv_writer : Any - The CSV writer object to write the annotation. 
- """ - # Bounding box geometry - xmin, ymin, xmax, ymax, id = bbox - width_box = int(xmax - xmin) - height_box = int(ymax - ymin) - - # Add to csv - csv_writer.writerow( - ( - frame_name, - frame.size, - '{{"clip":{}}}'.format("123"), - 1, - 0, - '{{"name":"rect","x":{},"y":{},"width":{},"height":{}}}'.format( - xmin, ymin, width_box, height_box - ), - '{{"track":"{}"}}'.format(int(id)), - ) - ) - - -def save_frame_and_csv( - frame_name: str, - tracking_output_dir: Path, - tracked_boxes: list[list[float]], - frame: np.ndarray, - frame_number: int, - csv_writer: Any, -) -> None: - """ - Save tracked bounding boxes as frames and write to a CSV file. - - Parameters - ---------- - video_file_root : str - The root path of the video file. - tracking_output_dir : Path - The directory where tracked frames and CSV file will be saved. - tracked_boxes : list[list[float]] - List of bounding boxes to be saved. - frame : np.ndarray - The frame image. - frame_number : int - The frame number. - csv_writer : Any - CSV writer object for writing bounding box data. - - Returns - ------- - None - """ - for bbox in tracked_boxes: - # Add bbox to csv - write_tracked_bbox_to_csv(bbox, frame, frame_name, csv_writer) - - # Save frame as PNG - once as per frame - frame_path = tracking_output_dir / frame_name - img_saved = cv2.imwrite(str(frame_path), frame) - if not img_saved: - logging.error( - f"Didn't save {frame_name}, frame {frame_number}, Skipping." - ) - From 17146adf8aa0cc213da9933623d46152898cd627 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Fri, 28 Jun 2024 20:45:17 +0100 Subject: [PATCH 32/52] some changes based on the new modules --- .../{run_inference.sh => run_tracking.sh} | 66 +++++++++---------- ...ferenceModelHPC.md => TrackingModelHPC.md} | 8 +-- 2 files changed, 35 insertions(+), 39 deletions(-) rename bash_scripts/{run_inference.sh => run_tracking.sh} (58%) rename guides/{InferenceModelHPC.md => TrackingModelHPC.md} (96%) diff --git a/bash_scripts/run_inference.sh b/bash_scripts/run_tracking.sh similarity index 58% rename from bash_scripts/run_inference.sh rename to bash_scripts/run_tracking.sh index 3df6da15..1eb8591f 100644 --- a/bash_scripts/run_inference.sh +++ b/bash_scripts/run_tracking.sh @@ -33,13 +33,9 @@ # Define variables # ---------------------- - # mlflow - EXPERIMENT_NAME="Sept2023_inference" - MLFLOW_FOLDER=/ceph/zoo/users/sminano/ml-runs-all/ml-runs-scratch - # video and inference config VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_output_video.mp4 - CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/detection_tracking/config/inference_config.yaml + CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml # checkpoint CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt @@ -48,57 +44,57 @@ OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output # ground truth is available - GT_DIR=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv + GT_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv # version of the codebase GIT_BRANCH=nikkna/inference_cluster # ----------------------------- -# Create virtual environment -# ----------------------------- -module load miniconda + # Create virtual environment + # ----------------------------- + module load 
miniconda

    # Define a environment for each job in the
    # temporary directory of the compute node
    ENV_NAME=crabs-dev-$SLURM_JOB_ID
    ENV_PREFIX=$TMPDIR/$ENV_NAME

    # create environment
    conda create \
        --prefix $ENV_PREFIX \
        -y \
        python=3.10

    # activate environment
    conda activate $ENV_PREFIX

    # install crabs package in virtual env
    python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH

    # log pip and python locations
    echo $ENV_PREFIX
    which python
    which pip

    # print the version of crabs package (last number is the commit hash)
    echo "Git branch: $GIT_BRANCH"
    conda list crabs
    echo "-----"

    # ------------------------------------
    # GPU specs
    # ------------------------------------
    echo "Memory used per GPU before training"
    echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader
    echo "-----"

    # -------------------
    # Run evaluation script
    # -------------------
    detect-and-track-video \
    --checkpoint_path $CKPT_PATH \
    --video_path $VIDEO_PATH \
    --config_file $CONFIG_FILE \
    --output_dir $OUTPUT_DIR \
    --gt_path $GT_PATH
diff --git a/guides/InferenceModelHPC.md b/guides/TrackingModelHPC.md
similarity index 96%
rename from guides/InferenceModelHPC.md
rename to guides/TrackingModelHPC.md
index ce82652d..93c720ea 100644
--- a/guides/InferenceModelHPC.md
+++ b/guides/TrackingModelHPC.md
@@ -45,7 +45,7 @@

 4. **Edit the bash script!**

-   For inference, we need to ensure the correct trained model is used. All the parameters used in any training session are logged in `mlflow`.
+   To run the tracker, we need to ensure the correct trained model is used. All the parameters used in any training session are logged in `mlflow`.

    We can see the performance of each training session by inspecting the `metrics` tab in the `mlflow UI`, where the `training loss`, `validation precision` and `validation recall` are plotted. The trained model (`checkpoint path`) is logged in the `parameters` section under the `overview` tab.

@@ -69,14 +69,14 @@

    Additionally, if we have ground truth for the video we used, we may want to add that to get the tracking evaluation:

-   - `GT_DIR`
+   - `GT_PATH`

    ```
-   inference-detector \
+   detect-and-track-video \
       --checkpoint_path $CKPT_PATH \
       --video_path $VIDEO_PATH \
       --config_file $CONFIG_FILE \
-      --gt_dir $GT_DIR
+      --gt_path $GT_PATH

    ```
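[Editor's note] A short sketch of how the three tracking parameters in the YAML config feed the SORT tracker, mirroring the DetectorInference constructor removed in patch 31. The import path after the move to the crabs/tracker module is an assumption, and the config file path is a placeholder.

```
import yaml

from crabs.tracker.sort import Sort  # assumed location after the module rename

with open("tracking_config.yaml") as f:  # placeholder path
    config = yaml.safe_load(f)

tracker = Sort(
    max_age=config["max_age"],  # frames a track survives without a detection
    min_hits=config["min_hits"],  # detections needed before a track is reported
    iou_threshold=config["iou_threshold"],  # minimum IoU to match detection and track
)
```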
From 8284157530e9565479fcbcac30ce9a16bd4dba56 Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Tue, 9 Jul 2024 13:54:47 +0100
Subject: [PATCH 33/52] adding bash script for running all escape events

---
 .../run_tracking_all_escape_events.sh | 107 ++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 bash_scripts/run_tracking_all_escape_events.sh

diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh
new file mode 100644
index 00000000..d3e9935f
--- /dev/null
+++ b/bash_scripts/run_tracking_all_escape_events.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+#SBATCH -p gpu # a100 # partition
+#SBATCH --gres=gpu:1
+#SBATCH -N 1 # number of nodes
+#SBATCH --ntasks-per-node 8 # 2 # max number of tasks per node
+#SBATCH --mem 64G # memory pool for all cores
+#SBATCH -t 3-00:00 # time (D-HH:MM)
+#SBATCH -o slurm.%A.%N.out
+#SBATCH -e slurm.%A.%N.err
+#SBATCH --mail-type=ALL
+#SBATCH --mail-user=n.aznan@ucl.ac.uk
+
+# ---------------------
+# Source bashrc
+# ----------------------
+# Otherwise `which python` points to the miniconda module's Python
+source ~/.bashrc
+
+# memory
+# see https://pytorch.org/docs/stable/notes/cuda.html#environment-variables
+PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+
+# -----------------------------
+# Error settings for bash
+# -----------------------------
+# see https://wizardzines.com/comics/bash-errors/
+set -e # do not continue after errors
+set -u # throw error if variable is unset
+set -o pipefail # make the pipe fail if any part of it fails
+
+# ---------------------
+# Define variables
+# ----------------------
+
+# video and inference config
+VIDEO_DIR=/ceph/zoo/raw/CrabField/ramalhete_2023/Escapes
+PATTERN="*.mov"
+CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml
+
+# checkpoint
+TRAINED_MODEL_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt
+
+# output directory
+OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/tracking_output
+
+# version of the codebase
+GIT_BRANCH=nikkna/inference_cluster
+
+# Check if the target is not a directory
+if [ !
-d "$VIDEO_DIR" ]; then + exit 1 +fi + +# ----------------------------- +# Create virtual environment +# ----------------------------- +module load miniconda + +# Define a environment for each job in the +# temporary directory of the compute node +ENV_NAME=crabs-dev-$SLURM_JOB_ID +ENV_PREFIX=$TMPDIR/$ENV_NAME + +# create environment +conda create \ + --prefix $ENV_PREFIX \ + -y \ + python=3.10 + +# activate environment +conda activate $ENV_PREFIX + +# install crabs package in virtual env +python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH + +# log pip and python locations +echo $ENV_PREFIX +which python +which pip + +# print the version of crabs package (last number is the commit hash) +echo "Git branch: $GIT_BRANCH" +conda list crabs +echo "-----" + +# ------------------------------------ +# GPU specs +# ------------------------------------ +echo "Memory used per GPU before training" +echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader +echo "-----" + +# ------------------- +# Run evaluation script for each .mov file in VIDEO_DIR +# ------------------- +for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do + echo "Processing video: $VIDEO_PATH" + detect-and-track-video \ + --trained_model_path $TRAINED_MODEL_PATH \ + --video_path $VIDEO_PATH \ + --config_file $CONFIG_FILE \ + --output_dir $OUTPUT_DIR \ + --device $DEVICE +done From cf04af31c4c9c11e7bb2881432fd83dbddf85c12 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 9 Jul 2024 13:56:42 +0100 Subject: [PATCH 34/52] small changes on the bash script --- bash_scripts/run_tracking.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh index 1eb8591f..d1656ab0 100644 --- a/bash_scripts/run_tracking.sh +++ b/bash_scripts/run_tracking.sh @@ -38,7 +38,7 @@ CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml # checkpoint - CKPT_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + TRAINED_MODEL_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt # output directory OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output @@ -49,6 +49,9 @@ # version of the codebase GIT_BRANCH=nikkna/inference_cluster + # device either cuda or cpu + DEVICE="cuda" + # ----------------------------- # Create virtual environment # ----------------------------- @@ -93,8 +96,9 @@ # Run evaluation script # ------------------- detect-and-track-video \ - --checkpoint_path $CKPT_PATH \ + --trained_model_path $CKPT_PATH \ --video_path $VIDEO_PATH \ --config_file $CONFIG_FILE \ --output_dir $OUTPUT_DIR \ - --gt_path $GT_PATH + --gt_path $GT_PATH \ + --device $DEVICE From 8d4c5a2d174e139b3dbbf18772897be6500f41f3 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 9 Jul 2024 13:59:01 +0100 Subject: [PATCH 35/52] changed to the correct video example --- bash_scripts/run_tracking.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh index d1656ab0..cbda58a4 100644 --- a/bash_scripts/run_tracking.sh +++ b/bash_scripts/run_tracking.sh @@ -34,7 +34,7 @@ # ---------------------- # video and inference config - VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_output_video.mp4 + 
VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_video.mp4
    CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml

    # checkpoint
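[Editor's note] The next patch documents the new --save_video and --save_frames options. As a generic illustration only (a sketch, not the project's actual CLI code), boolean flags like these are typically declared with argparse's store_true action, so they default to False unless passed:

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--save_video",
    action="store_true",  # False unless --save_video is passed
    help="Save a video with the tracked bounding boxes drawn on each frame.",
)
parser.add_argument(
    "--save_frames",
    action="store_true",
    help="Save the frames corresponding to the CSV output.",
)

args = parser.parse_args(["--save_video"])  # example invocation
print(args.save_video, args.save_frames)  # True False
```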
From 2ffce7a6f6f40a56fc5103a49c3c2b3d146e501b Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Tue, 9 Jul 2024 15:09:27 +0100
Subject: [PATCH 36/52] changes of guide

---
 bash_scripts/run_tracking.sh          |  2 +-
 .../run_tracking_all_escape_events.sh |  1 +
 guides/TrackingModelHPC.md            | 40 ++++++++++++------
 3 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh
index cbda58a4..c919afcc 100644
--- a/bash_scripts/run_tracking.sh
+++ b/bash_scripts/run_tracking.sh
@@ -96,7 +96,7 @@
    # Run evaluation script
    # -------------------
    detect-and-track-video \
-   --trained_model_path $CKPT_PATH \
+   --trained_model_path $TRAINED_MODEL_PATH \
    --video_path $VIDEO_PATH \
    --config_file $CONFIG_FILE \
    --output_dir $OUTPUT_DIR \
diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh
index d3e9935f..3a42e6e5 100644
--- a/bash_scripts/run_tracking_all_escape_events.sh
+++ b/bash_scripts/run_tracking_all_escape_events.sh
@@ -47,6 +47,7 @@ OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/tracking_output
 # version of the codebase
 GIT_BRANCH=nikkna/inference_cluster

+# device either cuda or cpu
 DEVICE="cuda"

 # Check if the target is not a directory
diff --git a/guides/TrackingModelHPC.md b/guides/TrackingModelHPC.md
index 93c720ea..7d3fc3e0 100644
--- a/guides/TrackingModelHPC.md
+++ b/guides/TrackingModelHPC.md
@@ -15,32 +15,38 @@

 1. **Download the tracking script from the 🦀 repository**

-   To do so, run any of the following commands. They will download a bash script for training (`run_evaluate_single.sh` or `run_evaluate_array.sh`) to the current working directory.
+   To do so, run any of the following commands. They will download a bash script for tracking (`run_tracking.sh` or `run_tracking_all_escape_events.sh`) to the current working directory.

   To download the version of these files in the `main` branch of the [🦀 repository](https://github.com/SainsburyWellcomeCentre/crabs-exploration), run one of the following commands.

-   - To train a single job: download the `run_inference.sh` file
+   - To run video tracking on a specific video: download the `run_tracking.sh` file

     ```
-    curl https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/main/bash_scripts/run_inference.sh > run_inference.sh
+    curl https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/main/bash_scripts/run_tracking.sh > run_tracking.sh
+    ```
+
+   - To run video tracking on all escape events (or on a directory): download the `run_tracking_all_escape_events.sh` file
+
+    ```
+    curl https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/main/bash_scripts/run_tracking_all_escape_events.sh > run_tracking_all_escape_events.sh
     ```

    These bash scripts will launch a SLURM job that:

    - gets the 🦀 package from git,
    - installs it in the compute node,
-   - and runs a training job.
+   - and runs video tracking on a specific video.

> [!TIP]
> To retrieve a version of these files that is different from the files at the tip of `main`, edit the remote file path in the curl command:
>
> - For example, to download the version of the file at the tip of a branch called `<branch-name>`, edit the path above to replace `main` with `<branch-name>`:
> ```
> https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/<branch-name>/bash_scripts/run_tracking.sh
> ```
> - To download the version of the file at a specific commit, replace `main` with `blob/<commit-hash>`:
> ```
> https://raw.githubusercontent.com/SainsburyWellcomeCentre/crabs-exploration/blob/<commit-hash>/bash_scripts/run_tracking.sh
> ```

4. **Edit the bash script!**

   To run the tracker, we need to ensure the correct trained model is used. All the parameters used in any training session are logged in `mlflow`.

   We can see the performance of each training session by inspecting the `metrics` tab in the `mlflow UI`, where the `training loss`, `validation precision` and `validation recall` are plotted. The trained model (`checkpoint path`) is logged in the `parameters` section under the `overview` tab.

   When launching a tracking job, we may want to edit the following in the bash script:

   - The `TRAINED_MODEL_PATH`
   - The `OUTPUT_DIR`
   - The `VIDEO_PATH` (for `run_tracking.sh`) or `VIDEO_DIR` (for `run_tracking_all_escape_events.sh`)

   Less frequently, one may need to edit:

   - the `CONFIG_FILE`: usually we point to the same file we used to train the model at `/ceph/zoo/users/sminano/cluster_tracking_config.yaml`, which we can edit.
   - the `GIT_BRANCH`, if we want to use a specific version of the 🦀 package. Usually we will run the version of the 🦀 package in `main`.

5. **Other Inference options**

   By default, the inference will save the tracking output into a CSV file. There are other options that we can enable via CLI arguments:

   - `save_video` : This will save the tracking bounding boxes for every frame into a video output.
   - `save_frames` : This will save the corresponding frames to the CSV output. This is needed if we want to correct the tracking labels.

   Additionally, if we have ground truth for the video we used, we may want to add that to get the tracking evaluation:

   - `GT_PATH`

   We can add all these arguments in the bash script, for example:

   ```
   detect-and-track-video \
   --trained_model_path $TRAINED_MODEL_PATH \
   --video_path $VIDEO_PATH \
   --config_file $CONFIG_FILE \
   --gt_path $GT_PATH \
   --device $DEVICE \
   --save_video \
   --save_frames
   ```

6. **Run the inference job using the SLURM scheduler**
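[Editor's note] The tracks CSV mentioned in step 5 follows the VIA format written by write_tracked_bbox_to_csv, shown in the patch-31 deletion above and carried over into the tracker package. A small sketch for reading it back; the file name is hypothetical.

```
import csv
import json

with open("04.09.2023-04-Right_RE_test.csv") as f:  # hypothetical tracker output
    reader = csv.reader(f)
    next(reader)  # skip the VIA header row
    for row in reader:
        shape = json.loads(row[5])  # e.g. {"name": "rect", "x": ..., "width": ...}
        track_id = json.loads(row[6])["track"]
        print(row[0], track_id, shape["x"], shape["y"], shape["width"], shape["height"])
```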
From 8663563fc9b50b9e7ef23b32f32dc7545db57d86 Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Tue, 9 Jul 2024 15:15:37 +0100
Subject: [PATCH 37/52] removed device, already set in code

---
 bash_scripts/run_tracking.sh                   | 6 +-----
 bash_scripts/run_tracking_all_escape_events.sh | 6 +-----
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh
index c919afcc..34be127b 100644
--- a/bash_scripts/run_tracking.sh
+++ b/bash_scripts/run_tracking.sh
@@ -49,9 +49,6 @@
    # version of the codebase
    GIT_BRANCH=nikkna/inference_cluster

-   # device either cuda or cpu
-   DEVICE="cuda"
-
    # -----------------------------
    # Create virtual environment
    # -----------------------------
    module load miniconda
@@ -100,5 +97,4 @@
    --video_path $VIDEO_PATH \
    --config_file $CONFIG_FILE \
    --output_dir $OUTPUT_DIR \
-   --gt_path $GT_PATH \
-   --device $DEVICE
+   --gt_path $GT_PATH
diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh
index 3a42e6e5..d5337c45 100644
--- a/bash_scripts/run_tracking_all_escape_events.sh
+++ b/bash_scripts/run_tracking_all_escape_events.sh
@@ -47,9 +47,6 @@ OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/tracking_output
 # version of the codebase
 GIT_BRANCH=nikkna/inference_cluster

-# device either cuda or cpu
-DEVICE="cuda"
-
 # Check if the target is not a directory
 if [ ! -d "$VIDEO_DIR" ]; then
     exit 1
 fi
@@ -103,6 +100,5 @@ for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do
         --trained_model_path $TRAINED_MODEL_PATH \
         --video_path $VIDEO_PATH \
         --config_file $CONFIG_FILE \
-        --output_dir $OUTPUT_DIR \
-        --device $DEVICE
+        --output_dir $OUTPUT_DIR
 done

From 86a309b8aed2f224bc5b80449740bae83852f56d Mon Sep 17 00:00:00 2001
From: nikk-nikaznan
Date: Tue, 9 Jul 2024 16:03:18 +0100
Subject: [PATCH 38/52] check cuda status

---
 bash_scripts/run_tracking.sh | 20 ++++++++++----------
 crabs/tracker/track_video.py |  5 +++++
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh
index 34be127b..74c3c2d2 100644
--- a/bash_scripts/run_tracking.sh
+++ b/bash_scripts/run_tracking.sh
@@ -1,15 +1,15 @@
#!/bin/bash

#SBATCH -p gpu # a100 # partition
#SBATCH --gres=gpu:1
#SBATCH -N 1 # number of nodes
#SBATCH --ntasks-per-node 8 # 2 # max number of tasks per node
#SBATCH --mem 64G # memory pool for all cores
#SBATCH -t 3-00:00 # time (D-HH:MM)
#SBATCH -o slurm.%A.%N.out
#SBATCH -e slurm.%A.%N.err
#SBATCH --mail-type=ALL
#SBATCH --mail-user=n.aznan@ucl.ac.uk

diff --git a/crabs/tracker/track_video.py b/crabs/tracker/track_video.py
index e65fd07c..ff1b40d4 100644
--- a/crabs/tracker/track_video.py
+++ b/crabs/tracker/track_video.py
@@ -63,6 +63,11 @@ def setup(self):
         """
         Load tracking config, trained model and input video path.
         """
+        # Check for CUDA availability
+        if self.device == "cuda" and not torch.cuda.is_available():
+            print("CUDA is not available.
Falling back to CPU.") + self.device = "cpu" + with open(self.config_file, "r") as f: self.config = yaml.safe_load(f) From 86a309b8aed2f224bc5b80449740bae83852f56d Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 9 Jul 2024 17:13:32 +0100 Subject: [PATCH 39/52] modified some path --- bash_scripts/run_tracking.sh | 9 +++++---- bash_scripts/run_tracking_all_escape_events.sh | 6 +++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh index 74c3c2d2..a8b1b8d7 100644 --- a/bash_scripts/run_tracking.sh +++ b/bash_scripts/run_tracking.sh @@ -34,14 +34,15 @@ # ---------------------- # video and inference config - VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_video.mp4 - CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml + VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test.mp4 + CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml # checkpoint - TRAINED_MODEL_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt + TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/243676951438603508/8dbe61069f17453a87c27b4f61f6e681/checkpoints/last.ckpt + # output directory - OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output + OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output # ground truth is available GT_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index d5337c45..b6a38eb8 100644 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -36,13 +36,13 @@ set -o pipefail # make the pipe fail if any part of it fails # video and inference config VIDEO_DIR=/ceph/zoo/raw/CrabField/ramalhete_2023/Escapes PATTERN="*.mov" -CONFIG_FILE=/ceph/scratch/nikkna/crabs-exploration/crabs/tracker/config/tracking_config.yaml +CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml # checkpoint -TRAINED_MODEL_PATH=/ceph/scratch/nikkna/crabs-exploration/ml_ckpt/595664011639950974/e24234398e4b4d5790a9ea3599570637/checkpoints/last.ckpt +TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/243676951438603508/8dbe61069f17453a87c27b4f61f6e681/checkpoints/last.ckpt # output directory -OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/tracking_output +OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output # version of the codebase GIT_BRANCH=nikkna/inference_cluster From 3d337306db7ea92d7b125649f61d31ccffffbe5d Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Tue, 9 Jul 2024 17:19:02 +0100 Subject: [PATCH 40/52] changes branch to main --- bash_scripts/run_tracking.sh | 180 +++++++++--------- .../run_tracking_all_escape_events.sh | 4 +- 2 files changed, 92 insertions(+), 92 deletions(-) diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh index a8b1b8d7..63eeaf52 100644 --- a/bash_scripts/run_tracking.sh +++ b/bash_scripts/run_tracking.sh @@ -9,93 +9,93 @@ #SBATCH -o slurm.%A.%N.out #SBATCH -e slurm.%A.%N.err #SBATCH --mail-type=ALL -#SBATCH --mail-user=n.aznan@ucl.ac.uk - - # --------------------- - # Source bashrc - # ---------------------- - # Otherwise `which python` points to the 
miniconda module's Python - source ~/.bashrc - - # memory - # see https://pytorch.org/docs/stable/notes/cuda.html#environment-variables - PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True - - # ----------------------------- - # Error settings for bash - # ----------------------------- - # see https://wizardzines.com/comics/bash-errors/ - set -e # do not continue after errors - set -u # throw error if variable is unset - set -o pipefail # make the pipe fail if any part of it fails - - # --------------------- - # Define variables - # ---------------------- - - # video and inference config - VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test.mp4 - CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml - - # checkpoint - TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/243676951438603508/8dbe61069f17453a87c27b4f61f6e681/checkpoints/last.ckpt - - - # output directory - OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output - - # ground truth is available - GT_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv - - # version of the codebase - GIT_BRANCH=nikkna/inference_cluster - - # ----------------------------- - # Create virtual environment - # ----------------------------- - module load miniconda - - # Define a environment for each job in the - # temporary directory of the compute node - ENV_NAME=crabs-dev-$SLURM_JOB_ID - ENV_PREFIX=$TMPDIR/$ENV_NAME - - # create environment - conda create \ - --prefix $ENV_PREFIX \ - -y \ - python=3.10 - - # activate environment - conda activate $ENV_PREFIX - - # install crabs package in virtual env - python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH - - - # log pip and python locations - echo $ENV_PREFIX - which python - which pip - - # print the version of crabs package (last number is the commit hash) - echo "Git branch: $GIT_BRANCH" - conda list crabs - echo "-----" - - # ------------------------------------ - # GPU specs - # ------------------------------------ - echo "Memory used per GPU before training" - echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader - echo "-----" - - # ------------------- - # Run evaluation script - # ------------------- - detect-and-track-video \ - --trained_model_path $TRAINED_MODEL_PATH \ - --video_path $VIDEO_PATH \ - --config_file $CONFIG_FILE \ - --output_dir $OUTPUT_DIR \ - --gt_path $GT_PATH +#SBATCH --mail-user=s.minano@ucl.ac.uk + +# --------------------- +# Source bashrc +# ---------------------- +# Otherwise `which python` points to the miniconda module's Python +source ~/.bashrc + +# memory +# see https://pytorch.org/docs/stable/notes/cuda.html#environment-variables +PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True + +# ----------------------------- +# Error settings for bash +# ----------------------------- +# see https://wizardzines.com/comics/bash-errors/ +set -e # do not continue after errors +set -u # throw error if variable is unset +set -o pipefail # make the pipe fail if any part of it fails + +# --------------------- +# Define variables +# ---------------------- + +# video and inference config +VIDEO_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test.mp4 +CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml + +# checkpoint 
+TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/243676951438603508/8dbe61069f17453a87c27b4f61f6e681/checkpoints/last.ckpt
+
+
+# output directory
+OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output
+
+# ground truth is available
+GT_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv
+
+# version of the codebase
+GIT_BRANCH=main
+
+# -----------------------------
+# Create virtual environment
+# -----------------------------
+module load miniconda
+
+# Define an environment for each job in the
+# temporary directory of the compute node
+ENV_NAME=crabs-dev-$SLURM_JOB_ID
+ENV_PREFIX=$TMPDIR/$ENV_NAME
+
+# create environment
+conda create \
+  --prefix $ENV_PREFIX \
+  -y \
+  python=3.10
+
+# activate environment
+conda activate $ENV_PREFIX
+
+# install crabs package in virtual env
+python -m pip install git+https://github.com/SainsburyWellcomeCentre/crabs-exploration.git@$GIT_BRANCH
+
+
+# log pip and python locations
+echo $ENV_PREFIX
+which python
+which pip
+
+# print the version of crabs package (last number is the commit hash)
+echo "Git branch: $GIT_BRANCH"
+conda list crabs
+echo "-----"
+
+# ------------------------------------
+# GPU specs
+# ------------------------------------
+echo "Memory used per GPU before training"
+echo $(nvidia-smi --query-gpu=name,memory.total,memory.free,memory.used --format=csv) #noheader
+echo "-----"
+
+# -------------------
+# Run evaluation script
+# -------------------
+detect-and-track-video \
+    --trained_model_path $TRAINED_MODEL_PATH \
+    --video_path $VIDEO_PATH \
+    --config_file $CONFIG_FILE \
+    --output_dir $OUTPUT_DIR \
+    --gt_path $GT_PATH
diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh
index b6a38eb8..8fe2bdf4 100644
--- a/bash_scripts/run_tracking_all_escape_events.sh
+++ b/bash_scripts/run_tracking_all_escape_events.sh
@@ -9,7 +9,7 @@
 #SBATCH -o slurm.%A.%N.out
 #SBATCH -e slurm.%A.%N.err
 #SBATCH --mail-type=ALL
-#SBATCH --mail-user=n.aznan@ucl.ac.uk
+#SBATCH --mail-user=s.minano@ucl.ac.uk
 
 # ---------------------
 # Source bashrc
@@ -45,7 +45,7 @@ OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/tracking_output
 # version of the codebase
-GIT_BRANCH=nikkna/inference_cluster
+GIT_BRANCH=main
 
 # Check if the target is not a directory
 if [ ! 
-d "$VIDEO_DIR" ]; then From b72b4b3bb2f3ef673dd2903eae661d75db989d08 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 10 Jul 2024 10:16:43 +0100 Subject: [PATCH 41/52] add args to handle run on directory on the cluster --- .../run_tracking_all_escape_events.sh | 31 ++++++++++++++++--- crabs/tracker/track_video.py | 11 ++++++- crabs/tracker/utils/io.py | 18 +++++++---- tests/test_unit/test_track_video.py | 1 + 4 files changed, 50 insertions(+), 11 deletions(-) diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index 8fe2bdf4..105aa419 100644 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -94,11 +94,34 @@ echo "-----" # ------------------- # Run evaluation script for each .mov file in VIDEO_DIR # ------------------- + +# Create a timestamp +TIMESTAMP=$(date +"%Y%m%d_%H%M%S") + +# Define the parent output directory with the timestamp +PARENT_OUTPUT_DIR="${OUTPUT_DIR}_${TIMESTAMP}" + +# Create the parent output directory +mkdir -p "$PARENT_OUTPUT_DIR" + +# Loop through each video file in the VIDEO_DIR for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do + # Get the base name of the video file (without path and extension) + VIDEO_BASENAME=$(basename "$VIDEO_PATH" .mov) + echo "Processing video: $VIDEO_PATH" + + # Define the subfolder for the current video within the parent output directory + VIDEO_OUTPUT_DIR="$PARENT_OUTPUT_DIR/$VIDEO_BASENAME" + + # Create the subfolder for the current video + mkdir -p "$VIDEO_OUTPUT_DIR" + + # Run the detect-and-track-video command with the appropriate output directory detect-and-track-video \ - --trained_model_path $TRAINED_MODEL_PATH \ - --video_path $VIDEO_PATH \ - --config_file $CONFIG_FILE \ - --output_dir $OUTPUT_DIR + --trained_model_path "$TRAINED_MODEL_PATH" \ + --video_path "$VIDEO_PATH" \ + --config_file "$CONFIG_FILE" \ + --output_dir "$VIDEO_OUTPUT_DIR" + done diff --git a/crabs/tracker/track_video.py b/crabs/tracker/track_video.py index ff1b40d4..82c4e044 100644 --- a/crabs/tracker/track_video.py +++ b/crabs/tracker/track_video.py @@ -92,7 +92,11 @@ def prep_outputs(self): self.csv_writer, self.csv_file, self.tracking_output_dir, - ) = prep_csv_writer(self.args.output_dir, self.video_file_root) + ) = prep_csv_writer( + self.args.output_dir, + self.video_file_root, + self.args.run_on_video_dir, + ) if self.args.save_video: frame_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)) @@ -304,6 +308,11 @@ def tracking_parse_args(args): action="store_true", help="Save frame to be used in correcting track labelling", ) + parser.add_argument( + "--run_on_video_dir", + action="store_true", + help="option to run track video on directory instead of a video.", + ) parser.add_argument( "--device", type=str, diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index b01b9750..baa244a3 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -13,7 +13,9 @@ ) -def prep_csv_writer(output_dir: str, video_file_root: str): +def prep_csv_writer( + output_dir: str, video_file_root: str, run_on_video_dir: bool +): """ Prepare csv writer to output tracking results. @@ -29,11 +31,15 @@ def prep_csv_writer(output_dir: str, video_file_root: str): Tuple A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. 
""" - - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - tracking_output_dir = Path(output_dir + f"_{timestamp}") / video_file_root - # Create the subdirectory for the specific video file root - tracking_output_dir.mkdir(parents=True, exist_ok=True) + if run_on_video_dir: + tracking_output_dir = Path(output_dir) + else: + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + tracking_output_dir = ( + Path(output_dir + f"_{timestamp}") / video_file_root + ) + # Create the subdirectory for the specific video file root + tracking_output_dir.mkdir(parents=True, exist_ok=True) csv_file = open( f"{str(tracking_output_dir)}/predicted_tracks.csv", diff --git a/tests/test_unit/test_track_video.py b/tests/test_unit/test_track_video.py index 6833242b..92dbb039 100644 --- a/tests/test_unit/test_track_video.py +++ b/tests/test_unit/test_track_video.py @@ -20,6 +20,7 @@ def mock_args(): device="cuda", gt_path=None, save_video=None, + run_on_video_dir =None ) From 2b9973ecb330c8b20a6579dd60dbf97116b229d4 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 10 Jul 2024 10:16:53 +0100 Subject: [PATCH 42/52] add args to handle run on directory on the cluster --- tests/test_unit/test_track_video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_unit/test_track_video.py b/tests/test_unit/test_track_video.py index 92dbb039..25e2ab4f 100644 --- a/tests/test_unit/test_track_video.py +++ b/tests/test_unit/test_track_video.py @@ -20,7 +20,7 @@ def mock_args(): device="cuda", gt_path=None, save_video=None, - run_on_video_dir =None + run_on_video_dir=None, ) From feace52e94c42cf172dd326cb5944d22e06f74b6 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 10 Jul 2024 10:51:17 +0100 Subject: [PATCH 43/52] cleaned up --- bash_scripts/run_tracking_all_escape_events.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index 105aa419..8dafd960 100644 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -35,7 +35,6 @@ set -o pipefail # make the pipe fail if any part of it fails # video and inference config VIDEO_DIR=/ceph/zoo/raw/CrabField/ramalhete_2023/Escapes -PATTERN="*.mov" CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml # checkpoint From 7977b487793959ed414ffc81fb191cf9f246b264 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 10 Jul 2024 10:53:00 +0100 Subject: [PATCH 44/52] cleaned up --- bash_scripts/run_tracking_all_escape_events.sh | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index 8dafd960..6aa40a9f 100644 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -94,29 +94,19 @@ echo "-----" # Run evaluation script for each .mov file in VIDEO_DIR # ------------------- -# Create a timestamp TIMESTAMP=$(date +"%Y%m%d_%H%M%S") - -# Define the parent output directory with the timestamp PARENT_OUTPUT_DIR="${OUTPUT_DIR}_${TIMESTAMP}" - -# Create the parent output directory mkdir -p "$PARENT_OUTPUT_DIR" -# Loop through each video file in the VIDEO_DIR for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do - # Get the base name of the video file (without path and extension) VIDEO_BASENAME=$(basename "$VIDEO_PATH" .mov) echo "Processing video: $VIDEO_PATH" - # Define the subfolder for the current video within the parent output directory 
VIDEO_OUTPUT_DIR="$PARENT_OUTPUT_DIR/$VIDEO_BASENAME" - # Create the subfolder for the current video mkdir -p "$VIDEO_OUTPUT_DIR" - # Run the detect-and-track-video command with the appropriate output directory detect-and-track-video \ --trained_model_path "$TRAINED_MODEL_PATH" \ --video_path "$VIDEO_PATH" \ From bff7606101c33b776beeb2420c7b1aae1edb91cf Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Wed, 10 Jul 2024 11:11:39 +0100 Subject: [PATCH 45/52] forgot the args --- bash_scripts/run_tracking_all_escape_events.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index 6aa40a9f..e2a3e9e7 100644 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -111,6 +111,6 @@ for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do --trained_model_path "$TRAINED_MODEL_PATH" \ --video_path "$VIDEO_PATH" \ --config_file "$CONFIG_FILE" \ - --output_dir "$VIDEO_OUTPUT_DIR" - + --output_dir "$VIDEO_OUTPUT_DIR" \ + --run_on_video_dir done From c5bd870ff25e27d53090c2f076727577539509e8 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan <48319650+nikk-nikaznan@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:12:30 +0100 Subject: [PATCH 46/52] Update guides/TrackingModelHPC.md Co-authored-by: sfmig <33267254+sfmig@users.noreply.github.com> Signed-off-by: nikk-nikaznan <48319650+nikk-nikaznan@users.noreply.github.com> --- guides/TrackingModelHPC.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/guides/TrackingModelHPC.md b/guides/TrackingModelHPC.md index 7d3fc3e0..d77c0bbc 100644 --- a/guides/TrackingModelHPC.md +++ b/guides/TrackingModelHPC.md @@ -1,4 +1,4 @@ -# Evaluate a trained detector model in the cluster +# Evaluate a detector+tracking model in the cluster 1. 
**Preparatory steps** From cd497d7f6c0ba77d3e7f89a22c4e74121bafc826 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 22 Jul 2024 09:28:18 +0100 Subject: [PATCH 47/52] extension, check dir --- bash_scripts/run_tracking_all_escape_events.sh | 5 +++-- crabs/tracker/track_video.py | 12 ++++++------ crabs/tracker/utils/io.py | 6 ++---- 3 files changed, 11 insertions(+), 12 deletions(-) mode change 100644 => 100755 bash_scripts/run_tracking_all_escape_events.sh diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh old mode 100644 new mode 100755 index e2a3e9e7..3102d3fb --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -35,6 +35,7 @@ set -o pipefail # make the pipe fail if any part of it fails # video and inference config VIDEO_DIR=/ceph/zoo/raw/CrabField/ramalhete_2023/Escapes +VIDEO_EXT=mov CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml # checkpoint @@ -98,8 +99,8 @@ TIMESTAMP=$(date +"%Y%m%d_%H%M%S") PARENT_OUTPUT_DIR="${OUTPUT_DIR}_${TIMESTAMP}" mkdir -p "$PARENT_OUTPUT_DIR" -for VIDEO_PATH in "$VIDEO_DIR"/*.mov; do - VIDEO_BASENAME=$(basename "$VIDEO_PATH" .mov) +for VIDEO_PATH in "$VIDEO_DIR"/*"$VIDEO_EXT"; do + VIDEO_BASENAME=$(basename "$VIDEO_PATH" ."$VIDEO_EXT") echo "Processing video: $VIDEO_PATH" diff --git a/crabs/tracker/track_video.py b/crabs/tracker/track_video.py index 82c4e044..8955e81a 100644 --- a/crabs/tracker/track_video.py +++ b/crabs/tracker/track_video.py @@ -95,7 +95,7 @@ def prep_outputs(self): ) = prep_csv_writer( self.args.output_dir, self.video_file_root, - self.args.run_on_video_dir, + # self.args.run_on_video_dir, ) if self.args.save_video: @@ -308,11 +308,11 @@ def tracking_parse_args(args): action="store_true", help="Save frame to be used in correcting track labelling", ) - parser.add_argument( - "--run_on_video_dir", - action="store_true", - help="option to run track video on directory instead of a video.", - ) + # parser.add_argument( + # "--run_on_video_dir", + # action="store_true", + # help="option to run track video on directory instead of a video.", + # ) parser.add_argument( "--device", type=str, diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index baa244a3..f1e577ef 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -13,9 +13,7 @@ ) -def prep_csv_writer( - output_dir: str, video_file_root: str, run_on_video_dir: bool -): +def prep_csv_writer(output_dir: str, video_file_root: str): """ Prepare csv writer to output tracking results. @@ -31,7 +29,7 @@ def prep_csv_writer( Tuple A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. 
""" - if run_on_video_dir: + if os.path.isdir(video_file_root): tracking_output_dir = Path(output_dir) else: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") From 586d41268b2482fec95b9f499ab79e9221a0567c Mon Sep 17 00:00:00 2001 From: nikk-nikaznan <48319650+nikk-nikaznan@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:29:12 +0100 Subject: [PATCH 48/52] Update bash_scripts/run_tracking.sh Co-authored-by: sfmig <33267254+sfmig@users.noreply.github.com> Signed-off-by: nikk-nikaznan <48319650+nikk-nikaznan@users.noreply.github.com> --- bash_scripts/run_tracking.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_scripts/run_tracking.sh b/bash_scripts/run_tracking.sh index 63eeaf52..9d19e4de 100644 --- a/bash_scripts/run_tracking.sh +++ b/bash_scripts/run_tracking.sh @@ -44,7 +44,7 @@ TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/24367 # output directory OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output -# ground truth is available +# ground truth if available GT_PATH=/ceph/zoo/users/sminano/crabs_tracks_label/04.09.2023-04-Right_RE_test/04.09.2023-04-Right_RE_test_corrected_ST_csv.csv # version of the codebase From 742ee1aeb97960bf90ed64ed35f6234554b397d0 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 29 Jul 2024 09:28:34 +0100 Subject: [PATCH 49/52] debug --- bash_scripts/run_tracking_all_escape_events.sh | 7 +++---- crabs/tracker/track_video.py | 1 - crabs/tracker/utils/io.py | 2 ++ 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bash_scripts/run_tracking_all_escape_events.sh b/bash_scripts/run_tracking_all_escape_events.sh index 3102d3fb..de82434a 100755 --- a/bash_scripts/run_tracking_all_escape_events.sh +++ b/bash_scripts/run_tracking_all_escape_events.sh @@ -34,15 +34,15 @@ set -o pipefail # make the pipe fail if any part of it fails # ---------------------- # video and inference config -VIDEO_DIR=/ceph/zoo/raw/CrabField/ramalhete_2023/Escapes -VIDEO_EXT=mov +VIDEO_DIR=/ceph/scratch/nikkna/crabs-exploration/crab_video +VIDEO_EXT=mp4 CONFIG_FILE=/ceph/zoo/users/sminano/cluster_tracking_config.yaml # checkpoint TRAINED_MODEL_PATH=/ceph/zoo/users/sminano/ml-runs-all/ml_runs-nikkna-copy/243676951438603508/8dbe61069f17453a87c27b4f61f6e681/checkpoints/last.ckpt # output directory -OUTPUT_DIR=/ceph/zoo/users/sminano/crabs_track_output +OUTPUT_DIR=/ceph/scratch/nikkna/crabs-exploration/crabs_track_output # version of the codebase GIT_BRANCH=main @@ -113,5 +113,4 @@ for VIDEO_PATH in "$VIDEO_DIR"/*"$VIDEO_EXT"; do --video_path "$VIDEO_PATH" \ --config_file "$CONFIG_FILE" \ --output_dir "$VIDEO_OUTPUT_DIR" \ - --run_on_video_dir done diff --git a/crabs/tracker/track_video.py b/crabs/tracker/track_video.py index 8955e81a..95ff9354 100644 --- a/crabs/tracker/track_video.py +++ b/crabs/tracker/track_video.py @@ -95,7 +95,6 @@ def prep_outputs(self): ) = prep_csv_writer( self.args.output_dir, self.video_file_root, - # self.args.run_on_video_dir, ) if self.args.save_video: diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index f1e577ef..efafa74e 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -30,7 +30,9 @@ def prep_csv_writer(output_dir: str, video_file_root: str): A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. 
""" if os.path.isdir(video_file_root): + print("here") tracking_output_dir = Path(output_dir) + print(tracking_output_dir) else: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") tracking_output_dir = ( From b96d4fb0a7b3b54a1b5a55ff36eb647c68d04a87 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 29 Jul 2024 10:40:00 +0100 Subject: [PATCH 50/52] debug --- crabs/tracker/utils/io.py | 1 + 1 file changed, 1 insertion(+) diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index efafa74e..8226fefa 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -29,6 +29,7 @@ def prep_csv_writer(output_dir: str, video_file_root: str): Tuple A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. """ + print(video_file_root) if os.path.isdir(video_file_root): print("here") tracking_output_dir = Path(output_dir) From e8d77f0354114006db5060565e3ae4725212736b Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 29 Jul 2024 11:45:49 +0100 Subject: [PATCH 51/52] add log --- crabs/tracker/track_video.py | 3 ++- crabs/tracker/utils/io.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crabs/tracker/track_video.py b/crabs/tracker/track_video.py index 95ff9354..bb37e890 100644 --- a/crabs/tracker/track_video.py +++ b/crabs/tracker/track_video.py @@ -65,7 +65,7 @@ def setup(self): """ # Check for CUDA availability if self.device == "cuda" and not torch.cuda.is_available(): - print("CUDA is not available. Falling back to CPU.") + logging.info("CUDA is not available. Falling back to CPU.") self.device = "cpu" with open(self.config_file, "r") as f: @@ -88,6 +88,7 @@ def prep_outputs(self): """ Prepare csv writer and if required, video writer. """ + logging.info(self.video_file_root) ( self.csv_writer, self.csv_file, diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index 8226fefa..97ea7dc5 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -1,4 +1,5 @@ import csv +import logging import os from datetime import datetime from pathlib import Path @@ -29,11 +30,11 @@ def prep_csv_writer(output_dir: str, video_file_root: str): Tuple A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. """ - print(video_file_root) + logging.info(video_file_root) if os.path.isdir(video_file_root): - print("here") + logging.info("here") tracking_output_dir = Path(output_dir) - print(tracking_output_dir) + logging.info(tracking_output_dir) else: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") tracking_output_dir = ( From 5121e459b8100e6619cd0c90abb9f12756e86803 Mon Sep 17 00:00:00 2001 From: nikk-nikaznan Date: Mon, 29 Jul 2024 13:18:47 +0100 Subject: [PATCH 52/52] add log --- crabs/tracker/utils/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crabs/tracker/utils/io.py b/crabs/tracker/utils/io.py index 97ea7dc5..bd2baaba 100644 --- a/crabs/tracker/utils/io.py +++ b/crabs/tracker/utils/io.py @@ -31,7 +31,7 @@ def prep_csv_writer(output_dir: str, video_file_root: str): A tuple containing the CSV writer, the CSV file object, and the tracking output directory path. """ logging.info(video_file_root) - if os.path.isdir(video_file_root): + if os.path.isdir(Path(video_file_root)): logging.info("here") tracking_output_dir = Path(output_dir) logging.info(tracking_output_dir)