pipeline.py
import gc
import logging
import os
import random
from pathlib import Path

import cv2
import hydra
import imgaug
import numpy as np
import torch
from omegaconf import DictConfig

from src.data.classification_metadata import build_classification_metadata
from src.data.multimodal_metadata import build_multimodal_metadata
from src.data.segmentation_metadata import build_segmentation_metadata
from src.training.classify import start_classification
from src.training.multimodal import start_multimodal_classification
from src.training.segment import start_training
# Both the classification and the multimodal utils export a `split_train_test`
# helper; alias them so the second import does not shadow the first.
from src.utils.classification_utils import score_avg_classification, score_avg_classification_tta, \
    split_train_test as split_train_test_classification
from src.utils.multimodal_utils import score_avg_multi_modal_score_tta, \
    split_train_test as split_train_test_multimodal
from src.utils.segmentation_utils import visualize_segmentation, generate_spine_map, generate_mri_labels, \
    split_test_train, score_avg_segmentation

# Logger for generating the log file
logger = logging.getLogger(__name__)


def create_sub_dir(cfg):
    sub_dirs = ['models', 'plots', 'pickles']
    for sub_dir in sub_dirs:
        dir_path = Path.cwd() / "results" / sub_dir
        dir_path.mkdir(parents=True, exist_ok=True)


def set_random_seed(seed):
    # Seed every source of randomness used by the pipelines for reproducibility
    torch.cuda.empty_cache()
    gc.collect()
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)
    imgaug.random.seed(seed)
    random.seed(seed)
    cv2.setRNGSeed(seed)
    # Use deterministic cuDNN kernels; benchmarking is disabled because its
    # auto-tuned algorithm selection can vary between otherwise identical runs
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def start_segmentation_pipeline(cfg: DictConfig) -> None:
    if cfg.mode.segmentation.run_mode == 'train':
        if cfg.device == 'cuda':
            logger.info("Starting the segmentation pipeline with device: " + cfg.device)
        else:
            logger.warning("Starting the segmentation pipeline with device: " + cfg.device)
        # Build segmentation metadata from the images and masks directories
        sdf = build_segmentation_metadata(cfg, logger)
        segmentation_df, test_df = split_test_train(cfg, sdf)
        start_training(cfg, segmentation_df, logger)
        model_iou = score_avg_segmentation(cfg, test_df, logger)
        logger.info(f"Model Score: {model_iou:.3f}")
    elif cfg.mode.segmentation.run_mode == 'view':
        # Visualize the segmentation of a randomly chosen test image
        image_paths = list(Path(cfg.mode.test.dir).glob("*.png"))
        index = np.random.randint(0, len(image_paths))
        logger.info("Visualizing the file: {}".format(image_paths[index]))
        visualize_segmentation(cfg, str(image_paths[index]), logger)
    elif cfg.mode.segmentation.run_mode == 'spine-map':
        generate_spine_map(cfg, logger)
    elif cfg.mode.segmentation.run_mode == 'classify-labels':
        generate_mri_labels(cfg, logger)


def start_classification_pipeline(cfg: DictConfig) -> None:
    if cfg.mode.classification.run_mode == 'train':
        if cfg.device == 'cuda':
            logger.info("Starting the classification pipeline with device: " + cfg.device)
        else:
            logger.warning("Starting the classification pipeline with device: " + cfg.device)
        # Build classification metadata from the images and masks directories
        cdf = build_classification_metadata(cfg, logger)
        # Split the metadata into train and held-out test dataframes
        classification_df, test_df = split_train_test_classification(cfg, cdf)
        start_classification(cfg, classification_df, logger)
        # The SpineNet architectures are always scored without test-time augmentation
        if cfg.classification_architecture['_target_'] in ('spinenetv2', 'spinenet_miccai'):
            avg_model_score = score_avg_classification(cfg, test_df, logger)
        elif cfg.training.use_tta == 'Y':
            logger.info("Using TTA to calculate Test Score")
            avg_model_score = score_avg_classification_tta(cfg, test_df, logger)
        else:
            avg_model_score = score_avg_classification(cfg, test_df, logger)
        logger.info(f"Model Score: {avg_model_score * 100:.3f}")


def start_multimodal_pipeline(cfg: DictConfig) -> None:
    if cfg.mode.multimodal.run_mode == 'train':
        if cfg.device == 'cuda':
            logger.info("Starting the multimodal pipeline with device: " + cfg.device)
        else:
            logger.warning("Starting the multimodal pipeline with device: " + cfg.device)
        # Build multimodal metadata from the images and masks directories
        cdf = build_multimodal_metadata(cfg, logger)
        # Split the metadata into train and held-out test dataframes
        multimodal_df, test_df = split_train_test_multimodal(cfg, cdf)
        start_multimodal_classification(cfg, multimodal_df, logger)
        # Multimodal models are scored with test-time augmentation
        avg_model_score = score_avg_multi_modal_score_tta(cfg, test_df, logger)
        logger.info(f"Model Score: {avg_model_score * 100:.3f}")


@hydra.main(version_base=None, config_path="configs", config_name="configs")
def start_pipeline(cfg: DictConfig) -> None:
    # The first key under cfg.mode selects which pipeline to run
    pipeline = list(cfg.mode.keys())[0]
    if pipeline == 'segmentation':
        set_random_seed(cfg.random_seed)
        create_sub_dir(cfg)
        start_segmentation_pipeline(cfg)
    elif pipeline == 'classification':
        set_random_seed(cfg.random_seed)
        create_sub_dir(cfg)
        start_classification_pipeline(cfg)
    elif pipeline == 'multimodal':
        set_random_seed(cfg.random_seed)
        create_sub_dir(cfg)
        start_multimodal_pipeline(cfg)


if __name__ == '__main__':
    start_pipeline()
# model = GradingModel()
# output = model(torch.randn(1, 1, 4, 128, 128))
# print(output.shape)
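
# Example invocation sketch: the overrides below assume that `mode` is a Hydra
# config group with `segmentation`, `classification` and `multimodal` options,
# and that `device`, `random_seed` and `training.use_tta` are keys defined in
# configs/configs.yaml; adjust the override names to the actual config tree.
#
#   python pipeline.py mode=segmentation mode.segmentation.run_mode=train device=cuda
#   python pipeline.py mode=segmentation mode.segmentation.run_mode=view
#   python pipeline.py mode=classification training.use_tta=Y random_seed=42
#   python pipeline.py mode=multimodal device=cpu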