test_kitti.py
# Common libs
import time
import os
import numpy as np

# My libs
from utils.config import Config
from utils.tester import ModelTester
from models.KPFCNN_model import KernelPointFCNN

# Datasets
from datasets.KITTI import KITTIDataset


# ----------------------------------------------------------------------------------------------------------------------
#
#           Utility functions
#       \***********************/
#


def test_caller(path, step_ind, on_val):
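    # Note: 'on_val' is currently unused in this function; the KITTI dataset below is
    # always loaded with load_test=True.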

    # Disable warnings ('3' silences TensorFlow INFO and WARNING messages)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    ###########################
    # Load the model parameters
    ###########################

    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data:
    #config.augment_noise = 0.0001
    #config.augment_color = 1.0
    #config.validation_size = 500
    #config.batch_num = 10
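
    # A minimal sketch of one more override (attribute name assumed, not taken from this file;
    # check utils/config.py for what the saved training configuration actually defines):
    # config.input_threads = 8   # e.g. limit the number of parallel input-pipeline threads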

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    dataset = KITTIDataset(1, config.first_subsampling_dl, load_test=True)

    # Initialize input pipelines
    dataset.init_test_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    model = KernelPointFCNN(dataset.flat_inputs, config)
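
    # The graph is built directly on the dataset's input tensors (dataset.flat_inputs);
    # presumably the pipeline initialized above feeds the batches, so no feed_dict is needed.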

    # Find all snapshots in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']
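
    # With the TensorFlow 1.x Saver, each snapshot 'snap-<step>' is written as several files
    # ('snap-<step>.meta', '.index', '.data-...'); the comprehension above keeps the '.meta'
    # files, strips the 5-character suffix, and parses the step number, e.g.
    #   'snap-12000.meta' -> 'snap-12000' -> 12000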

    # Find which snapshot to restore
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))
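    # snap_steps is sorted in ascending order, so a negative step_ind counts from the end:
    # step_ind = -1 (the default below) restores the most recent snapshot.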

    # Create a tester class
    tester = ModelTester(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    ############
    # Start test
    ############

    print('Start Test')
    print('**********\n')

    tester.test_kitti(model, dataset)


# ----------------------------------------------------------------------------------------------------------------------
#
#           Main Call
#       \***********************/
#
if __name__ == '__main__':

    # Default is last log and last snapshot
    chosen_log = 'last_KITTI'
    chosen_snapshot = -1
    on_val = True

    ###########################
    # Call the test initializer
    ###########################

    # Dataset name
    test_dataset = 'KITTI'

    # List all training logs
    logs = np.sort([os.path.join('results_kitti', f) for f in os.listdir('results_kitti') if f.startswith('Log')])
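
    # np.sort orders the folder names lexicographically; assuming the usual timestamped names
    # ('Log_YYYY-MM-DD_hh-mm-ss'), this is also chronological, so the reverse iteration below
    # starts from the newest log.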

    # Find the last log of the asked dataset, or manually specify the log below
    for log in logs[::-1]:
        log_config = Config()
        log_config.load(log)
        if log_config.dataset.startswith(test_dataset):
            chosen_log = log
            break

    # chosen_log = 'results_kitti/Log_'

    # Check if log exists
    if not os.path.exists(chosen_log):
        raise ValueError('The given log does not exist: ' + chosen_log)

    # Let's go
    test_caller(chosen_log, chosen_snapshot, on_val)
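

# Usage (a sketch, assuming a finished training run left a 'results_kitti/Log_*' folder
# containing a 'snapshots' subfolder):
#
#   python test_kitti.py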