# depth_estimator.py
import torch
from PIL import Image

from dorn.model import utils
from dorn.model.network.DORN_kitti import DORN


class DORNNET:
    # configuration shared by all instances, loaded once at class definition time
    config = utils.read_config()

    def __init__(self):
        self.depth_net = DORN()
        path_model = DORNNET.config['MODEL']['kitti']
        print('{0} model is used!'.format('kitti'))
        model_dict = utils.get_model(path_model)
        # load the trained model's parameters
        self.depth_net.load_state_dict(model_dict)
        # move the network to the CUDA device if available
        self.depth_net.to(utils.device)
        # put batch-norm and dropout layers into evaluation mode
        self.depth_net.eval()

    def __call__(self, filename):
        img = Image.open(filename)
        img_tensor = utils.transform_img(img, self.config)
        if self.depth_net.training:
            raise ValueError('Model is in training mode!')
        # disable the autograd engine since no back-propagation is needed at inference
        with torch.no_grad():
            pred_labels, ord_probs = self.depth_net(img_tensor)
        return pred_labels, ord_probs
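

# `pred_labels` holds discrete ordinal bin indices rather than metric depth. Below is
# a minimal sketch of how such labels could be mapped to depth values with DORN's
# spacing-increasing discretization (SID); the helper name `labels_to_depth` and the
# `alpha`, `beta`, `num_bins` defaults are illustrative assumptions, not values taken
# from this repository's config or utils.
def labels_to_depth(pred_labels, alpha=1.0, beta=80.0, num_bins=80):
    """Illustrative sketch: convert ordinal bin indices to metric depth via SID."""
    import math
    log_ratio = math.log(beta / alpha) / num_bins
    # SID thresholds: t_i = exp(log(alpha) + i * log(beta / alpha) / num_bins)
    lower = torch.exp(math.log(alpha) + pred_labels.float() * log_ratio)
    upper = torch.exp(math.log(alpha) + (pred_labels.float() + 1.0) * log_ratio)
    # report the midpoint of each predicted bin as the depth estimate
    return (lower + upper) / 2.0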


if __name__ == '__main__':
    import matplotlib.pyplot as plt

    filename = './model/demo/scene-0002_img_7.png'
    dorn = DORNNET()
    # __call__ returns a (labels, probabilities) tuple, so unpack it rather than
    # passing the whole tuple to imshow
    pred_labels, ord_probs = dorn(filename)
    # move the predicted label map to the CPU and drop singleton dimensions for display
    depth_map = pred_labels.squeeze().cpu().numpy()
    plt.figure('depth_map')
    plt.imshow(depth_map)
    plt.show()