@@ -21,6 +21,7 @@
 
 from django.conf import settings
 
+from aat import _detection_graph, CWD_PATH
 from .models import Cascade
 from .utils.utils import exec_cmd
 import aat.utils.recognizer_utils as recognizer
@@ -143,13 +144,6 @@ def object_detection2(self, video_path, framerate):
         log.debug("Cannot open video path {}".format(video_path))
         return {'od_error': 'Cannot open video'}
 
-    CWD_PATH = os.path.join(os.getenv('FACEREC_APP_DIR', '..'), 'aat')
-
-    # Path to frozen detection graph. This is the actual model that is used for the object detection.
-    MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
-    # MODEL_NAME = 'faster_rcnn_inception_resnet_v2_atrous_oid_2018_01_28'
-    PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME, 'frozen_inference_graph.pb')
-
     # List of the strings that is used to add correct label for each box.
     PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')
     # PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', ' oid_bbox_trainable_label_map.pbtxt')
@@ -186,15 +180,9 @@ def object_detection2(self, video_path, framerate):
 
         # frame_with_objects = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         frame_with_objects = frame.copy()
-        detection_graph = tf.Graph()
-        with detection_graph.as_default():
-            od_graph_def = tf.GraphDef()
-            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
-                serialized_graph = fid.read()
-                od_graph_def.ParseFromString(serialized_graph)
-                tf.import_graph_def(od_graph_def, name='')
-
-        sess = tf.Session(graph=detection_graph)
+
+        detection_graph = _detection_graph
+        sess = tf.Session(graph=detection_graph)
 
         # Expand dimensions since the model expects images to have shape:
         # [1, None, None, 3]
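
For context, the new import `from aat import _detection_graph, CWD_PATH` implies that the graph-loading code deleted above now runs once at package import time rather than on every call to object_detection2. A minimal sketch of what that module-level setup might look like, assuming it lives in aat/__init__.py and simply reuses the removed TF 1.x loading code (the exact file and names other than _detection_graph and CWD_PATH are assumptions, not shown in this diff):

# Hypothetical module-level setup (e.g. in aat/__init__.py) -- an assumption,
# not part of this diff. The frozen graph is parsed once per process and
# shared by every caller instead of being rebuilt inside object_detection2.
import os
import tensorflow as tf

CWD_PATH = os.path.join(os.getenv('FACEREC_APP_DIR', '..'), 'aat')

# Path to the frozen detection graph (the model used for object detection).
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME,
                            'frozen_inference_graph.pb')

_detection_graph = tf.Graph()
with _detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')

Note that tf.Session(graph=detection_graph) is still constructed inside object_detection2 after this change; only the expensive graph parsing is hoisted. Whether the session could also be created once and reused is a separate question this commit does not address.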