diff --git a/cramp-core/src/main/java/org/eduze/fyp/core/CameraNotifier.java b/cramp-core/src/main/java/org/eduze/fyp/core/CameraNotifier.java
index 98f896c9..161610d7 100644
--- a/cramp-core/src/main/java/org/eduze/fyp/core/CameraNotifier.java
+++ b/cramp-core/src/main/java/org/eduze/fyp/core/CameraNotifier.java
@@ -117,7 +117,7 @@ public void completed(HttpResponse httpResponse) {

             @Override
             public void failed(Exception e) {
-                logger.error("Error occurred when notifying camera with", e);
+                logger.error("Error occurred when notifying camera: {}", e.getMessage());
                 setState(State.IDLE);
             }
diff --git a/cramp-web/src/main/java/org/eduze/fyp/web/services/ConfigService.java b/cramp-web/src/main/java/org/eduze/fyp/web/services/ConfigService.java
index 30de5f1c..e295f02c 100644
--- a/cramp-web/src/main/java/org/eduze/fyp/web/services/ConfigService.java
+++ b/cramp-web/src/main/java/org/eduze/fyp/web/services/ConfigService.java
@@ -122,6 +122,7 @@ public CameraConfig getCameraConfig(int cameraId) {
             p.setY(p.getY() * cameraConfig.getHeight() / Constants.CAMERA_VIEW_HEIGHT);
         });

+        logger.debug("Get camera config: {}", cameraConfig);
         return cameraConfig;
     }
diff --git a/ngapp/src/app/realtime-map/realtime-map.component.html b/ngapp/src/app/realtime-map/realtime-map.component.html
index 9b172f3b..4bc65ed4 100644
--- a/ngapp/src/app/realtime-map/realtime-map.component.html
+++ b/ngapp/src/app/realtime-map/realtime-map.component.html
@@ -1,6 +1,6 @@
-    Real-time Map
+    Real-time Map - {{d.cameraGroup.name}}
     {{d.personSnapshots.length}} people
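Note: the App.py change below bootstraps each camera in its own process, but does so by duplicating the setup code in two near-identical functions (run_cam_server_pier2 and run_cam_server_departure). A minimal sketch of the same pattern with the video key and port passed as parameters, so adding a camera becomes a one-line data change; the parameterized run_cam_server signature is an illustration, not part of this diff, and it assumes the same imports App.py already uses (load_video, PTEMapper, Sense, AngleMapper, WorldSpaceTracker, Snapy, LightWeightCamServer):

    from multiprocessing import Process, Queue

    # Hypothetical parameterized bootstrap mirroring run_cam_server_pier2/_departure.
    def run_cam_server(video_key, port, input_queue, output_queue):
        cap, markers, map_markers = load_video(video_key)
        position_mapper = PTEMapper(markers, map_markers)
        sense = Sense(input_queue, output_queue, position_mapper,
                      AngleMapper(position_mapper), WorldSpaceTracker(), Snapy())
        server = LightWeightCamServer(port, sense, cap, (0.5, 0.5))
        server.load_config(markers, map_markers)
        server.start_cam_server()

    # Video-config key and server port per camera.
    CAMERAS = [("bia.pier2", 10005), ("bia.departure", 10004)]

    if __name__ == "__main__":
        for video_key, port in CAMERAS:
            in_q, out_q = Queue(), Queue()
            p = Process(target=run_cam_server, args=(video_key, port, in_q, out_q))
            p.daemon = True
            p.start()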
diff --git a/sense/App.py b/sense/App.py
index eb15799c..a65ee97c 100644
--- a/sense/App.py
+++ b/sense/App.py
@@ -16,13 +16,24 @@
 logging.basicConfig(level=logging.DEBUG)


-def run_cam_server(input_queue, output_queue):
+def run_cam_server_pier2(input_queue, output_queue):
     cap, markers, map_markers = load_video("bia.pier2")
     position_mapper = PTEMapper(markers, map_markers)
     sense = Sense(input_queue, output_queue, position_mapper, AngleMapper(position_mapper), WorldSpaceTracker(),
                   Snapy())

-    server = LightWeightCamServer(sense, cap, (0.5, 0.5))
+    server = LightWeightCamServer(10005, sense, cap, (0.5, 0.5))
+    server.load_config(markers, map_markers)
+    server.start_cam_server()
+
+
+def run_cam_server_departure(input_queue, output_queue):
+    cap, markers, map_markers = load_video("bia.departure")
+    position_mapper = PTEMapper(markers, map_markers)
+    sense = Sense(input_queue, output_queue, position_mapper, AngleMapper(position_mapper), WorldSpaceTracker(),
+                  Snapy())
+
+    server = LightWeightCamServer(10004, sense, cap, (0.5, 0.5))
     server.load_config(markers, map_markers)
     server.start_cam_server()
@@ -36,20 +47,27 @@ def run_detector_service(queue_pairs):

 if __name__ == "__main__":
     multiprocessing.set_start_method("spawn")

-    input_queue = Queue()
-    output_queue = Queue()
+    pier2_input = Queue()
+    pier2_output = Queue()

-    queue_pairs = [(input_queue, output_queue)]
+    departure_input = Queue()
+    departure_output = Queue()
+
+    queue_pairs = [(pier2_input, pier2_output), (departure_input, departure_output)]

     service_process = Process(target=run_detector_service, args=([queue_pairs]))
     service_process.daemon = True
     service_process.start()

-    cam_process = Process(target=run_cam_server, args=(input_queue, output_queue))
-    cam_process.daemon = True
-    cam_process.start()
+    pier2_process = Process(target=run_cam_server_pier2, args=(pier2_input, pier2_output))
+    pier2_process.daemon = True
+    pier2_process.start()
+
+    departure_process = Process(target=run_cam_server_departure, args=(departure_input, departure_output))
+    departure_process.daemon = True
+    departure_process.start()

-    processes = [service_process, cam_process]
+    processes = [service_process, pier2_process, departure_process]

     running = True
     while running:
diff --git a/sense/LightWeightCamServer.py b/sense/LightWeightCamServer.py
index dfb009fa..51a07de1 100644
--- a/sense/LightWeightCamServer.py
+++ b/sense/LightWeightCamServer.py
@@ -1,8 +1,10 @@
 import logging

 import cv2
-from flask import Flask, jsonify
+from flask import Flask, jsonify, request

+from ScreenSpacePreview import ScreenSpacePreview
+from SenseMapViewer import SenseMapViewer
 from Util import restEncodeImage
 from communication.ServerSession import ServerSession
@@ -15,33 +17,39 @@ class LightWeightCamServer:
     todo: remove tracking - Imesha
     """

-    def __init__(self, sense, capture, scale=(0.5, 0.5)):
+    def __init__(self, port, sense, capture, scale=(0.5, 0.5)):
         """
         Initialize
         """
+        self.logger = logging.getLogger("CamServer")
+
+        self.app = Flask(__name__)
         self.sense = sense
         self.capture = capture
         self.scale = scale
-        self.port = 10005
+        self.port = port
         self.server_session = ServerSession(my_ip="localhost", my_port=self.port)
-        self.logger = logging.getLogger("CamServer")
+
+        self.screen_preview = ScreenSpacePreview(sense.position_mapper)
+        self.track_viewer = SenseMapViewer([sense.position_mapper], True)

     def load_config(self, default_markers=None, default_map_markers=None):
         """
         Connect to server and load configurations from it
         :return:
         """
+        self.track_viewer.setupWindows()

         # Obtain preview frame to be sent to server for configuration
         status, frame = self.capture.read()
         frame = cv2.resize(frame, (0, 0), fx=self.scale[0], fy=self.scale[1])
-        w, h, depth = frame.shape
+        h, w, depth = frame.shape

         self.server_session.configureMapping(frame, w, h, default_markers, default_map_markers)

         # Obtain marker points from server. Blocks until the server responds with marker points.
         mapping = self.server_session.obtainMapping()
-        self.logger.info("Obtained mapping: %s", mapping)
+        self.logger.info("Obtained mapping: {} - {}".format(mapping.screen_space_points, mapping.world_space_points))
         self.sense.position_mapper.screen_points = mapping.screen_space_points

         # TODO: Do scaling correction
@@ -78,7 +86,8 @@ def get_map_at(self, frame_time):
         # Generate Response
         for sensed_person in self.sense.sensed_persons.values():
             person = sensed_person.tracked_person
-            # Append to results
+
+            # self.logger.debug("Result: ({},{})".format(person.position[0], person.position[1]))
             result = {
                 "x": int(person.position[0]),
                 "y": int(person.position[1]),
@@ -93,16 +102,9 @@ def get_map_at(self, frame_time):
                 detection = sensed_person.tracked_person.detection
                 # print(detection.person_bound)
                 (dc_x, dc_y, dc_w, dc_h) = map(int, detection.person_bound)
-                (f_x, f_y, f_w, f_h) = detection.person_bound
-                (l_x, l_y) = detection.leg_point
-                (el_x, el_y) = detection.estimated_leg_point
-                cv2.rectangle(frame, (f_x, f_y), (f_w, f_h), (0, 0, 0), 2)
-                cv2.drawMarker(frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS, thickness=3)
-                cv2.drawMarker(frame, (int(el_x), int(el_y)), (255, 0, 255), cv2.MARKER_DIAMOND)

                 snap = frame[dc_y:dc_h, dc_x:dc_w]
                 if snap.shape[0] > 0 and snap.shape[1] > 0:
-                    logging.debug("Snapping person")
                     result["image"] = restEncodeImage(snap)

             result_coordinates.append(result)
@@ -117,8 +119,15 @@ def get_map_at(self, frame_time):
             "personCoordinates": result_coordinates
         }

-        cv2.imshow("output", frame)
-        cv2.waitKey(1)
+        self.screen_preview.renderFrame(self.sense, frame, None)
+        self.track_viewer.showMap([self.sense])
+
+        key = cv2.waitKey(1)
+        if key & 0xFF == ord('q'):
+            func = request.environ.get('werkzeug.server.shutdown')
+            if func is None:
+                raise RuntimeError('Not running with the Werkzeug Server')
+            func()

         return result
@@ -128,14 +137,12 @@ def start_cam_server(self):
         :param cam_port: port of camera server
         :return:
         """
-        app = Flask(__name__)
-        _self_shaddow = self

-        @app.route('/getMap/<int:frame_time>')
+        @self.app.route('/getMap/<int:frame_time>')
         def getMapFR(frame_time):
             logging.debug("Sending map for timestamp - %d", frame_time)
-            return jsonify(_self_shaddow.get_map_at(frame_time))
+            return jsonify(self.get_map_at(frame_time))

         logging.info("Starting server at " + str(self.port))
-        app.run(port=self.port)
+        self.app.run(port=self.port)
diff --git a/sense/ScreenSpacePreview.py b/sense/ScreenSpacePreview.py
index d277c44c..0184f6b8 100755
--- a/sense/ScreenSpacePreview.py
+++ b/sense/ScreenSpacePreview.py
@@ -45,11 +45,11 @@ def renderFrame(self, sense, colour_frame, gray_frame=None):
             (l_x, l_y) = person.leg_point
             (el_x, el_y) = person.estimated_leg_point
             cv2.rectangle(colour_frame, (f_x, f_y), (f_w, f_h), (0, 0, 0), 2)
-            cv2.drawMarker(colour_frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS,thickness=3)
+            cv2.drawMarker(colour_frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS, thickness=3)
             cv2.drawMarker(colour_frame, (int(el_x), int(el_y)), (255, 0, 255), cv2.MARKER_DIAMOND)

-            for k,v in person.tracked_points.items():
-                cv2.drawMarker(colour_frame, (v[0],v[1]), (0, 0, 255),cv2.MARKER_TILTED_CROSS,10)
+            for k, v in person.tracked_points.items():
+                cv2.drawMarker(colour_frame, (v[0], v[1]), (0, 0, 255), cv2.MARKER_TILTED_CROSS, 10)

             # cv2.putText(frame, str(person.head_direction), (int(person.central_point[0]), int(person.central_point[1])),
             #             cv2.FONT_HERSHEY_COMPLEX, 0.4, (0, 255, 0))
@@ -57,7 +57,10 @@ def renderFrame(self, sense, colour_frame, gray_frame=None):
         if self.transform_mapper is not None:
             # Render markers
             for marker in self.transform_mapper.screen_points:
-                cv2.drawMarker(colour_frame, marker, (0, 0, 255))
+                # print(marker)
+                cv2.drawMarker(colour_frame, marker, (0, 125, 255), markerType=cv2.MARKER_STAR, thickness=3)
+        else:
+            print("No Mapper")

         for sensed_person in sense.sensed_persons.values():
             person = sensed_person.tracked_person
@@ -71,8 +74,6 @@ def renderFrame(self, sense, colour_frame, gray_frame=None):
                 colour = (0, 255, 0)  # Colour indicating sitting
                 pose_text = "Sit"

-
-
             # cv2.putText(colour_frame, str(person.label),
             #             (int(person.detection.central_point[0]), int(person.detection.central_point[1])),
             #             cv2.FONT_HERSHEY_COMPLEX, 0.4, colour)
diff --git a/sense/test_videos/videos.conf b/sense/test_videos/videos.conf
index bde34563..d7ba5d9a 100644
--- a/sense/test_videos/videos.conf
+++ b/sense/test_videos/videos.conf
@@ -3,6 +3,11 @@ path = test_videos/bia/pier2.mkv
 markers = []
 map_markers = []

+[bia.departure]
+path = test_videos/bia/departure.mkv
+markers = []
+map_markers = []
+
 [office.room]
 path = test_videos/test_office.mp4
 markers = [(615, 340), (13, 334), (175, 90), (430, 85)]
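Note on the frame.shape fix in LightWeightCamServer.load_config: OpenCV frames are NumPy arrays indexed row-first, so frame.shape is (height, width, channels). The old unpacking w, h, depth = frame.shape therefore swapped the dimensions reported to configureMapping. A self-contained check:

    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # a 640x480 BGR frame
    h, w, depth = frame.shape                        # (rows, cols, channels)
    assert (h, w, depth) == (480, 640, 3)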
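Note on the start_cam_server rewrite: holding the Flask app on the instance (self.app) lets the route function close over self directly, which removes the _self_shaddow workaround and allows several LightWeightCamServer instances to run on different ports in one codebase. A standalone sketch of the pattern, with an illustrative class name and payload (not part of this diff):

    from flask import Flask, jsonify

    class TinyCamServer:
        def __init__(self, port):
            self.port = port
            self.app = Flask(__name__)  # one app per instance, not one per module

        def start(self):
            @self.app.route('/getMap/<int:frame_time>')
            def get_map(frame_time):  # closure captures self; no shadow variable needed
                return jsonify({"port": self.port, "frameTime": frame_time})

            self.app.run(port=self.port)

    TinyCamServer(10005).start()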