Initial implementation of multicam + camera group scenario.
IMS94 committed Oct 27, 2018
1 parent 1cd56d5 commit d9630da
Showing 7 changed files with 69 additions and 37 deletions.
@@ -117,7 +117,7 @@ public void completed(HttpResponse httpResponse) {

@Override
public void failed(Exception e) {
logger.error("Error occurred when notifying camera with", e);
logger.error("Error occurred when notifying camera: {}", e.getMessage());
setState(State.IDLE);
}

@@ -122,6 +122,7 @@ public CameraConfig getCameraConfig(int cameraId) {
p.setY(p.getY() * cameraConfig.getHeight() / Constants.CAMERA_VIEW_HEIGHT);
});

logger.debug("Get camera config: {}", cameraConfig);
return cameraConfig;
}

2 changes: 1 addition & 1 deletion ngapp/src/app/realtime-map/realtime-map.component.html
@@ -1,6 +1,6 @@
<div class="box box-primary" *ngFor="let d of data">
<div class="box-header with-border">
<h3 class="box-title">Real-time Map</h3>
<h3 class="box-title">Real-time Map - {{d.cameraGroup.name}}</h3>
<div class="box-tools pull-right">
<span class="badge">{{d.personSnapshots.length}}</span> people
</div>
36 changes: 27 additions & 9 deletions sense/App.py
@@ -16,13 +16,24 @@
logging.basicConfig(level=logging.DEBUG)


def run_cam_server(input_queue, output_queue):
def run_cam_server_pier2(input_queue, output_queue):
cap, markers, map_markers = load_video("bia.pier2")
position_mapper = PTEMapper(markers, map_markers)
sense = Sense(input_queue, output_queue, position_mapper, AngleMapper(position_mapper), WorldSpaceTracker(),
Snapy())

server = LightWeightCamServer(sense, cap, (0.5, 0.5))
server = LightWeightCamServer(10005, sense, cap, (0.5, 0.5))
server.load_config(markers, map_markers)
server.start_cam_server()


def run_cam_server_departure(input_queue, output_queue):
cap, markers, map_markers = load_video("bia.departure")
position_mapper = PTEMapper(markers, map_markers)
sense = Sense(input_queue, output_queue, position_mapper, AngleMapper(position_mapper), WorldSpaceTracker(),
Snapy())

server = LightWeightCamServer(10004, sense, cap, (0.5, 0.5))
server.load_config(markers, map_markers)
server.start_cam_server()

@@ -36,20 +36,27 @@ def run_detector_service(queue_pairs):
if __name__ == "__main__":
multiprocessing.set_start_method("spawn")

input_queue = Queue()
output_queue = Queue()
pier2_input = Queue()
pier2_output = Queue()

queue_pairs = [(input_queue, output_queue)]
departure_input = Queue()
departure_output = Queue()

queue_pairs = [(pier2_input, pier2_output), (departure_input, departure_output)]

service_process = Process(target=run_detector_service, args=([queue_pairs]))
service_process.daemon = True
service_process.start()

cam_process = Process(target=run_cam_server, args=(input_queue, output_queue))
cam_process.daemon = True
cam_process.start()
pier2_process = Process(target=run_cam_server_pier2, args=(pier2_input, pier2_output))
pier2_process.daemon = True
pier2_process.start()

departure_process = Process(target=run_cam_server_departure, args=(departure_input, departure_output))
departure_process.daemon = True
departure_process.start()

processes = [service_process, cam_process]
processes = [service_process, pier2_process, departure_process]

running = True
while running:
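
The two run_cam_server_* functions above repeat the same queue/process wiring once per camera. A minimal, standard-library-only sketch of that fan-out, where camera_worker stands in for the per-camera setup (load_video, Sense, LightWeightCamServer) and the CAMERA_PORTS table is hypothetical rather than part of this commit:

    from multiprocessing import Process, Queue

    # Hypothetical camera name -> HTTP port table (mirrors pier2/departure above).
    CAMERA_PORTS = {"bia.pier2": 10005, "bia.departure": 10004}


    def camera_worker(name, port, input_queue, output_queue):
        # Placeholder for run_cam_server_pier2 / run_cam_server_departure:
        # load the video, build Sense, start a LightWeightCamServer on `port`.
        print("camera", name, "would serve on port", port)


    if __name__ == "__main__":
        queue_pairs, cam_processes = [], []
        for name, port in CAMERA_PORTS.items():
            in_q, out_q = Queue(), Queue()
            queue_pairs.append((in_q, out_q))
            p = Process(target=camera_worker, args=(name, port, in_q, out_q))
            p.daemon = True
            p.start()
            cam_processes.append(p)
        # queue_pairs would then be handed to run_detector_service, as App.py does.
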
47 changes: 27 additions & 20 deletions sense/LightWeightCamServer.py
@@ -1,8 +1,10 @@
import logging

import cv2
from flask import Flask, jsonify
from flask import Flask, jsonify, request

from ScreenSpacePreview import ScreenSpacePreview
from SenseMapViewer import SenseMapViewer
from Util import restEncodeImage
from communication.ServerSession import ServerSession

@@ -15,33 +17,39 @@ class LightWeightCamServer:
todo: remove tracking - Imesha
"""

def __init__(self, sense, capture, scale=(0.5, 0.5)):
def __init__(self, port, sense, capture, scale=(0.5, 0.5)):
"""
Initialize
"""
self.logger = logging.getLogger("CamServer")

self.app = Flask(__name__)
self.sense = sense
self.capture = capture
self.scale = scale
self.port = 10005
self.port = port
self.server_session = ServerSession(my_ip="localhost", my_port=self.port)
self.logger = logging.getLogger("CamServer")

self.screen_preview = ScreenSpacePreview(sense.position_mapper)
self.track_viewer = SenseMapViewer([sense.position_mapper], True)

def load_config(self, default_markers=None, default_map_markers=None):
"""
Connect to server and load configurations from it
:return:
"""
self.track_viewer.setupWindows()

# Obtain preview frame to be sent to server for configuration
status, frame = self.capture.read()

frame = cv2.resize(frame, (0, 0), fx=self.scale[0], fy=self.scale[1])

w, h, depth = frame.shape
h, w, depth = frame.shape
self.server_session.configureMapping(frame, w, h, default_markers, default_map_markers)
# Obtain marker points from server. Blocks until server responds marker points.
mapping = self.server_session.obtainMapping()
self.logger.info("Obtained mapping: %s", mapping)
self.logger.info("Obtained mapping: {} - {}".format(mapping.screen_space_points, mapping.world_space_points))

self.sense.position_mapper.screen_points = mapping.screen_space_points
# TODO: Do scaling correction
@@ -78,7 +86,8 @@ def get_map_at(self, frame_time):
# Generate Response
for sensed_person in self.sense.sensed_persons.values():
person = sensed_person.tracked_person
# Append to results

# self.logger.debug("Result: ({},{})".format(person.position[0], person.position[1]))
result = {
"x": int(person.position[0]),
"y": int(person.position[1]),
@@ -93,16 +102,9 @@ def get_map_at(self, frame_time):
detection = sensed_person.tracked_person.detection
# print(detection.person_bound)
(dc_x, dc_y, dc_w, dc_h) = map(int, detection.person_bound)
(f_x, f_y, f_w, f_h) = detection.person_bound
(l_x, l_y) = detection.leg_point
(el_x, el_y) = detection.estimated_leg_point
cv2.rectangle(frame, (f_x, f_y), (f_w, f_h), (0, 0, 0), 2)
cv2.drawMarker(frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS, thickness=3)
cv2.drawMarker(frame, (int(el_x), int(el_y)), (255, 0, 255), cv2.MARKER_DIAMOND)

snap = frame[dc_y:dc_h, dc_x:dc_w]
if snap.shape[0] > 0 and snap.shape[1] > 0:
logging.debug("Snapping person")
result["image"] = restEncodeImage(snap)
result_coordinates.append(result)

@@ -117,8 +119,15 @@ def get_map_at(self, frame_time):
"personCoordinates": result_coordinates
}

cv2.imshow("output", frame)
cv2.waitKey(1)
self.screen_preview.renderFrame(self.sense, frame, None)
self.track_viewer.showMap([self.sense])

key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()

return result

@@ -128,14 +137,12 @@ def start_cam_server(self):
:param cam_port: port of camera server
:return:
"""
app = Flask(__name__)

_self_shaddow = self

@app.route('/getMap/<int:frame_time>')
@self.app.route('/getMap/<int:frame_time>')
def getMapFR(frame_time):
logging.debug("Sending map for timestamp - %d", frame_time)
return jsonify(_self_shaddow.get_map_at(frame_time))

logging.info("Starting server at " + str(self.port))
app.run(port=self.port)
self.app.run(port=self.port)
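
The key structural change in this file is that the Flask app and port now belong to each instance (self.app is created in __init__ and the port is passed to the constructor) instead of being built inside start_cam_server, which is what lets two camera servers run side by side in separate processes. A self-contained sketch of that pattern; TinyCamServer and its route body are illustrative only, not the repository's code:

    from flask import Flask, jsonify


    class TinyCamServer:
        def __init__(self, port):
            self.port = port
            self.app = Flask(__name__)  # one Flask app per instance, not per module

            @self.app.route('/getMap/<int:frame_time>')
            def get_map(frame_time):
                # Stand-in for get_map_at(frame_time)
                return jsonify({"port": self.port, "frameTime": frame_time})

        def start(self):
            self.app.run(port=self.port)


    # Each instance can be started in its own process on its own port,
    # e.g. TinyCamServer(10004).start() and TinyCamServer(10005).start().
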
13 changes: 7 additions & 6 deletions sense/ScreenSpacePreview.py
@@ -45,19 +45,22 @@ def renderFrame(self, sense, colour_frame, gray_frame=None):
(l_x, l_y) = person.leg_point
(el_x, el_y) = person.estimated_leg_point
cv2.rectangle(colour_frame, (f_x, f_y), (f_w, f_h), (0, 0, 0), 2)
cv2.drawMarker(colour_frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS,thickness=3)
cv2.drawMarker(colour_frame, (int(l_x), int(l_y)), (255, 0, 255), cv2.MARKER_CROSS, thickness=3)
cv2.drawMarker(colour_frame, (int(el_x), int(el_y)), (255, 0, 255), cv2.MARKER_DIAMOND)

for k,v in person.tracked_points.items():
cv2.drawMarker(colour_frame, (v[0],v[1]), (0, 0, 255),cv2.MARKER_TILTED_CROSS,10)
for k, v in person.tracked_points.items():
cv2.drawMarker(colour_frame, (v[0], v[1]), (0, 0, 255), cv2.MARKER_TILTED_CROSS, 10)

# cv2.putText(frame, str(person.head_direction), (int(person.central_point[0]), int(person.central_point[1])),
# cv2.FONT_HERSHEY_COMPLEX, 0.4, (0, 255, 0))

if self.transform_mapper is not None:
# Render markers
for marker in self.transform_mapper.screen_points:
cv2.drawMarker(colour_frame, marker, (0, 0, 255))
# print(marker)
cv2.drawMarker(colour_frame, marker, (0, 125, 255), markerType=cv2.MARKER_STAR, thickness=3)
else:
print("No Mapper")

for sensed_person in sense.sensed_persons.values():
person = sensed_person.tracked_person
@@ -71,8 +74,6 @@ def renderFrame(self, sense, colour_frame, gray_frame=None):
colour = (0, 255, 0) # Colour indicating sitting
pose_text = "Sit"



# cv2.putText(colour_frame, str(person.label),
# (int(person.detection.central_point[0]), int(person.detection.central_point[1])),
# cv2.FONT_HERSHEY_COMPLEX, 0.4, colour)
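
For reference, the cv2.drawMarker call that the marker-rendering line above switches to, in a self-contained form (the frame and points are invented; OpenCV colours are BGR, so (0, 125, 255) is orange):

    import cv2
    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for colour_frame

    for marker in [(100, 200), (320, 240)]:  # screen-space calibration points
        cv2.drawMarker(frame, marker, (0, 125, 255),
                       markerType=cv2.MARKER_STAR, markerSize=20, thickness=3)
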
5 changes: 5 additions & 0 deletions sense/test_videos/videos.conf
@@ -3,6 +3,11 @@ path = test_videos/bia/pier2.mkv
markers = []
map_markers = []

[bia.departure]
path = test_videos/bia/departure.mkv
markers = []
map_markers = []

[office.room]
path = test_videos/test_office.mp4
markers = [(615, 340), (13, 334), (175, 90), (430, 85)]
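
The new [bia.departure] section follows the same shape as the existing entries: a video path plus two marker lists. Assuming load_video reads this file with configparser and parses the lists as Python literals (the commit does not show its implementation, so read_video_section below is only a guess at the pattern):

    import ast
    import configparser

    import cv2  # assumption: the `path` entry is opened with OpenCV


    def read_video_section(section, conf_path="test_videos/videos.conf"):
        config = configparser.ConfigParser()
        config.read(conf_path)
        entry = config[section]  # e.g. "bia.departure"
        cap = cv2.VideoCapture(entry["path"])
        markers = ast.literal_eval(entry["markers"])  # list of (x, y) tuples
        map_markers = ast.literal_eval(entry["map_markers"])
        return cap, markers, map_markers
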
