diff --git a/examples/official/9.x/yolo_qr/416x416.jpg b/examples/official/9.x/yolo_qr/416x416.jpg
new file mode 100644
index 0000000..a1f5800
Binary files /dev/null and b/examples/official/9.x/yolo_qr/416x416.jpg differ
diff --git a/examples/official/9.x/yolo_qr/README.md b/examples/official/9.x/yolo_qr/README.md
new file mode 100644
index 0000000..877c5f1
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/README.md
@@ -0,0 +1,92 @@
+# QR Detection with OpenCV and YOLO Model in Python
+This repository provides samples demonstrating how to detect QR codes using **YOLO** and how to read QR codes with the [Dynamsoft Barcode Reader](https://www.dynamsoft.com/barcode-reader/overview/).
+
+## Prerequisites
+- OpenCV 4.x
+
+  ```
+  pip install opencv-python
+  ```
+
+- Dynamsoft Barcode Reader
+
+  ```
+  pip install dbr
+  ```
+- Obtain a [Dynamsoft Barcode Reader trial license](https://www.dynamsoft.com/customer/license/trialLicense?product=dbr) and update your code with the provided license key:
+
+  ```python
+  from dbr import *
+
+  license_key = "LICENSE-KEY"
+  BarcodeReader.init_license(license_key)
+  reader = BarcodeReader()
+  ```
+
+
+## Usage
+
+#### QR Detection
+
+- From an image file:
+
+  ```
+  python3 opencv-yolo.py
+  ```
+
+- From a camera:
+
+  ```
+  python3 opencv-yolo-camera.py
+  ```
+
+  ![OpenCV YOLO for QR detection](https://www.dynamsoft.com/codepool/img/2020/11/opencv-dnn-yolo3-qr-detection.gif)
+
+#### QR Reading with Dynamsoft Barcode Reader
+
+Below is a sample code snippet for reading QR codes with the Dynamsoft Barcode Reader:
+
+```python
+from dbr import *
+
+license_key = "LICENSE-KEY"
+BarcodeReader.init_license(license_key)
+reader = BarcodeReader()
+settings = reader.reset_runtime_settings()
+settings = reader.get_runtime_settings()
+settings.region_bottom = bottom
+settings.region_left = left
+settings.region_right = right
+settings.region_top = top
+reader.update_runtime_settings(settings)
+
+try:
+    text_results = reader.decode_buffer(frame)
+
+    if text_results is not None:
+        for text_result in text_results:
+            print("Barcode Format :")
+            print(text_result.barcode_format_string)
+            print("Barcode Text :")
+            print(text_result.barcode_text)
+            print("Localization Points : ")
+            print(text_result.localization_result.localization_points)
+            print("-------------")
+except BarcodeReaderError as bre:
+    print(bre)
+```
+
+- From an image file:
+
+  ```
+  python3 yolo-dbr.py
+  ```
+
+- From a camera:
+
+  ```
+  python3 yolo-dbr-camera.py
+  ```
+
+## Blog
+[How to Detect and Decode QR Code with YOLO, OpenCV, and Dynamsoft Barcode Reader](https://www.dynamsoft.com/codepool/qr-code-detect-decode-yolo-opencv.html)
\ No newline at end of file
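A side note on the Prerequisites above: the `dbr` package can also decode an image straight from disk, which is handy for verifying the install before wiring up OpenCV. A minimal sketch, assuming the 9.x `decode_file` API and a test image next to the script:

```python
from dbr import BarcodeReader, BarcodeReaderError

# Assumption: replace with your own trial license key.
BarcodeReader.init_license("LICENSE-KEY")
reader = BarcodeReader()

try:
    # decode_file reads and decodes an image file in one call.
    results = reader.decode_file("416x416.jpg")
    if results is not None:
        for result in results:
            print(result.barcode_format_string, result.barcode_text)
except BarcodeReaderError as bre:
    print(bre)
```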
diff --git a/examples/official/9.x/yolo_qr/cropped.jpg b/examples/official/9.x/yolo_qr/cropped.jpg
new file mode 100644
index 0000000..f9ed65c
Binary files /dev/null and b/examples/official/9.x/yolo_qr/cropped.jpg differ
diff --git a/examples/official/9.x/yolo_qr/dbr-only-camera.py b/examples/official/9.x/yolo_qr/dbr-only-camera.py
new file mode 100644
index 0000000..ad3bc3e
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/dbr-only-camera.py
@@ -0,0 +1,145 @@
+from time import sleep
+import cv2 as cv
+import numpy as np
+import time
+from threading import Thread
+import queue
+from dbr import *
+
+license_key = "DLS2eyJoYW5kc2hha2VDb2RlIjoiMjAwMDAxLTE2NDk4Mjk3OTI2MzUiLCJvcmdhbml6YXRpb25JRCI6IjIwMDAwMSIsInNlc3Npb25QYXNzd29yZCI6IndTcGR6Vm05WDJrcEQ5YUoifQ=="
+BarcodeReader.init_license(license_key)
+reader = BarcodeReader()
+color = (0, 0, 255)
+thickness = 2
+
+
+def decodeframe(frame):
+
+    try:
+        outs = reader.decode_buffer(frame)
+        if outs is not None:
+            return outs
+    except BarcodeReaderError as bre:
+        print(bre)
+
+    return None
+
+
+winName = 'QR Detection'
+
+
+def postprocess(frame, outs):
+    if outs is None:
+        return
+
+    for out in outs:
+        points = out.localization_result.localization_points
+
+        cv.line(frame, points[0], points[1], color, thickness)
+        cv.line(frame, points[1], points[2], color, thickness)
+        cv.line(frame, points[2], points[3], color, thickness)
+        cv.line(frame, points[3], points[0], color, thickness)
+        cv.putText(frame, out.barcode_text, (min([point[0] for point in points]), min(
+            [point[1] for point in points])), cv.FONT_HERSHEY_SIMPLEX, 1, color, thickness)
+
+
+cap = cv.VideoCapture(0)
+
+
+class QueueFPS(queue.Queue):
+    def __init__(self):
+        queue.Queue.__init__(self)
+        self.startTime = 0
+        self.counter = 0
+
+    def put(self, v):
+        queue.Queue.put(self, v)
+        self.counter += 1
+        if self.counter == 1:
+            self.startTime = time.time()
+
+    def getFPS(self):
+        return self.counter / (time.time() - self.startTime)
+
+
+process = True
+
+#
+# Frames capturing thread
+#
+framesQueue = QueueFPS()
+
+
+def framesThreadBody():
+    global framesQueue, process
+
+    while process:
+        hasFrame, frame = cap.read()
+        if not hasFrame:
+            break
+        framesQueue.put(frame)
+
+
+#
+# Frames processing thread
+#
+decodingQueue = QueueFPS()
+
+
+def processingThreadBody():
+    global decodingQueue, process
+
+    while process:
+        # Get the next frame
+        frame = None
+        try:
+            frame = framesQueue.get_nowait()
+            framesQueue.queue.clear()
+        except queue.Empty:
+            pass
+
+        if frame is not None:
+            outs = decodeframe(frame)
+            decodingQueue.put((frame, outs))
+            sleep(0.03)
+
+
+framesThread = Thread(target=framesThreadBody)
+framesThread.start()
+
+processingThread = Thread(target=processingThreadBody)
+processingThread.start()
+
+#
+# Postprocessing and rendering loop
+#
+while cv.waitKey(1) < 0:
+    try:
+        # Request predictions first because they are queued after frames
+        outs = decodingQueue.get_nowait()
+        frame = outs[0]
+        postprocess(frame, outs[1])
+
+        # Put efficiency information.
+        if decodingQueue.counter > 1:
+            label = 'Camera: %.2f FPS' % (framesQueue.getFPS())
+            cv.putText(frame, label, (0, 15),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'DBR SDK: %.2f FPS' % (decodingQueue.getFPS())
+            cv.putText(frame, label, (0, 30),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Skipped frames: %d' % (
+                framesQueue.counter - decodingQueue.counter)
+            cv.putText(frame, label, (0, 45),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+        cv.imshow(winName, frame)
+    except queue.Empty:
+        pass
+
+
+process = False
+framesThread.join()
+processingThread.join()
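The capture thread and the decoding thread in `dbr-only-camera.py` communicate through a queue, and the consumer drains the queue before decoding so it always works on the most recent frame instead of falling behind. A minimal sketch of that pattern in isolation (the names here are illustrative, not part of the sample):

```python
import queue
import threading
import time

frames = queue.Queue()

def producer():
    # Stands in for the cap.read() loop.
    for i in range(100):
        frames.put(i)
        time.sleep(0.01)

def consumer():
    processed = 0
    while processed < 5:
        try:
            latest = frames.get_nowait()
            frames.queue.clear()  # drop older frames: work only on the newest
        except queue.Empty:
            continue
        time.sleep(0.05)          # stands in for a slow decode
        print('processed frame', latest)
        processed += 1

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start(); t2.start()
t1.join(); t2.join()
```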
diff --git a/examples/official/9.x/yolo_qr/dbr-only.py b/examples/official/9.x/yolo_qr/dbr-only.py
new file mode 100644
index 0000000..d7b979d
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/dbr-only.py
@@ -0,0 +1,45 @@
+import cv2 as cv
+import numpy as np
+import time
+from dbr import *
+
+license_key = "DLS2eyJoYW5kc2hha2VDb2RlIjoiMjAwMDAxLTE2NDk4Mjk3OTI2MzUiLCJvcmdhbml6YXRpb25JRCI6IjIwMDAwMSIsInNlc3Npb25QYXNzd29yZCI6IndTcGR6Vm05WDJrcEQ5YUoifQ=="
+BarcodeReader.init_license(license_key)
+reader = BarcodeReader()
+color = (0, 0, 255)
+thickness = 2
+
+
+def decodeframe(frame):
+
+    try:
+        text_results = reader.decode_buffer(frame)
+
+        if text_results is not None:
+            for text_result in text_results:
+                print("Barcode Format :")
+                print(text_result.barcode_format_string)
+                print("Barcode Text :")
+                print(text_result.barcode_text)
+                print("Localization Points : ")
+                print(text_result.localization_result.localization_points)
+                print("-------------")
+                points = text_result.localization_result.localization_points
+
+                cv.line(frame, points[0], points[1], color, thickness)
+                cv.line(frame, points[1], points[2], color, thickness)
+                cv.line(frame, points[2], points[3], color, thickness)
+                cv.line(frame, points[3], points[0], color, thickness)
+
+                cv.putText(frame, text_result.barcode_text, (min([point[0] for point in points]), min(
+                    [point[1] for point in points])), cv.FONT_HERSHEY_SIMPLEX, 1, color, thickness)
+    except BarcodeReaderError as bre:
+        print(bre)
+
+
+# Load a frame
+frame = cv.imread("416x416.jpg")
+decodeframe(frame)
+
+cv.imshow('QR Detection', frame)
+cv.waitKey()
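The four `cv.line` calls above draw the localization quadrilateral edge by edge. `cv.polylines` can draw the same closed polygon in one call; a hedged alternative sketch (not how the sample does it, just a compact equivalent):

```python
import cv2 as cv
import numpy as np

def draw_localization(frame, points, color=(0, 0, 255), thickness=2):
    # points is the list of four (x, y) corners from
    # localization_result.localization_points.
    pts = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
    cv.polylines(frame, [pts], isClosed=True, color=color, thickness=thickness)
```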
diff --git a/examples/official/9.x/yolo_qr/opencv-yolo-camera.py b/examples/official/9.x/yolo_qr/opencv-yolo-camera.py
new file mode 100644
index 0000000..6213e6e
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/opencv-yolo-camera.py
@@ -0,0 +1,175 @@
+# https://opencv-tutorial.readthedocs.io/en/latest/yolo/yolo.html
+# https://docs.opencv.org/master/d6/d0f/group__dnn.html
+# https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html
+# https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py
+import cv2 as cv
+import numpy as np
+import time
+from threading import Thread
+import queue
+
+winName = 'QR Detection'
+
+threshold = 0.6
+
+# Load class names and YOLOv3-tiny model
+classes = open('qrcode.names').read().strip().split('\n')
+net = cv.dnn.readNetFromDarknet(
+    'qrcode-yolov3-tiny.cfg', 'qrcode-yolov3-tiny.weights')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+# net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)  # DNN_TARGET_OPENCL DNN_TARGET_CPU
+
+
+def postprocess(frame, outs):
+    frameHeight, frameWidth = frame.shape[:2]
+
+    classIds = []
+    confidences = []
+    boxes = []
+
+    for out in outs:
+        for detection in out:
+            scores = detection[5:]
+            classId = np.argmax(scores)
+            confidence = scores[classId]
+            if confidence > threshold:
+                x, y, width, height = detection[:4] * np.array(
+                    [frameWidth, frameHeight, frameWidth, frameHeight])
+                left = int(x - width / 2)
+                top = int(y - height / 2)
+                classIds.append(classId)
+                confidences.append(float(confidence))
+                boxes.append([left, top, int(width), int(height)])
+
+    indices = cv.dnn.NMSBoxes(boxes, confidences, threshold, threshold - 0.1)
+    if not isinstance(indices, tuple):
+        for i in indices.flatten():
+            box = boxes[i]
+            left = box[0]
+            top = box[1]
+            width = box[2]
+            height = box[3]
+
+            # Draw bounding box for objects
+            cv.rectangle(frame, (left, top),
+                         (left + width, top + height), (0, 0, 255))
+
+            # Draw class name and confidence
+            label = '%s:%.2f' % (classes[classIds[i]], confidences[i])
+            cv.putText(frame, label, (left, top),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
+
+
+# Determine the output layer
+ln = net.getLayerNames()
+ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+
+
+cap = cv.VideoCapture(0)
+
+
+class QueueFPS(queue.Queue):
+    def __init__(self):
+        queue.Queue.__init__(self)
+        self.startTime = 0
+        self.counter = 0
+
+    def put(self, v):
+        queue.Queue.put(self, v)
+        self.counter += 1
+        if self.counter == 1:
+            self.startTime = time.time()
+
+    def getFPS(self):
+        return self.counter / (time.time() - self.startTime)
+
+
+process = True
+
+#
+# Frames capturing thread
+#
+framesQueue = QueueFPS()
+
+
+def framesThreadBody():
+    global framesQueue, process
+
+    while process:
+        hasFrame, frame = cap.read()
+        if not hasFrame:
+            break
+        framesQueue.put(frame)
+
+
+#
+# Frames processing thread
+#
+processedFramesQueue = queue.Queue()
+predictionsQueue = QueueFPS()
+
+
+def processingThreadBody():
+    global processedFramesQueue, predictionsQueue, process
+
+    while process:
+        # Get the next frame
+        frame = None
+        try:
+            frame = framesQueue.get_nowait()
+            framesQueue.queue.clear()
+        except queue.Empty:
+            continue
+
+        if frame is not None:
+            blob = cv.dnn.blobFromImage(
+                frame, 1/255, (416, 416), swapRB=True, crop=False)
+            processedFramesQueue.put(frame)
+
+            # Run a model
+            net.setInput(blob)
+            # Compute
+            outs = net.forward(ln)
+            predictionsQueue.put(outs)
+
+
+framesThread = Thread(target=framesThreadBody)
+framesThread.start()
+
+processingThread = Thread(target=processingThreadBody)
+processingThread.start()
+
+#
+# Postprocessing and rendering loop
+#
+while cv.waitKey(1) < 0:
+    try:
+        # Request predictions first because they are queued after frames
+        outs = predictionsQueue.get_nowait()
+        frame = processedFramesQueue.get_nowait()
+
+        postprocess(frame, outs)
+
+        # Put efficiency information.
+        if predictionsQueue.counter > 1:
+            label = 'Camera: %.2f FPS' % (framesQueue.getFPS())
+            cv.putText(frame, label, (0, 15),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Network: %.2f FPS' % (predictionsQueue.getFPS())
+            cv.putText(frame, label, (0, 30),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Skipped frames: %d' % (
+                framesQueue.counter - predictionsQueue.counter)
+            cv.putText(frame, label, (0, 45),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+        cv.imshow(winName, frame)
+    except queue.Empty:
+        pass
+
+
+process = False
+framesThread.join()
+processingThread.join()
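`cv.dnn.NMSBoxes` returns an empty tuple when no boxes survive non-maximum suppression, and the surviving indices have been shaped differently across OpenCV 4.x builds, which is why the scripts guard with `isinstance(indices, tuple)` before flattening. A small normalization helper, written defensively for either shape:

```python
import numpy as np

def nms_indices(indices):
    """Normalize cv.dnn.NMSBoxes output to a flat list of ints.

    Handles the empty tuple returned when nothing passes NMS, as well
    as the (N,) and (N, 1) array shapes produced by different builds.
    """
    if isinstance(indices, tuple) and len(indices) == 0:
        return []
    return np.asarray(indices).flatten().tolist()
```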
diff --git a/examples/official/9.x/yolo_qr/opencv-yolo.py b/examples/official/9.x/yolo_qr/opencv-yolo.py
new file mode 100644
index 0000000..2dca7cc
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/opencv-yolo.py
@@ -0,0 +1,111 @@
+# https://opencv-tutorial.readthedocs.io/en/latest/yolo/yolo.html
+# https://docs.opencv.org/master/d6/d0f/group__dnn.html
+# https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html
+# https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py
+import cv2 as cv
+import numpy as np
+import time
+
+# Load an image
+frame = cv.imread("416x416.jpg")
+# frame = cv.imread("test.jpg")
+
+threshold = 0.6
+maxWidth = 1280
+maxHeight = 720
+imgHeight, imgWidth = frame.shape[:2]
+hScale = 1
+wScale = 1
+thickness = 1
+
+if imgHeight > maxHeight:
+    hScale = imgHeight / maxHeight
+    thickness = 6
+
+if imgWidth > maxWidth:
+    wScale = imgWidth / maxWidth
+    thickness = 6
+
+# Load class names and YOLOv3-tiny model
+classes = open('qrcode.names').read().strip().split('\n')
+net = cv.dnn.readNetFromDarknet(
+    'qrcode-yolov3-tiny.cfg', 'qrcode-yolov3-tiny.weights')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+# net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)  # DNN_TARGET_OPENCL DNN_TARGET_CPU DNN_TARGET_CUDA
+
+start_time = time.monotonic()
+# Convert frame to blob
+blob = cv.dnn.blobFromImage(frame, 1/255, (416, 416), swapRB=True, crop=False)
+elapsed_ms = (time.monotonic() - start_time) * 1000
+print('blobFromImage in %.1fms' % (elapsed_ms))
+
+
+def postprocess(frame, outs):
+    frameHeight, frameWidth = frame.shape[:2]
+
+    classIds = []
+    confidences = []
+    boxes = []
+
+    for out in outs:
+        for detection in out:
+            scores = detection[5:]
+            classId = np.argmax(scores)
+            confidence = scores[classId]
+            if confidence > threshold:
+                x, y, width, height = detection[:4] * np.array(
+                    [frameWidth, frameHeight, frameWidth, frameHeight])
+                left = int(x - width / 2)
+                top = int(y - height / 2)
+                classIds.append(classId)
+                confidences.append(float(confidence))
+                boxes.append([left, top, int(width), int(height)])
+
+    indices = cv.dnn.NMSBoxes(boxes, confidences, threshold, threshold - 0.1)
+
+    if not isinstance(indices, tuple):
+        for i in indices.flatten():  # Flatten the indices if they are in nested format
+            box = boxes[i]
+            left = box[0]
+            top = box[1]
+            width = box[2]
+            height = box[3]
+
+            # Crop the detected object from the frame
+            cropped_image = frame[top:top + height, left:left + width]
+            cv.imshow('cropped', cropped_image)
+            cv.imwrite('cropped.jpg', cropped_image)
+
+            # Draw bounding box around the detected object
+            cv.rectangle(frame, (left, top), (left + width,
+                         top + height), (0, 0, 255), thickness)
+
+            # Draw class name and confidence on the bounding box
+            label = '%s:%.2f' % (classes[classIds[i]], confidences[i])
+            cv.putText(frame, label, (left, top),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
+
+
+# Determine the output layer
+ln = net.getLayerNames()
+ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+
+net.setInput(blob)
+start_time = time.monotonic()
+# Compute
+outs = net.forward(ln)
+elapsed_ms = (time.monotonic() - start_time) * 1000
+print('forward in %.1fms' % (elapsed_ms))
+
+start_time = time.monotonic()
+postprocess(frame, outs)
+elapsed_ms = (time.monotonic() - start_time) * 1000
+print('postprocess in %.1fms' % (elapsed_ms))
+
+if hScale > wScale:
+    frame = cv.resize(frame, (int(imgWidth / hScale), maxHeight))
+elif hScale < wScale:
+    frame = cv.resize(frame, (maxWidth, int(imgHeight / wScale)))
+
+cv.imshow('QR Detection', frame)
+cv.waitKey()
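Each YOLO detection row is `[cx, cy, w, h, objectness, class scores...]`, with the geometry normalized to [0, 1]. The `postprocess` functions above rescale it to pixels and convert the center point to a top-left corner; the same arithmetic in isolation:

```python
import numpy as np

def detection_to_box(detection, frame_width, frame_height):
    # detection[:4] holds normalized (center_x, center_y, width, height).
    cx, cy, w, h = detection[:4] * np.array(
        [frame_width, frame_height, frame_width, frame_height])
    left = int(cx - w / 2)   # center -> top-left corner
    top = int(cy - h / 2)
    return left, top, int(w), int(h)

# Example: a detection centered in a 416x416 image covering a quarter of it.
print(detection_to_box(np.array([0.5, 0.5, 0.25, 0.25, 0.9, 0.8]), 416, 416))
# -> (156, 156, 104, 104)
```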
diff --git a/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.cfg b/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.cfg
new file mode 100644
index 0000000..b38e156
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.cfg
@@ -0,0 +1,182 @@
+[net]
+# Testing
+batch=24
+subdivisions=8
+# Training
+# batch=64
+# subdivisions=2
+width=416
+height=416
+channels=3
+momentum=0.9
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.001
+burn_in=1000
+max_batches = 4000
+policy=steps
+steps=3200,3600
+scales=.1,.1
+
+[convolutional]
+batch_normalize=1
+filters=16
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=1
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+###########
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=18
+activation=linear
+
+
+
+[yolo]
+mask = 3,4,5
+anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
+classes=1
+num=6
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 8
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=18
+activation=linear
+
+[yolo]
+mask = 0,1,2
+anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
+classes=1
+num=6
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
diff --git a/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.weights b/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.weights
new file mode 100644
index 0000000..d743311
Binary files /dev/null and b/examples/official/9.x/yolo_qr/qrcode-yolov3-tiny.weights differ
diff --git a/examples/official/9.x/yolo_qr/qrcode.names b/examples/official/9.x/yolo_qr/qrcode.names
new file mode 100644
index 0000000..56bae9d
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/qrcode.names
@@ -0,0 +1 @@
+QR_CODE
\ No newline at end of file
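In this config, `filters=18` on the two convolutional layers feeding the `[yolo]` layers is not arbitrary: each of the 3 anchors selected by a `mask` predicts 4 box coordinates, 1 objectness score, and 1 class score, so filters = (classes + 5) × 3. A quick check (the formula is standard for YOLOv3; the function name is mine):

```python
def yolo_head_filters(num_classes: int, anchors_per_scale: int = 3) -> int:
    # 4 box coords + 1 objectness + num_classes scores, per anchor
    return (num_classes + 5) * anchors_per_scale

print(yolo_head_filters(1))  # 18, matching filters=18 in the .cfg
```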
diff --git a/examples/official/9.x/yolo_qr/test.jpg b/examples/official/9.x/yolo_qr/test.jpg
new file mode 100644
index 0000000..2774426
Binary files /dev/null and b/examples/official/9.x/yolo_qr/test.jpg differ
diff --git a/examples/official/9.x/yolo_qr/yolo-dbr-camera.py b/examples/official/9.x/yolo_qr/yolo-dbr-camera.py
new file mode 100644
index 0000000..531d3c9
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/yolo-dbr-camera.py
@@ -0,0 +1,211 @@
+import cv2 as cv
+import numpy as np
+import time
+from threading import Thread
+import queue
+from dbr import *
+
+license_key = "DLS2eyJoYW5kc2hha2VDb2RlIjoiMjAwMDAxLTE2NDk4Mjk3OTI2MzUiLCJvcmdhbml6YXRpb25JRCI6IjIwMDAwMSIsInNlc3Npb25QYXNzd29yZCI6IndTcGR6Vm05WDJrcEQ5YUoifQ=="
+BarcodeReader.init_license(license_key)
+reader = BarcodeReader()
+
+
+def decodeframe(frame, left, top, right, bottom):
+    settings = reader.reset_runtime_settings()
+    settings = reader.get_runtime_settings()
+    settings.region_bottom = bottom
+    settings.region_left = left
+    settings.region_right = right
+    settings.region_top = top
+    settings.barcode_format_ids = EnumBarcodeFormat.BF_QR_CODE
+    settings.expected_barcodes_count = 1
+    reader.update_runtime_settings(settings)
+
+    try:
+        text_results = reader.decode_buffer(frame)
+
+        if text_results is not None:
+            return text_results[0]
+    except BarcodeReaderError as bre:
+        print(bre)
+
+    return None
+
+
+winName = 'QR Detection'
+threshold = 0.6
+
+# Load class names and YOLOv3-tiny model
+classes = open('qrcode.names').read().strip().split('\n')
+net = cv.dnn.readNetFromDarknet(
+    'qrcode-yolov3-tiny.cfg', 'qrcode-yolov3-tiny.weights')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+
+
+def processframe(frame, outs):
+    results = []
+    frameHeight, frameWidth = frame.shape[:2]
+
+    classIds = []
+    confidences = []
+    boxes = []
+
+    for out in outs:
+        for detection in out:
+            scores = detection[5:]
+            classId = np.argmax(scores)
+            confidence = scores[classId]
+            if confidence > threshold:
+                x, y, width, height = detection[:4] * np.array(
+                    [frameWidth, frameHeight, frameWidth, frameHeight])
+                left = int(x - width / 2)
+                top = int(y - height / 2)
+                classIds.append(classId)
+                confidences.append(float(confidence))
+                boxes.append([left, top, int(width), int(height)])
+
+    indices = cv.dnn.NMSBoxes(boxes, confidences, threshold, threshold - 0.1)
+
+    if not isinstance(indices, tuple):
+        for i in indices.flatten():
+            box = boxes[i]
+            left, top, width, height = box
+
+            # Decode barcode
+            result = decodeframe(frame, left, top, left + width, top + height)
+            if result is None:
+                results.append((classes[classIds[i]], confidences[i], left,
+                                top, left + width, top + height, "Decoding Failed"))
+            else:
+                results.append((classes[classIds[i]], confidences[i], left,
+                                top, left + width, top + height, result.barcode_text))
+
+    return results
+
+
+def postprocess(frame, outs):
+    for out in outs:
+        # Draw bounding box for objects
+        cv.rectangle(frame, (out[2], out[3]), (out[4], out[5]), (0, 0, 255))
+
+        # Draw class name and confidence
+        label = '%s:%.2f' % (out[0], out[1])
+        cv.putText(frame, label, (out[2], out[3] - 15),
+                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
+
+        # Draw barcode results
+        barcode_label = '%s' % (out[6])
+        cv.putText(frame, barcode_label,
+                   (out[2], out[3] - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
+
+
+# Determine the output layer
+ln = net.getLayerNames()
+ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
+
+cap = cv.VideoCapture(0)
+
+
+class QueueFPS(queue.Queue):
+    def __init__(self):
+        super().__init__()
+        self.startTime = 0
+        self.counter = 0
+
+    def put(self, v):
+        super().put(v)
+        self.counter += 1
+        if self.counter == 1:
+            self.startTime = time.time()
+
+    def getFPS(self):
+        return self.counter / (time.time() - self.startTime) if self.counter > 1 else 0
+
+
+process = True
+
+# Frames capturing thread
+framesQueue = QueueFPS()
+
+
+def framesThreadBody():
+    global framesQueue, process
+
+    while process:
+        hasFrame, frame = cap.read()
+        if not hasFrame:
+            break
+        framesQueue.put(frame)
+
+
+# Frames processing thread
+processedFramesQueue = queue.Queue()
+predictionsQueue = QueueFPS()
+
+
+def processingThreadBody():
+    global processedFramesQueue, predictionsQueue, process
+
+    while process:
+        # Get the next frame
+        try:
+            frame = framesQueue.get_nowait()
+            framesQueue.queue.clear()  # Clear older frames to avoid lag
+        except queue.Empty:
+            continue
+
+        if frame is not None:
+            blob = cv.dnn.blobFromImage(
+                frame, 1/255, (416, 416), swapRB=True, crop=False)
+            processedFramesQueue.put(frame)
+
+            # Run the model
+            net.setInput(blob)
+            outs = net.forward(ln)
+
+            # Process frame and put results into the predictions queue
+            results = processframe(frame, outs)
+            predictionsQueue.put(results)
+
+
+framesThread = Thread(target=framesThreadBody)
+framesThread.start()
+
+processingThread = Thread(target=processingThreadBody)
+processingThread.start()
+
+# Postprocessing and rendering loop
+while cv.waitKey(1) < 0:
+    try:
+        # Retrieve the predictions and processed frame
+        outs = predictionsQueue.get_nowait()
+        frame = processedFramesQueue.get_nowait()
+
+        # Process and display the results
+        postprocess(frame, outs)
+
+        # Display FPS info
+        if predictionsQueue.counter > 1:
+            label = 'Camera: %.2f FPS' % framesQueue.getFPS()
+            cv.putText(frame, label, (0, 15),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Network: %.2f FPS' % predictionsQueue.getFPS()
+            cv.putText(frame, label, (0, 30),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Skipped frames: %d' % (
+                framesQueue.counter - predictionsQueue.counter)
+            cv.putText(frame, label, (0, 45),
+                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+        cv.imshow(winName, frame)
+    except queue.Empty:
+        continue
+
+# Cleanup
+process = False
+framesThread.join()
+processingThread.join()
+cap.release()
+cv.destroyAllWindows()
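`decodeframe` in `yolo-dbr-camera.py` narrows the DBR search area to the YOLO box via the `region_*` runtime settings, which saves the SDK from scanning the whole frame. YOLO boxes can extend past the frame edges after the center-to-corner conversion, so clamping the region first is a sensible precaution; a hedged sketch (the clamping is my addition, not part of the sample):

```python
def clamp_region(left, top, right, bottom, frame_width, frame_height):
    # Keep the decoding region inside the frame; detection boxes may
    # overshoot the image borders.
    left = max(0, left)
    top = max(0, top)
    right = min(frame_width, right)
    bottom = min(frame_height, bottom)
    return left, top, right, bottom
```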
diff --git a/examples/official/9.x/yolo_qr/yolo-dbr.py b/examples/official/9.x/yolo_qr/yolo-dbr.py
new file mode 100644
index 0000000..77aea06
--- /dev/null
+++ b/examples/official/9.x/yolo_qr/yolo-dbr.py
@@ -0,0 +1,125 @@
+import cv2 as cv
+import numpy as np
+import time
+from dbr import *
+
+license_key = "DLS2eyJoYW5kc2hha2VDb2RlIjoiMjAwMDAxLTE2NDk4Mjk3OTI2MzUiLCJvcmdhbml6YXRpb25JRCI6IjIwMDAwMSIsInNlc3Npb25QYXNzd29yZCI6IndTcGR6Vm05WDJrcEQ5YUoifQ=="
+BarcodeReader.init_license(license_key)
+reader = BarcodeReader()
+
+
+def decodeframe(frame, left, top, right, bottom):
+    settings = reader.reset_runtime_settings()
+    settings = reader.get_runtime_settings()
+    settings.region_bottom = bottom
+    settings.region_left = left
+    settings.region_right = right
+    settings.region_top = top
+    settings.barcode_format_ids = EnumBarcodeFormat.BF_QR_CODE
+    settings.expected_barcodes_count = 1
+    reader.update_runtime_settings(settings)
+
+    try:
+        text_results = reader.decode_buffer(frame)
+
+        if text_results is not None:
+            return text_results[0]
+        # for text_result in text_results:
+        #     print("Barcode Format :")
+        #     print(text_result.barcode_format_string)
+        #     print("Barcode Text :")
+        #     print(text_result.barcode_text)
+        #     print("Localization Points : ")
+        #     print(text_result.localization_result.localization_points)
+        #     print("-------------")
+    except BarcodeReaderError as bre:
+        print(bre)
+
+    return None
+
+
+# Load an image
+frame = cv.imread("416x416.jpg")
+
+threshold = 0.6
+
+# Load class names and YOLOv3-tiny model
+classes = open('qrcode.names').read().strip().split('\n')
+net = cv.dnn.readNetFromDarknet(
+    'qrcode-yolov3-tiny.cfg', 'qrcode-yolov3-tiny.weights')
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+# net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)  # DNN_TARGET_OPENCL DNN_TARGET_CPU DNN_TARGET_CUDA
+
+start_time = time.monotonic()
+# Convert frame to blob
+blob = cv.dnn.blobFromImage(frame, 1/255, (416, 416), swapRB=True, crop=False)
+elapsed_ms = (time.monotonic() - start_time) * 1000
+print('blobFromImage in %.1fms' % (elapsed_ms))
print("Localization Points : ") + # print(text_result.localization_result.localization_points) + # print("-------------") + except BarcodeReaderError as bre: + print(bre) + + return None + + +# Load an image +frame = cv.imread("416x416.jpg") + +threshold = 0.6 + +# Load class names and YOLOv3-tiny model +classes = open('qrcode.names').read().strip().split('\n') +net = cv.dnn.readNetFromDarknet( + 'qrcode-yolov3-tiny.cfg', 'qrcode-yolov3-tiny.weights') +net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) +# net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) # DNN_TARGET_OPENCL DNN_TARGET_CPU DNN_TARGET_CUDA + +start_time = time.monotonic() +# Convert frame to blob +blob = cv.dnn.blobFromImage(frame, 1/255, (416, 416), swapRB=True, crop=False) +elapsed_ms = (time.monotonic() - start_time) * 1000 +print('blobFromImage in %.1fms' % (elapsed_ms)) + + +def postprocess(frame, outs): + frameHeight, frameWidth = frame.shape[:2] + + classIds = [] + confidences = [] + boxes = [] + + for out in outs: + for detection in out: + scores = detection[5:] + classId = np.argmax(scores) + confidence = scores[classId] + if confidence > threshold: + x, y, width, height = detection[:4] * np.array( + [frameWidth, frameHeight, frameWidth, frameHeight]) + left = int(x - width / 2) + top = int(y - height / 2) + classIds.append(classId) + confidences.append(float(confidence)) + boxes.append([left, top, int(width), int(height)]) + + indices = cv.dnn.NMSBoxes(boxes, confidences, threshold, threshold - 0.1) + + if not isinstance(indices, tuple): + for i in indices.flatten(): + box = boxes[i] + left = box[0] + top = box[1] + width = box[2] + height = box[3] + + # Draw bounding box for objects + cv.rectangle(frame, (left, top), + (left + width, top + height), (0, 0, 255)) + + # Draw class name and confidence + label = '%s:%.2f' % (classes[classIds[i]], confidences[i]) + cv.putText(frame, label, (left, top - 15), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0)) + + result = decodeframe(frame, left, top, left + width, top + height) + # Draw barcode results + if not result is None: + label = '%s' % (result.barcode_text) + cv.putText(frame, label, (left, top - 5), + cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0)) + + +# Determine the output layer +ln = net.getLayerNames() +ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()] + +net.setInput(blob) +start_time = time.monotonic() +# Compute +outs = net.forward(ln) +elapsed_ms = (time.monotonic() - start_time) * 1000 +print('forward in %.1fms' % (elapsed_ms)) + +start_time = time.monotonic() +postprocess(frame, outs) +elapsed_ms = (time.monotonic() - start_time) * 1000 +print('postprocess in %.1fms' % (elapsed_ms)) + +cv.imshow('QR Detection', frame) +cv.waitKey()