From cd16dab5cb032651167c9e48cb58d9aea35f0597 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 21:34:19 +0530 Subject: [PATCH 1/9] tf 2.x compatible --- yolo3/model.py | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/yolo3/model.py b/yolo3/model.py index 13b5c7693..9e3da027c 100644 --- a/yolo3/model.py +++ b/yolo3/model.py @@ -4,12 +4,10 @@ import numpy as np import tensorflow as tf -from keras import backend as K -from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D -from keras.layers.advanced_activations import LeakyReLU -from keras.layers.normalization import BatchNormalization -from keras.models import Model -from keras.regularizers import l2 +from tensorflow.keras import backend as K +from tensorflow.keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D, LeakyReLU, BatchNormalization +from tensorflow.keras.models import Model +from tensorflow.keras.regularizers import l2 from yolo3.utils import compose @@ -123,22 +121,22 @@ def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False): """Convert final layer features to bounding box parameters.""" num_anchors = len(anchors) # Reshape to batch, height, width, num_anchors, box_params. - anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) + anchors_tensor = tf.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) - grid_shape = K.shape(feats)[1:3] # height, width - grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), + grid_shape = K.shape(feats).numpy()[1:3] # height, width + grid_y = tf.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1]) - grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), + grid_x = tf.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1]) grid = K.concatenate([grid_x, grid_y]) - grid = K.cast(grid, K.dtype(feats)) + grid = tf.cast(grid, K.dtype(feats)) feats = K.reshape( feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]) # Adjust preditions to each spatial grid point and anchor size. 
- box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats)) - box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats)) + box_xy = (K.sigmoid(feats[..., :2]) + grid) / tf.cast(grid_shape[::-1], K.dtype(feats)) + box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / tf.cast(input_shape[::-1], K.dtype(feats)) box_confidence = K.sigmoid(feats[..., 4:5]) box_class_probs = K.sigmoid(feats[..., 5:]) @@ -151,8 +149,8 @@ def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape): '''Get corrected boxes''' box_yx = box_xy[..., ::-1] box_hw = box_wh[..., ::-1] - input_shape = K.cast(input_shape, K.dtype(box_yx)) - image_shape = K.cast(image_shape, K.dtype(box_yx)) + input_shape = tf.cast(input_shape, K.dtype(box_yx)) + image_shape = tf.cast(image_shape, K.dtype(box_yx)) new_shape = K.round(image_shape * K.min(input_shape/image_shape)) offset = (input_shape-new_shape)/2./input_shape scale = input_shape/new_shape @@ -194,7 +192,7 @@ def yolo_eval(yolo_outputs, """Evaluate YOLO model on given input and return filtered boxes.""" num_layers = len(yolo_outputs) anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting - input_shape = K.shape(yolo_outputs[0])[1:3] * 32 + input_shape = K.shape(yolo_outputs[0]).numpy()[1:3] * 32 boxes = [] box_scores = [] for l in range(num_layers): @@ -362,11 +360,11 @@ def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False): yolo_outputs = args[:num_layers] y_true = args[num_layers:] anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] - input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0])) - grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)] + input_shape = tf.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0])) + grid_shapes = [tf.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)] loss = 0 m = K.shape(yolo_outputs[0])[0] # batch size, tensor - mf = K.cast(m, K.dtype(yolo_outputs[0])) + mf = tf.cast(m, K.dtype(yolo_outputs[0])) for l in range(num_layers): object_mask = y_true[l][..., 4:5] @@ -384,12 +382,12 @@ def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False): # Find ignore mask, iterate over each of batch. ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True) - object_mask_bool = K.cast(object_mask, 'bool') + object_mask_bool = tf.cast(object_mask, 'bool') def loop_body(b, ignore_mask): true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0]) iou = box_iou(pred_box[b], true_box) best_iou = K.max(iou, axis=-1) - ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box))) + ignore_mask = ignore_mask.write(b, tf.cast(best_iou<ignore_thresh, K.dtype(true_box))) return b+1, ignore_mask From: Susnato Dhar Date: Tue, 24 Aug 2021 21:42:47 +0530 Subject: [PATCH 2/9] tf 2.x compatible --- convert.py | 30 ++++++++++++++++----------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/convert.py b/convert.py index 8723e66c2..6be90609e 100644 --- a/convert.py +++ b/convert.py @@ -1,6 +1,6 @@ #! /usr/bin/env python """ -Reads Darknet config and weights and creates Keras model with TF backend. +Reads Darknet config and weights and creates tensorflow.keras model with TF backend.
""" @@ -11,29 +11,27 @@ from collections import defaultdict import numpy as np -from keras import backend as K -from keras.layers import (Conv2D, Input, ZeroPadding2D, Add, - UpSampling2D, MaxPooling2D, Concatenate) -from keras.layers.advanced_activations import LeakyReLU -from keras.layers.normalization import BatchNormalization -from keras.models import Model -from keras.regularizers import l2 -from keras.utils.vis_utils import plot_model as plot +from tensorflow.keras import backend as K +from tensorflow.keras.layers import Conv2D, Input, ZeroPadding2D, Add,UpSampling2D, MaxPooling2D,LeakyReLU,Concatenate,BatchNormalization +from tensorflow.keras.models import Model +from tensorflow.keras.regularizers import l2 +from tensorflow.keras.utils import plot_model as plot -parser = argparse.ArgumentParser(description='Darknet To Keras Converter.') + +parser = argparse.ArgumentParser(description='Darknet To tensorflow.keras Converter.') parser.add_argument('config_path', help='Path to Darknet cfg file.') parser.add_argument('weights_path', help='Path to Darknet weights file.') -parser.add_argument('output_path', help='Path to output Keras model file.') +parser.add_argument('output_path', help='Path to output tensorflow.keras model file.') parser.add_argument( '-p', '--plot_model', - help='Plot generated Keras model and save as image.', + help='Plot generated tensorflow.keras model and save as image.', action='store_true') parser.add_argument( '-w', '--weights_only', - help='Save as Keras weights file instead of model file.', + help='Save as tensorflow.keras weights file instead of model file.', action='store_true') def unique_config_sections(config_file): @@ -84,7 +82,7 @@ def _main(args): cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) - print('Creating Keras model.') + print('Creating tensorflow.keras model.') input_layer = Input(shape=(None, None, 3)) prev_layer = input_layer all_layers = [] @@ -240,10 +238,10 @@ def _main(args): print(model.summary()) if args.weights_only: model.save_weights('{}'.format(output_path)) - print('Saved Keras weights to {}'.format(output_path)) + print('Saved tensorflow.keras weights to {}'.format(output_path)) else: model.save('{}'.format(output_path)) - print('Saved Keras model to {}'.format(output_path)) + print('Saved tensorflow.keras model to {}'.format(output_path)) # Check to see if all weights have been read. 
remaining_weights = len(weights_file.read()) / 4 From d3ed642a17f659dbfc08cff9ae7238355f4d6cc7 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 21:45:59 +0530 Subject: [PATCH 3/9] tf 2.x compatible --- train.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/train.py b/train.py index e0a953f99..a6b579fc3 100644 --- a/train.py +++ b/train.py @@ -3,11 +3,11 @@ """ import numpy as np -import keras.backend as K -from keras.layers import Input, Lambda -from keras.models import Model -from keras.optimizers import Adam -from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping +import tensorflow.keras.backend as K +from tensorflow.keras.layers import Input, Lambda +from tensorflow.keras.models import Model +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data @@ -56,7 +56,7 @@ def _main(): batch_size = 32 print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) - model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), + model.fit(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), @@ -75,7 +75,7 @@ def _main(): batch_size = 32 # note that more GPU memory is required after unfreezing the body print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) - model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), + model.fit(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), @@ -105,7 +105,6 @@ def get_anchors(anchors_path): def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='model_data/yolo_weights.h5'): '''create the training model''' - K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) @@ -135,7 +134,6 @@ def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5'): '''create the training model, for Tiny YOLOv3''' - K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) @@ -163,7 +161,7 @@ def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, f return model def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): - '''data generator for fit_generator''' + '''data generator for fit''' n = len(annotation_lines) i = 0 while True: From e82cece4fbf22e613b2aa8e0d21951aeafedc8dc Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 21:47:40 +0530 Subject: [PATCH 4/9] tf 2.x compatible --- train_bottleneck.py | 19 
+++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/train_bottleneck.py b/train_bottleneck.py index 517ac125e..991ec96d6 100644 --- a/train_bottleneck.py +++ b/train_bottleneck.py @@ -3,11 +3,11 @@ """ import os import numpy as np -import keras.backend as K -from keras.layers import Input, Lambda -from keras.models import Model -from keras.optimizers import Adam -from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping +import tensorflow.keras.backend as K +from tensorflow.keras.layers import Input, Lambda +from tensorflow.keras.models import Model +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data @@ -63,7 +63,7 @@ def _main(): print("Training last layers with bottleneck features") print('with {} samples, val on {} samples and batch size {}.'.format(num_train, num_val, batch_size)) last_layer_model.compile(optimizer='adam', loss={'yolo_loss': lambda y_true, y_pred: y_pred}) - last_layer_model.fit_generator(bottleneck_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, bottlenecks_train), + last_layer_model.fit(bottleneck_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, bottlenecks_train), steps_per_epoch=max(1, num_train//batch_size), validation_data=bottleneck_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, bottlenecks_val), validation_steps=max(1, num_val//batch_size), @@ -77,7 +77,7 @@ def _main(): 'yolo_loss': lambda y_true, y_pred: y_pred}) batch_size = 16 print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) - model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), + model.fit(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), @@ -96,7 +96,7 @@ def _main(): batch_size = 4 # note that more GPU memory is required after unfreezing the body print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) - model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), + model.fit(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), @@ -126,7 +126,6 @@ def get_anchors(anchors_path): def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='model_data/yolo_weights.h5'): '''create the training model''' - K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) @@ -174,7 +173,7 @@ def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze return model, bottleneck_model, last_layer_model def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random=True, verbose=False): - '''data generator for fit_generator''' + 
'''data generator for fit''' n = len(annotation_lines) i = 0 while True: From e4a83a77089951d3da235c61bf69843b89656138 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 22:11:27 +0530 Subject: [PATCH 5/9] tf 2.x compatible --- yolo.py | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/yolo.py b/yolo.py index 4aa348643..0800c2b16 100644 --- a/yolo.py +++ b/yolo.py @@ -3,21 +3,14 @@ Class definition of YOLO_v3 style detection model on image and video """ -import colorsys import os -from timeit import default_timer as timer - +import colorsys import numpy as np -from keras import backend as K -from keras.models import load_model -from keras.layers import Input +import tensorflow.keras.backend as K +from tensorflow.keras.models import load_model +from tensorflow.keras.layers import Input from PIL import Image, ImageFont, ImageDraw - -from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body -from yolo3.utils import letterbox_image -import os -from keras.utils import multi_gpu_model - +from timeit import default_timer as timer class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', @@ -41,7 +34,6 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) # and update with user overrides self.class_names = self._get_class() self.anchors = self._get_anchors() - self.sess = K.get_session() self.boxes, self.scores, self.classes = self.generate() def _get_class(self): @@ -93,10 +85,13 @@ def generate(self): # Generate output tensor targets for filtered bounding boxes. self.input_image_shape = K.placeholder(shape=(2, )) if self.gpu_num>=2: - self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num) + strategy = tf.distribute.MirroredStrategy() + with strategy.scope(): + self.yolo_model = load_model(model_path, compile=False) boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou) + return boxes, scores, classes def detect_image(self, image): @@ -116,13 +111,9 @@ def detect_image(self, image): image_data /= 255. image_data = np.expand_dims(image_data, 0) # Add batch dimension. 
- out_boxes, out_scores, out_classes = self.sess.run( - [self.boxes, self.scores, self.classes], - feed_dict={ - self.yolo_model.input: image_data, - self.input_image_shape: [image.size[1], image.size[0]], - K.learning_phase(): 0 - }) + out_boxes, out_scores, out_classes = yolo_eval(self.yolo_model.predict(image_data), self.anchors, + len(self.class_names), (image_data.shape[0], image_data.shape[0]), + score_threshold=self.score, iou_threshold=self.iou) print('Found {} boxes for {}'.format(len(out_boxes), 'img')) @@ -166,8 +157,6 @@ def detect_image(self, image): print(end - start) return image - def close_session(self): - self.sess.close() def detect_video(yolo, video_path, output_path=""): import cv2 @@ -208,5 +197,4 @@ def detect_video(yolo, video_path, output_path=""): out.write(result) if cv2.waitKey(1) & 0xFF == ord('q'): break - yolo.close_session() From 83f94b5e8740a055053dfda877da6a90cabecf15 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 22:13:34 +0530 Subject: [PATCH 6/9] tf 2.x compatible --- yolo_video.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yolo_video.py b/yolo_video.py index 7c394617c..52e3a8906 100644 --- a/yolo_video.py +++ b/yolo_video.py @@ -14,7 +14,6 @@ def detect_img(yolo): else: r_image = yolo.detect_image(image) r_image.show() - yolo.close_session() FLAGS = None From fa756b200a94cbaa448643b77795b07cf78182e1 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 22:29:49 +0530 Subject: [PATCH 7/9] tf 2.x compatible --- yolo.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/yolo.py b/yolo.py index 0800c2b16..04120dd5c 100644 --- a/yolo.py +++ b/yolo.py @@ -11,6 +11,10 @@ from tensorflow.keras.layers import Input from PIL import Image, ImageFont, ImageDraw from timeit import default_timer as timer + +from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body +from yolo3.utils import letterbox_image + class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', From bea1bd1e5fdf6db72cefe95bfd67fbb4cd045ced Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Tue, 24 Aug 2021 23:12:54 +0530 Subject: [PATCH 8/9] tf 2.x compatible --- yolo3/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yolo3/model.py b/yolo3/model.py index 9e3da027c..f90dec818 100644 --- a/yolo3/model.py +++ b/yolo3/model.py @@ -123,7 +123,7 @@ def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False): # Reshape to batch, height, width, num_anchors, box_params. 
anchors_tensor = tf.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) - grid_shape = K.shape(feats).numpy()[1:3] # height, width + grid_shape = K.shape(feats)[1:3] # height, width grid_y = tf.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1]) grid_x = tf.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), @@ -192,7 +192,7 @@ def yolo_eval(yolo_outputs, """Evaluate YOLO model on given input and return filtered boxes.""" num_layers = len(yolo_outputs) anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting - input_shape = K.shape(yolo_outputs[0]).numpy()[1:3] * 32 + input_shape = K.shape(yolo_outputs[0])[1:3] * 32 boxes = [] box_scores = [] for l in range(num_layers): From 8793701a0f059a2a1faccd55ad15c7169213d7f1 Mon Sep 17 00:00:00 2001 From: Susnato Dhar Date: Wed, 25 Aug 2021 00:15:20 +0530 Subject: [PATCH 9/9] tf 2.x compaitable --- yolo3/model.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/yolo3/model.py b/yolo3/model.py index f90dec818..6f988f338 100644 --- a/yolo3/model.py +++ b/yolo3/model.py @@ -129,14 +129,14 @@ def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False): grid_x = tf.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1]) grid = K.concatenate([grid_x, grid_y]) - grid = tf.cast(grid, K.dtype(feats)) + grid = tf.cast(grid, feats.dtype) feats = K.reshape( feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]) # Adjust preditions to each spatial grid point and anchor size. - box_xy = (K.sigmoid(feats[..., :2]) + grid) / tf.cast(grid_shape[::-1], K.dtype(feats)) - box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / tf.cast(input_shape[::-1], K.dtype(feats)) + box_xy = (K.sigmoid(feats[..., :2]) + grid) / tf.cast(grid_shape[::-1], feats.dtype) + box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / tf.cast(input_shape[::-1], feats.dtype) box_confidence = K.sigmoid(feats[..., 4:5]) box_class_probs = K.sigmoid(feats[..., 5:]) @@ -149,8 +149,8 @@ def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape): '''Get corrected boxes''' box_yx = box_xy[..., ::-1] box_hw = box_wh[..., ::-1] - input_shape = tf.cast(input_shape, K.dtype(box_yx)) - image_shape = tf.cast(image_shape, K.dtype(box_yx)) + input_shape = tf.cast(input_shape, box_yx.dtype) + image_shape = tf.cast(image_shape, box_yx.dtype) new_shape = K.round(image_shape * K.min(input_shape/image_shape)) offset = (input_shape-new_shape)/2./input_shape scale = input_shape/new_shape @@ -360,11 +360,11 @@ def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False): yolo_outputs = args[:num_layers] y_true = args[num_layers:] anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] - input_shape = tf.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0])) - grid_shapes = [tf.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)] + input_shape = tf.cast(K.shape(yolo_outputs[0])[1:3] * 32, y_true[0].dtype) + grid_shapes = [tf.cast(K.shape(yolo_outputs[l])[1:3], y_true[0].dtype) for l in range(num_layers)] loss = 0 m = K.shape(yolo_outputs[0])[0] # batch size, tensor - mf = tf.cast(m, K.dtype(yolo_outputs[0])) + mf = tf.cast(m, yolo_outputs[0].dtype) for l in range(num_layers): object_mask = y_true[l][..., 4:5] @@ -381,13 +381,13 @@ def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False): box_loss_scale = 2 - 
y_true[l][...,2:3]*y_true[l][...,3:4] # Find ignore mask, iterate over each of batch. - ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True) + ignore_mask = tf.TensorArray(y_true[0].dtype, size=1, dynamic_size=True) object_mask_bool = tf.cast(object_mask, 'bool') def loop_body(b, ignore_mask): true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0]) iou = box_iou(pred_box[b], true_box) best_iou = K.max(iou, axis=-1) ignore_mask = ignore_mask.write(b, tf.cast(best_iou<ignore_thresh, K.dtype(true_box)))
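Taken together, the nine patches apply a small set of recurring TF 2.x migration moves: imports from tensorflow.keras instead of standalone keras, tf.cast(x, y.dtype) in place of K.cast(x, K.dtype(y)), Model.fit in place of the removed fit_generator, and tf.distribute.MirroredStrategy in place of multi_gpu_model. The snippet below is a minimal sketch of those patterns on a toy model, not code from this repository; the Dense model and toy_generator are illustrative stand-ins only.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

# tf.distribute.MirroredStrategy replaces the removed keras.utils.multi_gpu_model;
# with a single visible device it simply wraps that one device.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    inputs = Input(shape=(4,))
    outputs = Dense(1)(inputs)
    model = Model(inputs, outputs)
    model.compile(optimizer='adam', loss='mse')

def toy_generator(batch_size=8):
    """Stand-in for data_generator_wrapper(): yields (x, y) batches forever."""
    while True:
        x = np.random.rand(batch_size, 4).astype('float32')
        y = x.sum(axis=1, keepdims=True)
        yield x, y

# Model.fit accepts Python generators directly in TF 2.x; fit_generator is gone.
model.fit(toy_generator(), steps_per_epoch=10, epochs=1, verbose=0)

# Inside tensor code, tf.cast(x, y.dtype) replaces K.cast(x, K.dtype(y)).
t = tf.constant([0.2, 0.7])
mask = tf.cast(t > 0.5, t.dtype)
print(mask.numpy())  # [0. 1.]

With only one visible device the strategy scope behaves like a plain model build, so the same script also doubles as a quick smoke test for the tensorflow.keras imports used throughout the patches.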