diff --git a/demo/README.md b/demo/README.md
index 7c9db57..104e420 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -7,7 +7,7 @@ You can run the DGP pipeline on the example dataset provided in this repo by run
 python demo/run_dgp_demo.py --dlcpath data/Reaching-Mackenzie-2018-08-30
 ```
 
-The output of the pipeline, including the labeled videos and the h5/csv files with predicted trajectories will be stored in `{DGP_DIR}/data/Reaching-Mackenzie-2018-08-30/videos_pred`.
+The output of the pipeline, including the labeled videos and the h5/csv files with predicted trajectories, will be stored in `{DGP_DIR}/data/Reaching-Mackenzie-2018-08-30/videos_pred`. Training statistics are recorded in `{DGP_DIR}/data/dlc-models/iteration-0/ReachingAug30-trainset95shuffle1/train/learning_stats.csv`.
 
 You can run the DGP pipeline on your own dataset as long as it exists in a DLC file directory structure, for example
@@ -28,4 +28,4 @@ In particular, you can use the DLC GUI to create a DLC project, label videos, an
 `python {DGP_DIR}/demo/run_dgp_demo.py --dlcpath {PROJ_DIR}/task-scorer-date/ --shuffle 'the shuffle to run' --dlcsnapshot 'specify the DLC snapshot if you've already run DLC with location refinement'`
 
-If you have not yet run DLC you can simply remove the `--dlcsnapshot` argument and DLC will automatically be fit as part of the pipeline.
\ No newline at end of file
+If you have not yet run DLC, you can simply remove the `--dlcsnapshot` argument and DLC will automatically be fit as part of the pipeline.
diff --git a/demo/predict_dgp_demo.py b/demo/predict_dgp_demo.py
new file mode 100755
index 0000000..0f9a039
--- /dev/null
+++ b/demo/predict_dgp_demo.py
@@ -0,0 +1,157 @@
+# If you have collected labels using DLC's GUI you can run DGP prediction with this script.
+"""Prediction script for DGP.
+    Runs step 3 of the pipeline (predict on all videos), assuming a trained
+    DGP model (snapshot-step2-final--0) already exists for the project.
+"""
+import argparse
+import os
+from os import listdir
+from os.path import isfile, join
+from pathlib import Path
+import sys
+import yaml
+
+if sys.platform == 'darwin':
+    import wx
+    if int(wx.__version__[0]) > 3:
+        wx.Thread_IsMain = wx.IsMainThread
+
+os.environ["DLClight"] = "True"
+os.environ["Colab"] = "True"
+from deeplabcut.utils import auxiliaryfunctions
+
+from deepgraphpose.models.fitdgp import fit_dlc, fit_dgp, fit_dgp_labeledonly
+from deepgraphpose.models.fitdgp_util import get_snapshot_path
+from deepgraphpose.models.eval import plot_dgp
+from deepgraphpose.contrib.segment_videos import split_video
+from run_dgp_demo import update_config_files_general
+
+if __name__ == '__main__':
+
+    # %% set up dlcpath for DLC project and hyperparameters
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--dlcpath",
+        type=str,
+        default=None,
+        help="the absolute path of the DLC project",
+    )
+
+    parser.add_argument(
+        "--snapshot",
+        type=str,
+        default=None,
+        help="use snapshot for prediction. If not given, assumes `snapshot-step2-final--0`",
+    )
+
+    parser.add_argument("--shuffle", type=int, default=1, help="Project shuffle")
+    parser.add_argument(
+        "--batch_size",
+        type=int,
+        default=10,
+        help="size of the batch; if there are memory issues, decrease its value")
+    parser.add_argument("--test", action='store_true', default=False)
+
+    parser.add_argument("--split", action="store_true", help="whether to run inference on videos chopped into shorter clips")
+    parser.add_argument("--splitlength", default=6000, help="number of frames per block if splitting videos")
+
+    input_params = parser.parse_known_args()[0]
+    print(input_params)
+
+    dlcpath = input_params.dlcpath
+    shuffle = input_params.shuffle
+    if input_params.snapshot is not None:
+        snapshot = input_params.snapshot
+    else:
+        ## Default to the snapshot produced at the end of training (step 2).
+        step = 2
+        snapshot = 'snapshot-step{}-final--0'.format(step)
+
+    batch_size = input_params.batch_size
+    test = input_params.test
+    splitflag, splitlength = input_params.split, input_params.splitlength
+
+    # update config files
+    dlcpath = update_config_files_general(dlcpath, shuffle)
+    update_configs = True
+
+    try:
+
+        # --------------------------------------------------------------------------------
+        # Test DGP model
+        # --------------------------------------------------------------------------------
+
+        # %% step 3 predict on all videos in videos_dgp folder
+        print(
+            '''
+            ==========================
+            |                        |
+            |                        |
+            |   Predict with DGP     |
+            |                        |
+            |                        |
+            ==========================
+            '''
+            , flush=True)
+
+        snapshot_path, cfg_yaml = get_snapshot_path(snapshot, dlcpath, shuffle=shuffle)
+        cfg = auxiliaryfunctions.read_config(cfg_yaml)
+
+        video_path = str(Path(dlcpath) / 'videos_dgp')
+        if not (os.path.exists(video_path)):
+            print(video_path + " does not exist!")
+            video_sets = list(cfg['video_sets'])
+        else:
+            video_sets = [
+                video_path + '/' + f for f in listdir(video_path)
+                if isfile(join(video_path, f)) and (
+                    f.find('avi') > 0 or f.find('mp4') > 0 or f.find('mov') > 0 or f.find(
+                        'mkv') > 0)
+            ]
+
+        video_pred_path = str(Path(dlcpath) / 'videos_pred')
+        if not os.path.exists(video_pred_path):
+            os.makedirs(video_pred_path)
+
+        if splitflag:
+            video_cut_path = str(Path(dlcpath) / 'videos_cut')
+            if not os.path.exists(video_cut_path):
+                os.makedirs(video_cut_path)
+                clip_sets = []
+                for v in video_sets:
+                    clip_sets.extend(split_video(v, int(splitlength), suffix="demo", outputloc=video_cut_path))
+            else:
+                ## videos_cut already exists: reuse the clips from a previous run instead of re-splitting.
+                clip_sets = [os.path.join(video_cut_path, v) for v in os.listdir(video_cut_path)]
+            video_sets = clip_sets  ## replace video_sets with the clipped versions.
+
+        print('video_sets', video_sets, flush=True)
+
+        if test:
+            for video_file in [video_sets[0]]:
+                from moviepy.editor import VideoFileClip
+                clip = VideoFileClip(str(video_file))
+                if clip.duration > 10:
+                    clip = clip.subclip(0, 10)  # keep only the first 10 seconds for a quick test
+                video_file_name = os.path.splitext(video_file)[0] + "test" + ".mp4"
+                print('\nwriting {}'.format(video_file_name))
+                clip.write_videofile(video_file_name)
+                plot_dgp(str(video_file_name),
+                         str(video_pred_path),
+                         proj_cfg_file=str(cfg_yaml),
+                         dgp_model_file=str(snapshot_path),
+                         shuffle=shuffle)
+        else:
+            for video_file in video_sets:
+                plot_dgp(str(video_file),
+                         str(video_pred_path),
+                         proj_cfg_file=str(cfg_yaml),
+                         dgp_model_file=str(snapshot_path),
+                         shuffle=shuffle)
+    finally:
+        pass
+
+    #if update_configs:
+    #    return_configs()
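For context, a minimal sketch of what the new `--split` path in `predict_dgp_demo.py` does, using the `split_video` helper added in `src/deepgraphpose/contrib/segment_videos.py` later in this diff. The project path and video name are hypothetical:

```
# Sketch of the --split path: chop a long video into 6000-frame mp4 subclips
# inside a videos_cut folder, then predict on each subclip as an ordinary video.
from pathlib import Path
from deepgraphpose.contrib.segment_videos import split_video

PROJ_DIR = Path("/data/task-scorer-date")      # hypothetical DLC project
video_cut_path = PROJ_DIR / "videos_cut"
video_cut_path.mkdir(exist_ok=True)

clips = split_video(str(PROJ_DIR / "videos_dgp" / "session1.mp4"),
                    splitlength=6000, suffix="demo",
                    outputloc=str(video_cut_path))
# `clips` is the list of subclip paths that the demo then feeds to plot_dgp.
```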
diff --git a/demo/predict_dgp_ensemble.py b/demo/predict_dgp_ensemble.py
new file mode 100755
index 0000000..c1d4f52
--- /dev/null
+++ b/demo/predict_dgp_ensemble.py
@@ -0,0 +1,175 @@
+# If you have collected labels using DLC's GUI you can run DGP prediction with this script.
+"""Ensemble prediction for DGP.
+    Runs each trained DGP model in the ensemble on a video and writes
+    consensus trajectories (mean pose across models) to csv/h5.
+"""
+import argparse
+import os
+from os import listdir
+from os.path import isfile, join
+from pathlib import Path
+from joblib import Memory
+import sys
+import yaml
+import pandas as pd
+import numpy as np
+
+if sys.platform == 'darwin':
+    import wx
+    if int(wx.__version__[0]) > 3:
+        wx.Thread_IsMain = wx.IsMainThread
+
+os.environ["DLClight"] = "True"
+os.environ["Colab"] = "True"
+
+from moviepy.editor import VideoFileClip
+
+from deeplabcut.utils import auxiliaryfunctions
+
+from deepgraphpose.models.fitdgp import fit_dlc, fit_dgp, fit_dgp_labeledonly
+from deepgraphpose.models.fitdgp_util import get_snapshot_path
+from deepgraphpose.models.eval import plot_dgp
+from run_dgp_demo import update_config_files_general
+
+from dgp_ensembletools.models import Ensemble
+
+if __name__ == '__main__':
+
+    # %% set up dlcpath for DLC project and hyperparameters
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--modelpaths",
+        nargs="+",
+        type=str,
+        default=None,
+        help="the absolute paths of the DLC projects you want to ensemble",
+    )
+
+    parser.add_argument(
+        "--dlcsnapshot",
+        type=str,
+        default=None,
+        help="use the DLC snapshot to initialize DGP",
+    )
+
+    parser.add_argument(
+        "--videopath",
+        type=str,
+        default=None,
+        help="path to video",
+    )
+
+    parser.add_argument("--shuffle", type=int, default=1, help="Project shuffle")
+    parser.add_argument(
+        "--batch_size",
+        type=int,
+        default=10,
+        help="size of the batch; if there are memory issues, decrease its value")
+    parser.add_argument("--test", action='store_true', default=False)
+
+    input_params = parser.parse_known_args()[0]
+    print(input_params)
+
+    modelpaths = input_params.modelpaths
+    shuffle = input_params.shuffle
+    videopath = input_params.videopath
+    dlcsnapshot = input_params.dlcsnapshot
+    batch_size = input_params.batch_size
+    test = input_params.test
+
+    # update config files
+    for modelpath in modelpaths:
+        dlcpath = update_config_files_general(modelpath, shuffle)
+        update_configs = True
+
+    ## Specify the snapshot produced at the end of training (step 2).
+    step = 2
+    snapshot = 'snapshot-step{}-final--0'.format(step)
+
+    try:
+
+        # --------------------------------------------------------------------------------
+        # Test DGP model
+        # --------------------------------------------------------------------------------
+
+        # %% step 3 predict on all videos in videos_dgp folder
+        print(
+            '''
+            ==================================
+            |                                |
+            |                                |
+            |   Predict with DGP Ensemble    |
+            |                                |
+            |                                |
+            ==================================
+            '''
+            , flush=True)
+
+        ## get ensemble parameters
+        topdir = os.path.dirname(modelpaths[0])  ## the models are assumed to live in the same parent directory.
+        modeldirs = [os.path.basename(m) for m in modelpaths]
+        videoext = os.path.splitext(videopath)[-1].split(".", 1)[-1]  ## extension without the dot.
+        video = VideoFileClip(videopath)
+        ## Write results to:
+        resultpath = os.path.join(os.path.dirname(os.path.dirname(videopath)), "results")
+
+        framelength = int(video.duration * video.fps)
+        ## can do some processing based on length here.
+        framerange = range(0, framelength)
+
+        remoteensemble = Ensemble(topdir, modeldirs, videoext, memory=Memory(os.path.dirname(videopath)))
+        for model in remoteensemble.models.values():
+            model.predict(videopath)
+        predict_videoname = "_labeled".join(os.path.splitext(os.path.basename(videopath)))
+        predict_h5name = "_labeled".join([os.path.splitext(os.path.basename(videopath))[0], ".h5"])
+        consensus_videoname = "_labeled_consensus".join(os.path.splitext(os.path.basename(videopath)))
+        consensus_csvname = "_labeled_consensus".join([os.path.splitext(os.path.basename(videopath))[0], ".csv"])
+        consensus_h5name = "_labeled_consensus".join([os.path.splitext(os.path.basename(videopath))[0], ".h5"])
+        ## get_mean_pose outputs pose of shape (xy, time, body part).
+        meanx, meany = remoteensemble.get_mean_pose(predict_videoname, framerange, snapshot=snapshot, shuffle=shuffle)
+
+        ## reshape and save in the shape of the existing prediction files:
+        likearray = np.empty(meanx.shape)  ## likelihoods are not ensembled right now.
+        likearray[:] = np.nan
+        stacked = np.stack((meanx, meany, likearray), axis=-1)
+        dfshaped = stacked.reshape(stacked.shape[0], stacked.shape[1] * stacked.shape[2])
+        ## get a sample dataframe to borrow the column index from:
+        sampledf = pd.read_hdf(os.path.join(modelpaths[0], "videos_pred", predict_h5name))
+        sampledf.iloc[:len(dfshaped), :] = dfshaped
+        sampledf.drop([i for i in range(len(dfshaped), len(sampledf))], inplace=True)
+
+        if not os.path.isdir(resultpath):
+            os.makedirs(resultpath)
+
+        sampledf.to_csv(os.path.join(resultpath, consensus_csvname))
+        sampledf.to_hdf(os.path.join(resultpath, consensus_h5name), key="consensus")
+
+        ### Not writing video of consensus for now:
+        #if test:
+        #    for video_file in [video_sets[0]]:
+        #        clip = VideoFileClip(str(video_file))
+        #        if clip.duration > 10:
+        #            clip = clip.subclip(10)
+        #        video_file_name = os.path.splitext(video_file)[0] + "test" + ".mp4"
+        #        print('\nwriting {}'.format(video_file_name))
+        #        clip.write_videofile(video_file_name)
+        #        plot_dgp(str(video_file_name),
+        #                 str(video_pred_path),
+        #                 proj_cfg_file=str(cfg_yaml),
+        #                 dgp_model_file=str(snapshot_path),
+        #                 shuffle=shuffle)
+        #else:
+        #    for video_file in video_sets:
+        #        plot_dgp(str(video_file),
+        #                 str(video_pred_path),
+        #                 proj_cfg_file=str(cfg_yaml),
+        #                 dgp_model_file=str(snapshot_path),
+        #                 shuffle=shuffle)
+    finally:
+        pass
+
+    #if update_configs:
+    #    return_configs()
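The consensus export above hinges on one reshape: the per-coordinate mean poses are stacked into DLC's `(x, y, likelihood)` column layout before being written into a borrowed dataframe. A self-contained sketch of that layout, with toy sizes and no real data:

```
# Toy illustration of the consensus reshape: (time, bodyparts) arrays per
# coordinate become DLC's (time, bodyparts * 3) column layout.
import numpy as np

T, P = 5, 2                              # 5 frames, 2 body parts (toy sizes)
meanx, meany = np.zeros((T, P)), np.ones((T, P))
likearray = np.full((T, P), np.nan)      # likelihoods are not ensembled here

stacked = np.stack((meanx, meany, likearray), axis=-1)  # shape (T, P, 3)
dfshaped = stacked.reshape(T, P * 3)     # x, y, likelihood per body part
assert dfshaped.shape == (5, 6)
```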
diff --git a/demo/run_dgp_demo.py b/demo/run_dgp_demo.py
old mode 100644
new mode 100755
index 875a6bd..311f5cb
--- a/demo/run_dgp_demo.py
+++ b/demo/run_dgp_demo.py
@@ -21,12 +21,69 @@ os.environ["DLClight"] = "True"
 os.environ["Colab"] = "True"
 
 from deeplabcut.utils import auxiliaryfunctions
-
+from deepgraphpose.contrib.segment_videos import split_video
 from deepgraphpose.models.fitdgp import fit_dlc, fit_dgp, fit_dgp_labeledonly
 from deepgraphpose.models.fitdgp_util import get_snapshot_path
 from deepgraphpose.models.eval import plot_dgp
 
 
+def update_config_files_general(dlcpath, shuffle):
+    """General-purpose version of the function below that applies to all models, not just the default reaching demo.
+    In addition to the DLC path, it requires the Task, date, and TrainingFraction parameters from the project
+    config file in order to locate the model configs.
+    """
+    base_path = os.getcwd()
+
+    # project config
+    proj_cfg_path = os.path.join(base_path, dlcpath, 'config.yaml')
+    with open(proj_cfg_path, 'r') as f:
+        yaml_cfg = yaml.load(f, Loader=yaml.SafeLoader)
+        yaml_cfg['project_path'] = os.path.join(base_path, dlcpath)
+        task = yaml_cfg["Task"]
+        TrainingFraction = yaml_cfg["TrainingFraction"][0]  ## Train with the first training fraction.
+        date = yaml_cfg["date"]
+        try:
+            video_locs = yaml_cfg["video_sets"]
+            #full_sets = {os.path.join(base_path,dlcpath,vl):cropdata for vl,cropdata in video_locs.items()}
+            ## TODO Test this line: assumes videos are correctly located in the project's videos subdirectory.
+            full_sets = {os.path.join(base_path, dlcpath, "videos", os.path.basename(vl)): cropdata for vl, cropdata in video_locs.items()}
+            yaml_cfg["video_sets"] = full_sets
+        except KeyError:
+            print("no videos given.")
+
+    # video_loc = os.path.join(base_path, dlcpath, 'videos', 'reachingvideo1.avi')
+    # try:
+    #     yaml_cfg['video_sets'][video_loc] = yaml_cfg['video_sets'].pop('videos/reachingvideo1.avi')
+    # except KeyError:
+    #     ## check if update has already been done.
+    #     assert yaml_cfg["video_sets"].get(video_loc, False), "Can't find original or updated video path in config file."
+    with open(proj_cfg_path, 'w') as f:
+        yaml.dump(yaml_cfg, f)
+
+    # train model config
+    projectname = "{t}{d}-trainset{tf}shuffle{s}".format(t=task, d=date, tf=int(TrainingFraction * 100), s=shuffle)
+    model_cfg_path = get_model_cfg_path_general(base_path, dlcpath, 'train', projectname)
+    with open(model_cfg_path, 'r') as f:
+        yaml_cfg = yaml.load(f, Loader=yaml.SafeLoader)
+        yaml_cfg['init_weights'] = get_init_weights_path(base_path)
+        yaml_cfg['project_path'] = os.path.join(base_path, dlcpath)
+    with open(model_cfg_path, 'w') as f:
+        yaml.dump(yaml_cfg, f)
+
+    # check that the resnet weights exist
+    if not os.path.exists(yaml_cfg['init_weights']):
+        raise FileNotFoundError('Must download resnet-50 weights; see README for instructions')
+
+    # test model config
+    model_cfg_path = get_model_cfg_path_general(base_path, dlcpath, 'test', projectname)
+    with open(model_cfg_path, 'r') as f:
+        yaml_cfg = yaml.load(f, Loader=yaml.SafeLoader)
+        yaml_cfg['init_weights'] = get_init_weights_path(base_path)
+    with open(model_cfg_path, 'w') as f:
+        yaml.dump(yaml_cfg, f)
+
+    return os.path.join(base_path, dlcpath)
+
+
 def update_config_files(dlcpath):
     base_path = os.getcwd()
@@ -34,12 +91,13 @@ def update_config_files(dlcpath):
 
     # project config
     proj_cfg_path = join(base_path, dlcpath, 'config.yaml')
     with open(proj_cfg_path, 'r') as f:
         yaml_cfg = yaml.load(f, Loader=yaml.SafeLoader)
-        yaml_cfg['project_path'] = join(base_path, dlcpath)
-        video_loc = join(base_path, dlcpath, 'videos', 'reachingvideo1.avi')
+        yaml_cfg['project_path'] = os.path.join(base_path, dlcpath)
+        video_loc = os.path.join(base_path, dlcpath, 'videos', 'reachingvideo1.avi')
         try:
-            yaml_cfg['video_sets'][video_loc] = yaml_cfg['video_sets'].pop(join('videos','reachingvideo1.avi'))
-        except:
-            yaml_cfg['video_sets'][video_loc] = yaml_cfg['video_sets'].pop(video_loc)
+            yaml_cfg['video_sets'][video_loc] = yaml_cfg['video_sets'].pop('videos/reachingvideo1.avi')
+        except KeyError:
+            ## check if update has already been done.
+            assert yaml_cfg["video_sets"].get(video_loc, False), "Can't find original or updated video path in config file."
     with open(proj_cfg_path, 'w') as f:
         yaml.dump(yaml_cfg, f)
@@ -104,6 +162,14 @@ def get_model_cfg_path(base_path, dtype):
         base_path, dlcpath, 'dlc-models', 'iteration-0',
         'ReachingAug30-trainset95shuffle1', dtype, 'pose_cfg.yaml')
 
 
+def get_model_cfg_path_general(base_path, dlcpath, dtype, projectname):
+    """General-purpose version of get_model_cfg_path that works with non-demo projects."""
+    return os.path.join(
+        base_path, dlcpath, 'dlc-models', 'iteration-0', projectname,
+        dtype, 'pose_cfg.yaml')
+
+
 def get_init_weights_path(base_path):
     return join(
@@ -136,6 +202,8 @@ def get_init_weights_path(base_path):
         default=10,
         help="size of the batch, if there are memory issues, decrease it value")
     parser.add_argument("--test", action='store_true', default=False)
+    parser.add_argument("--split", action="store_true", help="whether to run inference on videos chopped into shorter clips")
+    parser.add_argument("--splitlength", default=6000, help="number of frames per block if splitting videos")
 
     input_params = parser.parse_known_args()[0]
     print(input_params)
@@ -145,12 +213,11 @@ def get_init_weights_path(base_path):
     dlcsnapshot = input_params.dlcsnapshot
     batch_size = input_params.batch_size
     test = input_params.test
+    splitflag, splitlength = input_params.split, input_params.splitlength
 
-    update_configs = False
-    if dlcpath == join('data','Reaching-Mackenzie-2018-08-30'):
-        # update config files
-        dlcpath = update_config_files(dlcpath)
-        update_configs = True
+    # update config files
+    dlcpath = update_config_files_general(dlcpath, shuffle)
+    update_configs = True
 
     # ------------------------------------------------------------------------------------
     # Train models
@@ -283,6 +350,14 @@
         os.makedirs(video_pred_path)
     print('video_sets', video_sets, flush=True)
 
+    if splitflag:
+        video_cut_path = str(Path(dlcpath) / 'videos_cut')
+        if not os.path.exists(video_cut_path):
+            os.makedirs(video_cut_path)
+        clip_sets = []
+        for v in video_sets:
+            clip_sets.extend(split_video(v, int(splitlength), suffix="demo", outputloc=video_cut_path))
+        video_sets = clip_sets  ## replace video_sets with the clipped versions.
 
     if test:
         for video_file in [video_sets[0]]:
@@ -309,6 +384,7 @@
                  dgp_model_file=str(snapshot_path),
                  shuffle=shuffle)
     finally:
+        pass
 
-        if update_configs:
-            return_configs()
+        #if update_configs:
+        #    return_configs()
diff --git a/src/deepgraphpose/contrib/segment_videos.py b/src/deepgraphpose/contrib/segment_videos.py
new file mode 100644
index 0000000..3856610
--- /dev/null
+++ b/src/deepgraphpose/contrib/segment_videos.py
@@ -0,0 +1,85 @@
+import os
+from moviepy.editor import VideoFileClip
+# Given a DLC project, check all videos and clip those that are longer than some tolerance into shorter disjoint clips.
+
+# Take the project and look within for all videos that will be trained on (these come from the config file) and analyzed (these come from the folder videos_dgp).
+# Videos that have labels need to be split on the training labels as well.
+
+
+def split_video_and_trainframes(config_path, tol=5000, suffix=""):
+    """Splits videos and training frames in a model config that are longer than some tolerance in frames.
+
+    :param config_path: path to the project config file.
+    :param tol: tolerance in number of frames.
+    :param suffix: custom suffix to add to subclips.
+    """
+    trainvids = check_videos(config_path, tol)
+    ## videos to analyze live in the project's videos_dgp subdirectory.
+    folder = os.path.join(os.path.dirname(config_path), "videos_dgp")
+    analyzevids = check_analysis_videos(folder, tol)
+    splitlength = tol
+    vids = trainvids + analyzevids
+    for v in vids:
+        split_video(v, splitlength, suffix)
+        if v in trainvids:
+            format_frames(v, splitlength, suffix)
+
+
+def check_videos(config_path, tol):
+    """Checks all videos given in the model cfg file and returns those longer than the given length.
+
+    :param config_path: path to the project config file.
+    :param tol: tolerance in number of frames.
+    :returns: list of paths to videos exceeding the tolerance.
+    """
+    # TODO: not implemented yet; return an empty list so callers can proceed.
+    return []
+
+
+def check_analysis_videos(folder_path, tol):
+    """Checks all videos in the videos_dgp directory and returns those longer than the given length.
+
+    :param folder_path: path to the videos_dgp directory.
+    :param tol: tolerance in number of frames.
+    :returns: list of paths to videos exceeding the tolerance.
+    """
+    # TODO: not implemented yet; return an empty list so callers can proceed.
+    return []
+
+
+def split_video(vidpath, splitlength, suffix="", outputloc=None):
+    """Splits a given video into mp4 subclips. Note: subclips have equal frame counts only if you pass a splitlength that is divisible by the frame rate.
+
+    :param vidpath: path to video.
+    :param splitlength: length to chunk into, in frames.
+    :param suffix: custom suffix to add to subclips.
+    :param outputloc: directory to write outputs to. Defaults to the directory of the input video.
+    :returns: list of paths to the new video files.
+    """
+    try:
+        clip = VideoFileClip(vidpath)
+    except FileNotFoundError:
+        print("file not found.")
+        raise
+
+    duration = clip.duration
+    splitlength_secs = splitlength / clip.fps
+    viddir, vidname = os.path.dirname(vidpath), os.path.basename(vidpath)
+    base, ext = os.path.splitext(vidname)
+    subname = base + suffix + "{n}" + ".mp4"
+    if outputloc is None:
+        subpath = os.path.join(viddir, subname)
+    else:
+        subpath = os.path.join(outputloc, subname)
+
+    clipnames = []
+    clipstart = 0
+    clipind = 0
+    while clipstart < duration:
+        subname = subpath.format(n=clipind)
+        subclip = clip.subclip(clipstart, min(duration, clipstart + splitlength_secs))
+        subclip.write_videofile(subname, codec="mpeg4")
+        clipnames.append(subname)
+        clipstart += splitlength_secs
+        clipind += 1
+    return clipnames
+
+
+def format_frames(vidpath, splitlength, suffix=""):
+    """Reformats training frames into a format that matches the subclips.
+
+    :param vidpath: path to video.
+    :param splitlength: length to chunk into, in frames.
+    :param suffix: custom suffix to add to subclips.
+    """
+    # TODO: not implemented yet.
diff --git a/src/deepgraphpose/preprocess/get_morig_labeled_data.py b/src/deepgraphpose/preprocess/get_morig_labeled_data.py
index 65f5b95..cd219cf 100644
--- a/src/deepgraphpose/preprocess/get_morig_labeled_data.py
+++ b/src/deepgraphpose/preprocess/get_morig_labeled_data.py
@@ -179,7 +179,7 @@ def create_labels(task, date, overwrite_flag=False, check_labels=False, verbose=
 
 #%%
-def create_labels_md(config_path, video_path, scorer, overwrite_flag=False, check_labels=False, verbose=False):
+def create_labels_md(config_path, video_path, scorer, overwrite_flag=False, check_labels=False, verbose=False, seed=None):
 
     from deepgraphpose.utils_data import local_extract_frames_md
     from deeplabcut.utils import auxiliaryfunctions
@@ -202,10 +202,12 @@ def create_labels_md(config_path, video_path, scorer, overwrite_flag=False, chec
 
     #%
     if cfg["numframes2pick"] is not None:
+        np.random.seed(seed)  ## optional seed for reproducible frame selection.
         assert len(np.unique(frames_index_keep)) >= cfg["numframes2pick"]
         frames2pick = np.sort(
            np.random.choice(frames_index_keep, cfg["numframes2pick"], replace=False)
        )
+        np.random.seed()  ## reseed from OS entropy so later code stays nondeterministic.
     else:
         frames2pick = frames_index_keep
     numframes2pick += [len(frames2pick)]
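The `seed` argument added above makes the random frame subsample reproducible without pinning the global RNG for the rest of the run. A self-contained sketch of that behavior, with toy numbers:

```
# Sketch of the seeding pattern used in create_labels_md (toy numbers only).
import numpy as np

frames_index_keep = np.arange(100)
np.random.seed(0)
picked_a = np.sort(np.random.choice(frames_index_keep, 10, replace=False))
np.random.seed(0)
picked_b = np.sort(np.random.choice(frames_index_keep, 10, replace=False))
assert (picked_a == picked_b).all()  # same seed -> same training frames
np.random.seed()                     # back to OS entropy afterwards
```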
diff --git a/tests/test_segment_videos.py b/tests/test_segment_videos.py
new file mode 100644
index 0000000..2afb479
--- /dev/null
+++ b/tests/test_segment_videos.py
@@ -0,0 +1,38 @@
+import math
+import os
+
+from moviepy.editor import VideoFileClip
+
+from deepgraphpose.contrib.segment_videos import split_video
+
+here = os.path.abspath(os.path.dirname(__file__))
+testmodel_path = os.path.join(here, "testmodel")
+
+
+def test_split_video(tmp_path):
+    output = tmp_path / "subclips"
+    output.mkdir()
+    frame_duration = 30
+    vidpath = os.path.join(testmodel_path, "videos", "reachingvideo1.avi")
+    video_locs = split_video(vidpath, frame_duration, suffix="test", outputloc=str(output))
+
+    origclip = VideoFileClip(vidpath)
+    duration = origclip.duration * origclip.fps
+    assert len(video_locs) == math.ceil(duration / frame_duration)
+    vid_inds = []
+    for vi in video_locs:
+        prefix = os.path.splitext(os.path.basename(vidpath))[0] + "test"
+        assert os.path.splitext(os.path.basename(vi))[0].startswith(prefix)
+        vid_inds.append(int(vi.split(prefix)[-1].split(".mp4")[0]))
+        sub = VideoFileClip(vi)
+        assert sub.duration * sub.fps - frame_duration < 1e-1
+    assert set(vid_inds) == set(range(len(video_locs)))