Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

added path to learning statistics in /demo/README.md #4

Open
wants to merge 25 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
677977e
added path to learning statistics in demo README.
Feb 2, 2021
92a755f
added catch for if config file has already been updated.
Feb 9, 2021
0a052a0
added option to create_labels_md to set seed.
cellistigs Feb 20, 2021
beacdb0
added prediction only script. Assumes that the snapshot is indexed as…
cellistigs May 12, 2021
e931bc8
added predict file.
cellistigs May 12, 2021
06c2f96
don't restore configs.
May 12, 2021
f1845ad
changed test to write into model folder properly, but postpend test.
May 12, 2021
b9faa3b
adding updated config handling.
May 12, 2021
062d2d1
committing current state of deepgraphpose fork. needs work on the con…
May 12, 2021
f2ab7c6
general purpose handling of taskname added.
cellistigs May 12, 2021
302d982
updated dgp to accept video sets.
May 12, 2021
c7feae4
demo code now works on a non-mackenzie dataset.
May 12, 2021
1abe8f2
fixed writepaths everywhere.
May 12, 2021
f8fe65b
made demo work for general purpose model folders.
May 12, 2021
850d4ee
running prediction on remote instance.
May 24, 2021
3552a04
changed run dgp general parsing file to assume video paths are given …
May 24, 2021
8a0fd86
dgp prediction code for ensemble works now.
May 24, 2021
11b518f
make result dir if does not exist.
May 24, 2021
df9bd66
stubbed out segment_videos function.
cellistigs Aug 31, 2021
d741792
added raw video split code.
cellistigs Sep 14, 2021
a5a4bec
added option to split videos.
cellistigs Sep 14, 2021
938be7b
uncommented workflow.
cellistigs Sep 14, 2021
3c9620c
added split option.
cellistigs Sep 14, 2021
3bf0466
changed predict.
Sep 16, 2021
25d5575
changed dlcsnapshot flag to snapshot flag for overwrite. changed spli…
Sep 27, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions demo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ You can run the DGP pipeline on the example dataset provided in this repo by run
python demo/run_dgp_demo.py --dlcpath data/Reaching-Mackenzie-2018-08-30
```

The output of the pipeline, including the labeled videos and the h5/csv files with predicted trajectories will be stored in `{DGP_DIR}/data/Reaching-Mackenzie-2018-08-30/videos_pred`.
The output of the pipeline, including the labeled videos and the h5/csv files with predicted trajectories will be stored in `{DGP_DIR}/data/Reaching-Mackenzie-2018-08-30/videos_pred`. You can see information about training statistics in the file `{DGP_DIR}/data/dlc-models/iteration-0/ReachingAug30-trainset95shuffle1/train/learning_stats.csv`.

You can run the DGP pipeline on your own dataset as long as it exists in a DLC file directory structure, for example

Expand All @@ -28,4 +28,4 @@ In particular, you can use the DLC GUI to create a DLC project, label videos, an

`python {DGP_DIR}/demo/run_dgp_demo.py --dlcpath {PROJ_DIR}/task-scorer-date/ --shuffle 'the shuffle to run' --dlcsnapshot 'specify the DLC snapshot if you have already run DLC with location refinement'`

If you have not yet run DLC you can simply remove the `--dlcsnapshot` argument and DLC will automatically be fit as part of the pipeline.
If you have not yet run DLC you can simply remove the `--dlcsnapshot` argument and DLC will automatically be fit as part of the pipeline.
157 changes: 157 additions & 0 deletions demo/predict_dgp_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,157 @@
# If you have collected labels using DLC's GUI you can run DGP with the following
"""Main fitting function for DGP.
step 0: run DLC
step 1: run DGP with labeled frames only
step 2: run DGP with spatial clique
step 3: do prediction on all videos
"""
import argparse
import os
from os import listdir
from os.path import isfile, join
from pathlib import Path
import sys
import yaml

# macOS compatibility shim: wxPython 4.x renamed Thread_IsMain to
# IsMainThread; alias the old name so downstream GUI code that still
# uses it keeps working.
if sys.platform == 'darwin':
    import wx
    # First character of wx.__version__ is the major version
    # (assumes single-digit majors, e.g. "4.1.1" -> 4).
    if int(wx.__version__[0]) > 3:
        wx.Thread_IsMain = wx.IsMainThread

# Run DeepLabCut headless; these must be set before the deeplabcut
# import below reads them. "Colab" presumably enables Colab-compatible
# behavior in DLC — confirm against deeplabcut's config handling.
os.environ["DLClight"] = "True"
os.environ["Colab"] = "True"
from deeplabcut.utils import auxiliaryfunctions

from deepgraphpose.models.fitdgp import fit_dlc, fit_dgp, fit_dgp_labeledonly
from deepgraphpose.models.fitdgp_util import get_snapshot_path
from deepgraphpose.models.eval import plot_dgp
from deepgraphpose.contrib.segment_videos import split_video
from run_dgp_demo import update_config_files_general

if __name__ == '__main__':

    # %% Command-line interface: DLC project path and prediction hyperparameters.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dlcpath",
        type=str,
        default=None,
        help="the absolute path of the DLC project",
    )
    parser.add_argument(
        "--snapshot",
        type=str,
        default=None,
        help="use snapshot for prediction. If not given, assumes `snapshot-step2-final--0`",
    )
    parser.add_argument("--shuffle", type=int, default=1, help="Project shuffle")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=10,
        help="size of the batch, if there are memory issues, decrease its value")
    parser.add_argument("--test", action='store_true', default=False)
    parser.add_argument(
        "--split", action="store_true",
        help="whether or not we should run inference on chopped up videos")
    # fix: declare type=int so CLI-supplied values are parsed as integers
    # (previously only the int default was an int; CLI values were strings).
    parser.add_argument(
        "--splitlength", type=int, default=6000,
        help="number of frames in block if splitting videos. ")

    input_params = parser.parse_known_args()[0]
    print(input_params)

    dlcpath = input_params.dlcpath
    shuffle = input_params.shuffle
    if input_params.snapshot is not None:
        snapshot = input_params.snapshot
    else:
        # Default to the snapshot written at the end of DGP training (step 2).
        step = 2
        snapshot = 'snapshot-step{}-final--0'.format(step)

    batch_size = input_params.batch_size
    test = input_params.test
    splitflag, splitlength = input_params.split, input_params.splitlength

    # Rewrite the project's config files so internal paths match this machine.
    dlcpath = update_config_files_general(dlcpath, shuffle)
    update_configs = True

    try:

        # --------------------------------------------------------------------------------
        # Test DGP model
        # --------------------------------------------------------------------------------

        # %% step 3: predict on all videos in the videos_dgp folder
        print(
            '''
            ==========================
            |                        |
            |                        |
            |    Predict with DGP    |
            |                        |
            |                        |
            ==========================
            '''
            , flush=True)

        snapshot_path, cfg_yaml = get_snapshot_path(snapshot, dlcpath, shuffle=shuffle)
        cfg = auxiliaryfunctions.read_config(cfg_yaml)

        # Collect videos to predict on: prefer the `videos_dgp` folder; fall
        # back to the video sets registered in the project config.
        video_path = str(Path(dlcpath) / 'videos_dgp')
        if not (os.path.exists(video_path)):
            print(video_path + " does not exist!")
            video_sets = list(cfg['video_sets'])
        else:
            video_sets = [
                video_path + '/' + f for f in listdir(video_path)
                if isfile(join(video_path, f)) and (
                    f.find('avi') > 0 or f.find('mp4') > 0 or f.find('mov') > 0 or f.find(
                        'mkv') > 0)
            ]

        video_pred_path = str(Path(dlcpath) / 'videos_pred')
        if not os.path.exists(video_pred_path):
            os.makedirs(video_pred_path)

        if splitflag:
            # Fix: define the clip directory before branching so the
            # "reuse existing clips" path below cannot hit a NameError.
            video_cut_path = str(Path(dlcpath) / 'videos_cut')
            if not os.path.exists(video_cut_path):
                # First run: create the clip folder and split every video
                # into blocks of `splitlength` frames.
                os.makedirs(video_cut_path)
                clip_sets = []
                for v in video_sets:
                    clip_sets.extend(
                        split_video(v, int(splitlength), suffix="demo",
                                    outputloc=video_cut_path))
            else:
                # Clip folder already exists: reuse the previously cut clips.
                clip_sets = [os.path.join(video_cut_path, v)
                             for v in os.listdir(video_cut_path)]
            video_sets = clip_sets  # replace video_sets with clipped versions.
        print('video_sets', video_sets, flush=True)

        if test:
            # Test mode: run on a shortened copy of the first video only.
            for video_file in [video_sets[0]]:
                from moviepy.editor import VideoFileClip
                clip = VideoFileClip(str(video_file))
                if clip.duration > 10:
                    # NOTE(review): subclip(10) keeps everything AFTER t=10s;
                    # a short test clip may have intended subclip(0, 10) — confirm.
                    clip = clip.subclip(10)
                video_file_name = os.path.splitext(video_file)[0] + "test" + ".mp4"
                print('\nwriting {}'.format(video_file_name))
                clip.write_videofile(video_file_name)
                plot_dgp(str(video_file_name),
                         str(video_pred_path),
                         proj_cfg_file=str(cfg_yaml),
                         dgp_model_file=str(snapshot_path),
                         shuffle=shuffle)
        else:
            for video_file in video_sets:
                plot_dgp(str(video_file),
                         str(video_pred_path),
                         proj_cfg_file=str(cfg_yaml),
                         dgp_model_file=str(snapshot_path),
                         shuffle=shuffle)
    finally:
        # Placeholder: config restoration (return_configs) is intentionally
        # disabled — see commented-out call at the bottom of this file.
        pass

#if update_configs:
# return_configs()
175 changes: 175 additions & 0 deletions demo/predict_dgp_ensemble.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
# If you have collected labels using DLC's GUI you can run DGP with the following
"""Ensemble fitting function for DGP.
step 0: run DLC
step 1: run DGP with labeled frames only
step 2: run DGP with spatial clique
step 3: do prediction on all videos
"""
import argparse
import os
from os import listdir
from os.path import isfile, join
from pathlib import Path
from joblib import Memory
import sys
import yaml
import pandas as pd
import numpy as np

# macOS compatibility shim: wxPython 4.x renamed Thread_IsMain to
# IsMainThread; alias the old name so downstream GUI code that still
# uses it keeps working.
if sys.platform == 'darwin':
    import wx
    # First character of wx.__version__ is the major version
    # (assumes single-digit majors, e.g. "4.1.1" -> 4).
    if int(wx.__version__[0]) > 3:
        wx.Thread_IsMain = wx.IsMainThread

# Run DeepLabCut headless; these must be set before the deeplabcut
# import below reads them. "Colab" presumably enables Colab-compatible
# behavior in DLC — confirm against deeplabcut's config handling.
os.environ["DLClight"] = "True"
os.environ["Colab"] = "True"

from moviepy.editor import VideoFileClip

from deeplabcut.utils import auxiliaryfunctions

from deepgraphpose.models.fitdgp import fit_dlc, fit_dgp, fit_dgp_labeledonly
from deepgraphpose.models.fitdgp_util import get_snapshot_path
from deepgraphpose.models.eval import plot_dgp
from run_dgp_demo import update_config_files_general

from dgp_ensembletools.models import Ensemble

if __name__ == '__main__':

    # %% Command-line interface: ensemble member projects and the video to predict.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--modelpaths",
        nargs="+",
        type=str,
        default=None,
        help="the absolute path of the DLC projects you want to ensemble",
    )
    parser.add_argument(
        "--dlcsnapshot",
        type=str,
        default=None,
        help="use the DLC snapshot to initialize DGP",
    )
    parser.add_argument(
        "--videopath",
        type=str,
        default=None,
        help="path to video",
    )
    parser.add_argument("--shuffle", type=int, default=1, help="Project shuffle")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=10,
        help="size of the batch, if there are memory issues, decrease its value")
    parser.add_argument("--test", action='store_true', default=False)

    input_params = parser.parse_known_args()[0]
    print(input_params)

    modelpaths = input_params.modelpaths
    shuffle = input_params.shuffle
    videopath = input_params.videopath
    # NOTE(review): --dlcsnapshot is parsed but never used below; the
    # snapshot name is hard-coded to the step-2 final snapshot — confirm intent.
    dlcsnapshot = input_params.dlcsnapshot
    batch_size = input_params.batch_size
    test = input_params.test

    # Rewrite each member project's config files so internal paths match
    # its location on this machine.
    for modelpath in modelpaths:
        dlcpath = update_config_files_general(modelpath, shuffle)
    update_configs = True

    ## Specifying snapshot manually at the moment assuming training.
    step = 2
    snapshot = 'snapshot-step{}-final--0'.format(step)

    try:

        # --------------------------------------------------------------------------------
        # Test DGP model
        # --------------------------------------------------------------------------------

        # %% step 3: predict on the video with every ensemble member, then
        # form and save a consensus trajectory.
        print(
            '''
            ==================================
            |                                |
            |                                |
            |   Predict with DGP Ensemble    |
            |                                |
            |                                |
            ==================================
            '''
            , flush=True)

        ## get ensemble parameters
        # Assumes all member projects share one parent directory.
        topdir = os.path.dirname(modelpaths[0])
        modeldirs = [os.path.basename(m) for m in modelpaths]
        # Extension without the leading dot (e.g. "mp4").
        videoext = os.path.splitext(videopath)[-1].split(".", 1)[-1]
        video = VideoFileClip(videopath)
        ## Write results to a "results" folder next to the video's parent
        ## directory (assumes videopath is nested at least two levels deep
        ## — TODO confirm against the calling layout):
        resultpath = os.path.join(os.path.dirname(os.path.dirname(videopath)), "results")

        framelength = int(video.duration * video.fps)
        ## can do some processing based on length here.
        framerange = range(0, framelength)

        remoteensemble = Ensemble(topdir, modeldirs, videoext,
                                  memory=Memory(os.path.dirname(videopath)))
        # Per-member prediction; side effect is writing *_labeled files
        # into each project's videos_pred folder.
        [model.predict(videopath) for model in remoteensemble.models.values()]
        predict_videoname = "_labeled".join(os.path.splitext(os.path.basename(videopath)))
        predict_h5name = "_labeled".join([os.path.splitext(os.path.basename(videopath))[0], ".h5"])
        consensus_csvname = "_labeled_consensus".join([os.path.splitext(os.path.basename(videopath))[0], ".csv"])
        consensus_h5name = "_labeled_consensus".join([os.path.splitext(os.path.basename(videopath))[0], ".h5"])
        ## outputs pose of shape xy, time, body part
        meanx, meany = remoteensemble.get_mean_pose(predict_videoname, framerange,
                                                    snapshot=snapshot, shuffle=shuffle)

        ## reshape and save in shape of existing:
        # Consensus likelihoods are not computed; fill that column with NaN.
        # (np.nan instead of the np.NaN alias, which NumPy 2.0 removed.)
        likearray = np.empty(meanx.shape)
        likearray[:] = np.nan
        stacked = np.stack((meanx, meany, likearray), axis=-1)
        dfshaped = stacked.reshape(stacked.shape[0], stacked.shape[1] * stacked.shape[2])
        ## borrow the column structure from one member's prediction file:
        sampledf = pd.read_hdf(os.path.join(modelpaths[0], "videos_pred", predict_h5name))
        sampledf.iloc[:len(dfshaped), :] = dfshaped
        # Trim trailing rows the consensus did not cover (assumes a default
        # integer index on the prediction dataframe — TODO confirm).
        sampledf.drop([i for i in range(len(dfshaped), len(sampledf))], inplace=True)

        if not os.path.isdir(resultpath):
            os.makedirs(resultpath)

        sampledf.to_csv(os.path.join(resultpath, consensus_csvname))
        sampledf.to_hdf(os.path.join(resultpath, consensus_h5name), key="consensus")

        ### Not writing a labeled consensus video for now; see git history
        ### for the previous plot_dgp-based draft.
    finally:
        # Placeholder: config restoration (return_configs) is intentionally
        # disabled — see commented-out call at the bottom of this file.
        pass

#if update_configs:
# return_configs()
Loading