Skip to content

Commit

Permalink
- do not generate and load all datasets to save memory for larger test sets
Browse files Browse the repository at this point in the history
  • Loading branch information
dsmic committed Aug 23, 2019
1 parent 871f24a commit 2850bf8
Showing 1 changed file with 3 additions and 5 deletions.
8 changes: 3 additions & 5 deletions few_shot_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,20 +61,18 @@ def idx_to_big(self, phase, idx):
cathegories = 5
dataloader = OurMiniImageNetDataLoader(shot_num=5 * 2, way_num=cathegories, episode_test_sample_num=args.episode_test_sample_num, shuffle_images = args.shuffle_images) #twice shot_num is because one might be uses as the base for the samples

dataloader.generate_data_list(phase='train')
dataloader.generate_data_list(phase='val')
dataloader.generate_data_list(phase='test')
dataloader.generate_data_list(phase=args.dataset)

print('mode is',args.dataset)
dataloader.load_list('all')
dataloader.load_list(args.dataset)

#print('train',dataloader.train_filenames)
#print('val',dataloader.val_filenames)
#print('test',dataloader.test_filenames)


base_train_img, base_train_label, base_test_img, base_test_label = \
dataloader.get_batch(phase='train', idx=0)
dataloader.get_batch(phase=args.dataset, idx=0)

train_epoch_size = base_train_img.shape[0]
if not args.train_indep_and_dependent:
Expand Down

0 comments on commit 2850bf8

Please sign in to comment.