-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathdatasets.py
71 lines (56 loc) · 2.29 KB
/
datasets.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
"""datasets.py"""
from pathlib import Path
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10, LSUN, ImageFolder
__datasets__ = ['cifar10', 'celeba', 'lsun']
def is_power_of_2(num):
    """Return True when ``num`` is a nonzero power of two (1, 2, 4, ...)."""
    # A power of two has exactly one bit set, so clearing its lowest set
    # bit (num & (num - 1)) yields zero; guard against num == 0 itself.
    return num != 0 and (num & (num - 1)) == 0
def return_data(args):
    """Build the training DataLoader for the dataset named in ``args``.

    Args:
        args: namespace with attributes ``dataset`` (str), ``dset_dir``
            (str), ``batch_size`` (int), ``num_workers`` (int) and
            ``image_size`` (int, a power of two >= 32).

    Returns:
        dict: ``{'train': DataLoader}`` — only a train split is built.

    Raises:
        ValueError: if ``image_size`` is not a power of two >= 32.
        NotImplementedError: for the 'lsun' dataset (declared in
            ``__datasets__`` but not wired up yet).
    """
    name = args.dataset
    dset_dir = args.dset_dir
    batch_size = args.batch_size
    num_workers = args.num_workers
    image_size = args.image_size

    # Only square power-of-two resolutions >= 32 are supported.
    if not is_power_of_2(image_size) or image_size < 32:
        raise ValueError('image size should be 32, 64, 128, ...')

    transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        # Map pixel values from [0, 1] to [-1, 1] per channel.
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    if name.lower() == 'cifar10':
        root = Path(dset_dir).joinpath('CIFAR10')
        train_kwargs = {'root': root, 'train': True, 'transform': transform, 'download': True}
        dset = CIFAR10
    elif name.lower() == 'celeba':
        # Expects pre-extracted images under <dset_dir>/CelebA/<class>/...
        root = Path(dset_dir).joinpath('CelebA')
        train_kwargs = {'root': root, 'transform': transform}
        dset = ImageFolder
    elif name.lower() == 'lsun':
        # BUG FIX: the original had dead LSUN setup code after this raise;
        # it was unreachable and has been removed. LSUN remains unsupported.
        raise NotImplementedError('{} is not supported yet'.format(name))
    else:
        # Fallback: treat <dset_dir>/<name> as a generic ImageFolder tree.
        root = Path(dset_dir).joinpath(name)
        train_kwargs = {'root': root, 'transform': transform}
        dset = ImageFolder

    train_data = dset(**train_kwargs)
    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True,
                              drop_last=True)

    data_loader = dict()
    data_loader['train'] = train_loader
    return data_loader
if __name__ == '__main__':
    # Smoke-test entry point: build a loader from command-line arguments.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='MNIST')
    parser.add_argument('--dset_dir', type=str, default='data')
    parser.add_argument('--batch_size', type=int, default=64)
    # BUG FIX: return_data() also reads args.num_workers and
    # args.image_size; without these flags the script crashed with
    # AttributeError. Defaults keep the existing CLI backward-compatible.
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--image_size', type=int, default=64)
    args = parser.parse_args()
    data_loader = return_data(args)