-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathconfig.py
87 lines (61 loc) · 2.15 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import os
class TIMITConfig(object):
    """Paths and hyperparameters for training/evaluating on the TIMIT dataset."""
    # Path to the unzipped TIMIT data folder.
    data_path = '/home/shangeth/DATASET/TIMIT/wav_data'
    # Path to the CSV file containing age/height info of TIMIT speakers.
    # os.getcwd() already returns str, so no str() wrapper is needed.
    speaker_csv_path = os.path.join(os.getcwd(), 'src/Dataset/data_info_height_age.csv')
    # Fixed wav length (in samples) for training and testing: 3 s at 16 kHz.
    timit_wav_len = 3 * 16000
    batch_size = 150
    epochs = 200
    # Multi-task loss weights:
    # loss = alpha * height_loss + beta * age_loss + gamma * gender_loss
    alpha = 1
    beta = 1
    gamma = 1
    # Training type - AHG/H (presumably age+height+gender vs. height only — confirm against trainer).
    training_type = 'H'
    # Input representation - raw/spectral.
    data_type = 'spectral'
    # Model type; valid options depend on training_type:
    #   AHG: wav2vecLSTMAttn / spectralCNNLSTM / MultiScale
    #   H:   wav2vecLSTMAttn / MultiScale / LSTMAttn
    model_type = 'MultiScale'
    # Hidden dimension of LSTM and dense layers.
    hidden_size = 128
    # No. of GPUs for training and no. of workers for dataloaders.
    gpu = '-1'
    n_workers = 4
    # Model checkpoint to continue training from (None = train from scratch).
    model_checkpoint = None
    # Noise dataset used for data augmentation.
    noise_dataset_path = '/home/shangeth/noise_dataset'
    # Learning rate of the optimizer.
    lr = 1e-3
    # Run identifier derived from the configuration above.
    run_name = data_type + '_' + training_type + '_' + model_type
class NISPConfig(object):
    """Paths and hyperparameters for training/evaluating on the NISP dataset."""
    # Root directory of the extracted wav data.
    # NOTE(review): the path points at a "TimitDataset" folder — confirm it
    # actually holds the NISP wav files.
    data_path = '/home/n1900235d/SpeakerProfiling/TimitDataset/wav_data'
    # Speaker-info list (age/height) for NISP speakers.
    speaker_csv_path = '/home/shangeth/NISP/dataset/NISP-Dataset-master/total_spkrinfo.list'
    # Fixed clip length in samples: 5 seconds at a 16 kHz sampling rate.
    timit_wav_len = 16000 * 5
    # Optimization settings.
    batch_size = 128
    epochs = 100
    lr = 1e-3
    # Multi-task loss weights:
    # loss = alpha * height_loss + beta * age_loss + gamma * gender_loss
    alpha = 1
    beta = 1
    gamma = 1
    # Width of the LSTM and dense layers.
    hidden_size = 128
    # GPU selection string for the trainer and worker count for dataloaders.
    gpu = '-1'
    n_workers = 4
    # Checkpoint to resume from (None = train from scratch).
    model_checkpoint = None
    # Noise dataset used for data augmentation.
    noise_dataset_path = '/home/n1900235d/INTERSPEECH/NoiseDataset'