# hparams.py — hyperparameter definitions (baseline and proposed model configurations).
from copy import deepcopy
# While True, looking up a missing key/attribute silently creates a nested
# Options — this lets the config below be written as chained dotted paths.
_init = True


class Options(dict):
    """A dict with attribute-style access and auto-vivification.

    While the module-level ``_init`` flag is True, accessing a missing key
    or attribute creates an empty nested :class:`Options` in place, so
    deeply nested settings can be written as ``opts.a.b.c = value`` without
    pre-declaring intermediate levels.  Once ``_init`` is set to False,
    missing lookups fail instead of silently creating empty sub-options
    (catching typos in consumer code).
    """

    def __getitem__(self, key):
        # Auto-vivify during the init phase: a missing key becomes a fresh
        # nested Options so chained access keeps working.
        if _init and key not in self:
            super().__setitem__(key, Options())
        return super().__getitem__(key)

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails; delegate to item
        # access so the attribute view and the dict view stay in sync.
        try:
            return self[attr]
        except KeyError as err:
            # Translate to AttributeError so the attribute protocol behaves
            # correctly (hasattr(), getattr() with a default, etc.).
            raise AttributeError(attr) from err

    def __setattr__(self, attr, value):
        # Attributes are stored as dict items, never in an instance __dict__.
        self[attr] = value

    def __delattr__(self, attr):
        del self[attr]

    def __deepcopy__(self, memo=None):
        # Custom deepcopy so the result is an Options (plain dict deepcopy
        # would lose the subclass).  Honor the memo dict so shared or cyclic
        # references are copied once and preserved.
        if memo is None:
            memo = {}
        new = Options()
        memo[id(self)] = new
        for key, value in self.items():
            new[key] = deepcopy(value, memo)
        return new
# ---- Baseline model hyperparameters -------------------------------------
baseline = Options()
# Training schedule.
baseline.max_epochs = 100
baseline.batch_size = 32
baseline.learning_rate = 1e-4
# Input features: BERT embedding (768) plus a scalar local style token (LST).
baseline.input.bert_dim = 768
baseline.input.lst_dim = 1
# Local encoder: prenet -> CBHG over the concatenated BERT+LST input.
baseline.local_encoder.input_dim = baseline.input.bert_dim + baseline.input.lst_dim
baseline.local_encoder.prenet.sizes = [256, 128]
baseline.local_encoder.cbhg.dim = 128
baseline.local_encoder.cbhg.K = 16
baseline.local_encoder.cbhg.projections = [128, 128]
# CBHG is bidirectional, hence the doubled output width.
baseline.local_encoder.output_dim = baseline.local_encoder.cbhg.dim * 2
# Text-only encoder: same architecture as local_encoder but fed BERT only
# (no LST channel), so only input_dim differs.
baseline.local_text_encoder = deepcopy(baseline.local_encoder)
baseline.local_text_encoder.input_dim = baseline.input.bert_dim
# Attention over the two encoder outputs; both key dims match the text
# encoder's output width.
baseline.attention.dim = 128
baseline.attention.k1_dim = baseline.local_text_encoder.output_dim
baseline.attention.k2_dim = baseline.attention.k1_dim
# LST projection layers: concatenated encoder outputs -> LST dimension.
baseline.lst_linear_1.input_dim = baseline.local_encoder.output_dim + baseline.local_text_encoder.output_dim
baseline.lst_linear_1.output_dim = baseline.input.lst_dim
baseline.lst_linear_2 = deepcopy(baseline.lst_linear_1)
# ---- Proposed model hyperparameters -------------------------------------
# Starts from the baseline and overrides only what differs.
proposed = deepcopy(baseline)
# Extra inputs: sentence-level BERT (SBERT) and a global style token (GST);
# the local style token is widened from a scalar to a 40-dim vector.
proposed.input.sbert_dim = 768
proposed.input.gst_dim = 40
proposed.input.lst_dim = 40
# Recompute: depends on the new (wider) lst_dim above.
proposed.local_encoder.input_dim = proposed.input.bert_dim + proposed.input.lst_dim
#proposed.attention.preserved_k1_dim = proposed.local_encoder.output_dim
#proposed.attention.preserved_k2_dim = proposed.attention.preserved_k1_dim
# GST projection layers: two SBERT embeddings plus current GST -> GST dim.
proposed.gst_linear_1.input_dim = 2 * proposed.input.sbert_dim + proposed.input.gst_dim
proposed.gst_linear_1.output_dim = proposed.input.gst_dim
proposed.gst_linear_2 = deepcopy(proposed.gst_linear_1)
# LST linears keep the baseline input_dim but emit the widened LST vector.
proposed.lst_linear_1.output_dim = proposed.input.lst_dim
proposed.lst_linear_2.output_dim = proposed.input.lst_dim
# Freeze the config: from here on, missing keys raise instead of silently
# creating empty sub-options (catches typos in consumer code).
_init = False