-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathdiscrete_encoders.py
110 lines (81 loc) · 3.3 KB
/
discrete_encoders.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import numpy as np
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
class IdEncoder(nn.Module):
    """No-op encoder: passes its input through unchanged.

    Serves as a drop-in baseline wherever the pipeline expects an
    object exposing ``encode`` but no transformation is desired.
    """

    def __init__(self):
        super(IdEncoder, self).__init__()

    def encode(self, x):
        """Return *x* untouched (identity mapping)."""
        return x
class HTEncoder(nn.Module):
    """Hard-threshold encoder: binarizes an embedding elementwise.

    ``encode`` maps each component of ``x`` to 1.0 where it is strictly
    greater than ``threshold`` and 0.0 otherwise.

    Args:
        load_dir: directory containing ``emb_mean.npy`` (a 4096-dim mean
            embedding; currently unused by ``encode``, kept because an
            earlier variant thresholded at the per-dimension mean).
        threshold: scalar elementwise cut-off. Default 0.07, the value
            previously hard-coded in ``encode``.
    """

    def __init__(self, load_dir, threshold=0.07):
        super(HTEncoder, self).__init__()
        # NOTE(review): loaded but unused by encode(); retained for the
        # mean-thresholding alternative the original had commented out.
        self.emb_mean = torch.from_numpy(np.load(load_dir + 'emb_mean.npy')).cuda()
        self.emb_mean = self.emb_mean.view(1, 4096)
        self.threshold = threshold

    def encode(self, x):
        """Return a 0/1 float tensor on the same device as ``x``.

        The original forced the result onto the GPU with ``.cuda()``;
        that call is dropped so the op is device-agnostic (it was a
        no-op when ``x`` was already on CUDA).
        """
        return (x > self.threshold).float()
class RandomEncoder(nn.Module):
    """Random-projection binary encoder with a straight-through estimator.

    Centers the input by a precomputed mean embedding, projects it through
    a fixed random Gaussian matrix, and binarizes the projection at 0.
    The returned tensor equals the 0/1 code in the forward pass while
    gradients flow through the continuous projection (straight-through).

    Args:
        dim: output code dimensionality.
        load_dir: directory containing ``emb_mean.npy``.
        in_dim: input embedding dimensionality. Default 4096, the value
            previously hard-coded.
    """

    def __init__(self, dim, load_dir, in_dim=4096):
        super(RandomEncoder, self).__init__()
        self.project_mat = torch.randn(in_dim, dim).cuda()
        self.emb_mean = torch.from_numpy(np.load(load_dir + 'emb_mean.npy')).cuda()
        self.emb_mean = self.emb_mean.view(1, in_dim)

    def encode(self, x):
        """Encode ``x``: forward value is binary, gradient is continuous."""
        random_project_emb = torch.matmul(x - self.emb_mean, self.project_mat)
        # The comparison already yields a tensor on the projection's device,
        # so the original trailing .cuda() was redundant and is dropped.
        discrete_emb = (random_project_emb > 0.).float()
        # Straight-through: value == discrete_emb, gradient w.r.t. x flows
        # through random_project_emb.
        return random_project_emb + (discrete_emb - random_project_emb).detach()
class PCAEncoder(nn.Module):
    """PCA-projection binary encoder with a straight-through estimator.

    Centers the input by a precomputed mean embedding, projects it onto
    the first ``dim`` columns of a precomputed transform matrix, and
    binarizes the projection at 0. The returned tensor equals the 0/1
    code in the forward pass while gradients flow through the continuous
    projection (straight-through).

    Args:
        dim: number of leading projection columns to keep (code size).
        load_dir: directory containing ``trans_mat.npy`` and
            ``emb_mean.npy``.
        in_dim: input embedding dimensionality. Default 4096, the value
            previously hard-coded.
    """

    def __init__(self, dim, load_dir, in_dim=4096):
        super(PCAEncoder, self).__init__()
        np_project_mat = np.load(load_dir + 'trans_mat.npy')
        self.project_mat = torch.from_numpy(np_project_mat[:, :dim]).cuda()
        self.emb_mean = torch.from_numpy(np.load(load_dir + 'emb_mean.npy')).cuda()
        self.emb_mean = self.emb_mean.view(1, in_dim)

    def encode(self, x):
        """Encode ``x``: forward value is binary, gradient is continuous."""
        pca_emb = torch.matmul(x - self.emb_mean, self.project_mat)
        # The comparison already yields a tensor on the projection's device,
        # so the original trailing .cuda() was redundant and is dropped.
        discrete_emb = (pca_emb > 0.).float()
        # Straight-through estimator.
        return pca_emb + (discrete_emb - pca_emb).detach()
class LinearAutoEncoder(nn.Module):
    """Autoencoder with single linear encoder/decoder layers and a binary
    latent code trained via the straight-through estimator.

    Input/output dimensionality is fixed at 2*2048 = 4096.

    Args:
        dim: latent code dimensionality.
    """

    def __init__(self, dim):
        super(LinearAutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(2 * 2048, dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(dim, 2 * 2048)
        )

    def forward(self, x):
        """Encode then reconstruct ``x``.

        The {0,1} code is mapped to {-1,+1} before the decoder.
        Reuses :meth:`encode` instead of duplicating its body, as the
        original did.
        """
        to_decoder = self.encode(x)
        return self.decoder(2. * to_decoder - 1.)

    def encode(self, x):
        """Return the binary latent code (values in {0,1}) with a
        straight-through gradient path through the encoder logits.

        The original forced the code onto the GPU with ``.cuda()``; that
        call is dropped so the module also runs on CPU (it was a no-op
        when the module and input were already on CUDA).
        """
        logits = self.encoder(x)
        latent_code = (logits > 0.).float()
        return logits + (latent_code - logits).detach()
class NonlinearAutoEncoder(nn.Module):
    """Autoencoder with a two-layer tanh encoder, a linear decoder, and a
    binary latent code trained via the straight-through estimator.

    Input/output dimensionality is fixed at 2*2048 = 4096.

    Args:
        dim: latent code dimensionality.
        fc_dim: width of the hidden fully-connected encoder layer.
    """

    def __init__(self, dim, fc_dim=2048):
        super(NonlinearAutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(2 * 2048, fc_dim),
            nn.Tanh(),
            nn.Linear(fc_dim, dim)
        )
        self.decoder = nn.Sequential(
            nn.Linear(dim, 2 * 2048)
        )

    def forward(self, x):
        """Encode then reconstruct ``x``.

        The {0,1} code is mapped to {-1,+1} before the decoder.
        Reuses :meth:`encode` instead of duplicating its body, as the
        original did.
        """
        to_decoder = self.encode(x)
        return self.decoder(2. * to_decoder - 1.)

    def encode(self, x):
        """Return the binary latent code (values in {0,1}) with a
        straight-through gradient path through the encoder logits.

        The original forced the code onto the GPU with ``.cuda()``; that
        call is dropped so the module also runs on CPU (it was a no-op
        when the module and input were already on CUDA).
        """
        logits = self.encoder(x)
        latent_code = (logits > 0.).float()
        return logits + (latent_code - logits).detach()