-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathEncoder.py
More file actions
36 lines (25 loc) · 933 Bytes
/
Encoder.py
File metadata and controls
36 lines (25 loc) · 933 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import torch
import torch.nn as nn
from modules import ETransformer
from Layers import T2DCR
# input (x, m): x [C, T], where C is the number of sensors; m [C, T] is a
# missing-data indicator (1 or 0), with 0 marking a missing value at that position.
# output: c [C, T]
class Encoder(nn.Module):
    """Impute missing sensor readings, then encode the sequence.

    Observed entries of the input are kept as-is; positions flagged as
    missing by the mask are filled with the T2DCR reconstruction, and the
    completed matrix is passed through a Transformer encoder.

    Args:
        C: number of sensors (rows of the input).
        T: sequence length (columns of the input).
        en_num_layers: number of encoder layers in the Transformer.
        embed_size: embedding dimension of the Transformer.
        heads: number of attention heads.
        map_dim: hidden dimension of the Transformer's feed-forward map.
    """

    def __init__(self, C, T, en_num_layers, embed_size, heads, map_dim):
        super(Encoder, self).__init__()
        self._2dcr = T2DCR(C, T)
        self.etransformer = ETransformer(en_num_layers, embed_size, heads, map_dim)

    def forward(self, input, m):
        """Return encoded features [C, T] for input [C, T] with mask m [C, T].

        m is 1 where data is observed and 0 where it is missing.
        """
        # Fill masked-out (m == 0) entries with the T2DCR estimate.
        reconstructed = self._2dcr(input)
        completed = m * input + (1 - m) * reconstructed  # [C, T]
        # The Transformer extracts sequence information from the completed matrix.
        return self.etransformer(completed)  # [C, T]
"""
x = torch.randn(2, 3)
m = torch.tensor([[1, 0, 1], [0, 0, 1]], dtype=torch.float32)
model = Encoder(2, 3, 1, 512, 4, 2048)
Y = model(x, m)
print(Y.shape)
"""