add pose_n_num_encoder
modules/pts_num_encoder.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from torch import nn

import PytorchBoot.stereotype as stereotype


@stereotype.module("pts_num_encoder")
class PointsNumEncoder(nn.Module):
    """Embeds a scalar point count into a fixed-size feature vector via a small MLP."""

    def __init__(self, config):
        super(PointsNumEncoder, self).__init__()
        self.config = config
        out_dim = config["out_dim"]
        self.act = nn.ReLU(True)

        # Two-layer MLP: scalar point count -> out_dim embedding.
        self.pts_num_encoder = nn.Sequential(
            nn.Linear(1, out_dim),
            self.act,
            nn.Linear(out_dim, out_dim),
            self.act,
        )

    def encode_pts_num(self, num_seq):
        # num_seq: float tensor of point counts with trailing feature dim 1;
        # returns a tensor of shape [..., out_dim].
        return self.pts_num_encoder(num_seq)
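A minimal standalone usage sketch for the module above. The config key and the call mirror the class as committed; casting the raw integer counts to float with a trailing feature dimension of 1 is an assumption, since nn.Linear(1, out_dim) requires a floating-point input of that shape:

import torch

# Hypothetical standalone usage; "out_dim" is the only key the module reads.
config = {"out_dim": 128}
encoder = PointsNumEncoder(config)

# Raw point counts cast to float and given a trailing feature dim (assumption).
pts_num = torch.tensor([[1024.0], [512.0], [2048.0]])  # shape [3, 1]
embedding = encoder.encode_pts_num(pts_num)
print(embedding.shape)  # torch.Size([3, 128])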
modules/transformer_pose_n_num_seq_encoder.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence

import PytorchBoot.stereotype as stereotype


@stereotype.module("transformer_pose_n_num_seq_encoder")
class TransformerPoseAndNumSequenceEncoder(nn.Module):
    def __init__(self, config):
        super(TransformerPoseAndNumSequenceEncoder, self).__init__()
        self.config = config
        # Each sequence element is the concatenation of a point-count
        # embedding and a pose embedding.
        embed_dim = config["pts_num_embed_dim"] + config["pose_embed_dim"]
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=config["num_heads"],
            dim_feedforward=config["ffn_dim"],
            batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer, num_layers=config["num_layers"]
        )
        self.fc = nn.Linear(embed_dim, config["output_dim"])

    def encode_sequence(self, pts_num_embedding_list_batch, pose_embedding_list_batch):
        combined_features_batch = []
        lengths = []

        # Concatenate the per-view point-count and pose embeddings, then stack
        # each variable-length sequence into one tensor per sample.
        for pts_num_embedding_list, pose_embedding_list in zip(pts_num_embedding_list_batch, pose_embedding_list_batch):
            combined_features = [
                torch.cat((pts_num_embed, pose_embed), dim=-1)
                for pts_num_embed, pose_embed in zip(pts_num_embedding_list, pose_embedding_list)
            ]
            combined_features_batch.append(torch.stack(combined_features))
            lengths.append(len(combined_features))

        # Pad to the longest sequence in the batch.
        combined_tensor = pad_sequence(combined_features_batch, batch_first=True)  # Shape: [batch_size, max_seq_len, embed_dim]

        # True marks padded positions; src_key_padding_mask tells the encoder
        # to ignore them as attention keys.
        max_len = max(lengths)
        padding_mask = torch.tensor([([0] * length + [1] * (max_len - length)) for length in lengths], dtype=torch.bool).to(combined_tensor.device)

        transformer_output = self.transformer_encoder(combined_tensor, src_key_padding_mask=padding_mask)
        # Mean-pool over the sequence dimension (note: padded positions are
        # included in this mean), then project to the output dimension.
        final_feature = transformer_output.mean(dim=1)
        final_output = self.fc(final_feature)

        return final_output


if __name__ == "__main__":
    config = {
        "pts_num_embed_dim": 128,
        "pose_embed_dim": 256,
        "num_heads": 4,
        "ffn_dim": 256,
        "num_layers": 3,
        "output_dim": 2048,
    }

    encoder = TransformerPoseAndNumSequenceEncoder(config)
    seq_len = [5, 8, 9, 4]
    batch_size = 4

    # Variable-length sequences of per-view embeddings, one pair of lists per sample.
    pts_num_embedding_list_batch = [
        torch.randn(seq_len[idx], config["pts_num_embed_dim"]) for idx in range(batch_size)
    ]
    pose_embedding_list_batch = [
        torch.randn(seq_len[idx], config["pose_embed_dim"]) for idx in range(batch_size)
    ]
    output_feature = encoder.encode_sequence(
        pts_num_embedding_list_batch, pose_embedding_list_batch
    )
    print("Encoded Feature:", output_feature)
    print("Feature Shape:", output_feature.shape)
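A sketch of how the two new modules presumably compose: PointsNumEncoder's out_dim is assumed to match the sequence encoder's pts_num_embed_dim, and since the pose-embedding source is outside this commit, random tensors stand in for it here:

import torch

# Hypothetical wiring of the two modules added in this commit (assumption:
# per-view point counts are encoded with out_dim == pts_num_embed_dim).
pts_cfg = {"out_dim": 128}
seq_cfg = {
    "pts_num_embed_dim": 128,
    "pose_embed_dim": 256,
    "num_heads": 4,
    "ffn_dim": 256,
    "num_layers": 3,
    "output_dim": 2048,
}
pts_encoder = PointsNumEncoder(pts_cfg)
seq_encoder = TransformerPoseAndNumSequenceEncoder(seq_cfg)

# One sample with 5 views: raw point counts -> per-view count embeddings.
raw_counts = torch.tensor([[1024.0], [512.0], [2048.0], [256.0], [768.0]])  # [5, 1]
pts_num_embeds = pts_encoder.encode_pts_num(raw_counts)  # [5, 128]
pose_embeds = torch.randn(5, seq_cfg["pose_embed_dim"])  # stand-in pose embeddings

feature = seq_encoder.encode_sequence([pts_num_embeds], [pose_embeds])
print(feature.shape)  # torch.Size([1, 2048])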
@@ -4,7 +4,7 @@ from torch.nn.utils.rnn import pad_sequence
 import PytorchBoot.stereotype as stereotype


-@stereotype.module("transformer_seq_encoder")
+@stereotype.module("transformer_pose_n_pts_seq_encoder")
 class TransformerSequenceEncoder(nn.Module):
     def __init__(self, config):
         super(TransformerSequenceEncoder, self).__init__()