#%%
import torch
from torch import nn
from torchvision.transforms import ToTensor
import torch.nn.functional as F
import math
import torchsummary
#%%
def conv1d():
    # One 1-D convolutional branch: three Conv1d/ReLU/MaxPool1d stages, ending with 128 channels.
    model = nn.Sequential(
        nn.Conv1d(3, 32, kernel_size=3, stride=1, padding='same'),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        nn.Conv1d(32, 64, kernel_size=3, stride=1, padding='same'),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        nn.Conv1d(64, 128, kernel_size=3, stride=1, padding='same'),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2, stride=2, padding=1)
    )
    return model


class MultiCNN(nn.Module):
    def __init__(self):
        super(MultiCNN, self).__init__()
        # Four identical conv branches, one per sensor.
        self.conv_gra = conv1d()
        self.conv_la = conv1d()
        self.conv_gyro = conv1d()
        self.conv_mag = conv1d()
        self.final_linear = nn.Sequential(
            nn.Linear(128 * 4, 128),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, 7),
        )

    def forward(self, x):
        # Split the 12 input features into four 3-feature sensor blocks.
        out1 = self.conv_gra(x[:, :, :3])
        out2 = self.conv_la(x[:, :, 3:6])
        out3 = self.conv_gyro(x[:, :, 6:9])
        out4 = self.conv_mag(x[:, :, 9:12])
        # Flatten each branch output (batch size of 128 is hard-coded here).
        out1 = out1.view(128, -1)
        out2 = out2.view(128, -1)
        out3 = out3.view(128, -1)
        out4 = out4.view(128, -1)
        total_out = torch.cat((out1, out2, out3, out4), dim=1)
        total_out = self.final_linear(total_out)
        return total_out
# %%
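A minimal snippet that reproduces the error (dummy data on my part: 12 features to match the slicing in forward(), and a batch of 2 to match the traceback):

model = MultiCNN()
x = torch.randn(2, 300, 12)   # (batch, time, features) layout, as described below
out = model(x)                # raises the RuntimeError below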
This is the error message I get:

RuntimeError: Given groups=1, weight of size [32, 3, 3], expected input[2, 300, 3] to have 3 channels, but got 300 channels instead
I designed this multi-branch CNN for sequence data (four sensor streams).
The input size is (128, 300, 3) = (batch_size, time_sequence, input_features), but I don't know how to solve this problem.
Also, why is the weight of size [32, 3, 3]? Is this really a 1D convolution?
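What I could verify so far: according to the nn.Conv1d documentation, the weight is stored as (out_channels, in_channels, kernel_size) and the input is expected as (batch, channels, length). A standalone check with dummy shapes (not my real data):

conv = nn.Conv1d(3, 32, kernel_size=3, padding='same')
print(conv.weight.shape)        # torch.Size([32, 3, 3]) = (out_channels, in_channels, kernel_size)
x = torch.randn(2, 300, 3)      # (batch, time, features) layout
# conv(x) fails here too: dim 1 (300) is treated as the channel dimension
out = conv(x.permute(0, 2, 1))  # reorder to (batch, channels=3, length=300)
print(out.shape)                # torch.Size([2, 32, 300])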
Any comments would be appreciated!