Commit

Updated the feature extraction node and changed the messages.
Benteng Ma authored and tiago committed Feb 5, 2024
1 parent 47ebc7c commit e8b2012
Showing 43 changed files with 1,039 additions and 152 deletions.
(25 of the changed files could not be rendered in the GitHub web view.)

331 changes: 329 additions & 2 deletions common/helpers/colour_estimation/src/colour_estimation/__init__.py

Large diffs are not rendered by default.

172 changes: 147 additions & 25 deletions common/helpers/colour_estimation/src/colour_estimation/rgb.py
@@ -1,42 +1,163 @@
import numpy as np

COLOURS = {  # renamed from RGB_COLOURS
"red": [255, 0, 0],
"green": [0, 255, 0],
"blue": [0, 0, 255],
"white": [255, 255, 255],
"black": [0, 0, 0],
"yellow": [255, 255, 0],
"cyan": [0, 255, 255],
"magenta": [255, 0, 255],
"gray": [128, 128, 128],
"orange": [255, 165, 0],
"purple": [128, 0, 128],
"brown": [139, 69, 19],
"pink": [255, 182, 193],
"beige": [245, 245, 220],
"maroon": [128, 0, 0],
"olive": [128, 128, 0],
"navy": [0, 0, 128],
"lime": [50, 205, 50],
"golden": [255, 223, 0],
"teal": [0, 128, 128],
"coral": [255, 127, 80],
"salmon": [250, 128, 114],
"turquoise": [64, 224, 208],
"violet": [238, 130, 238],
"platinum": [229, 228, 226],
"ochre": [204, 119, 34],
"burntsienna": [233, 116, 81],
"chocolate": [210, 105, 30],
"tan": [210, 180, 140],
"ivory": [255, 255, 240],
"goldenrod": [218, 165, 32],
"orchid": [218, 112, 214],
"honey": [238, 220, 130]
}

SPESIFIC_COLOURS = {
"red": [255, 0, 0],
"green": [0, 255, 0],
"blue": [0, 0, 255],
"white": [255, 255, 255],
"black": [0, 0, 0],
"yellow": [255, 255, 0],
"cyan": [0, 255, 255],
"magenta": [255, 0, 255],
"gray": [128, 128, 128],
"orange": [255, 165, 0],
"purple": [128, 0, 128],
"brown": [139, 69, 19],
"pink": [255, 182, 193],
"beige": [245, 245, 220],
"maroon": [128, 0, 0],
"olive": [128, 128, 0],
"navy": [0, 0, 128],
"lime": [50, 205, 50],
"golden": [255, 223, 0],
"teal": [0, 128, 128],
"coral": [255, 127, 80],
"salmon": [250, 128, 114],
"turquoise": [64, 224, 208],
"violet": [238, 130, 238],
"platinum": [229, 228, 226],
"ochre": [204, 119, 34],
"burntsienna": [233, 116, 81],
"chocolate": [210, 105, 30],
"tan": [210, 180, 140],
"ivory": [255, 255, 240],
"goldenrod": [218, 165, 32],
"orchid": [218, 112, 214],
"honey": [238, 220, 130],
"lavender": [230, 230, 250],
"mint": [189, 252, 201],
"peach": [255, 229, 180],
"ruby": [224, 17, 95],
"indigo": [75, 0, 130],
"amber": [255, 191, 0],
"emerald": [80, 200, 120],
"sapphire": [15, 82, 186],
"aquamarine": [127, 255, 212],
"periwinkle": [204, 204, 255],
"fuchsia": [255, 0, 255],
"raspberry": [227, 11, 92],
"slate": [112, 128, 144],
"charcoal": [54, 69, 79]
}

DETAILED_COLOURS = {
"light_red": [255, 204, 204],
"bright_red": [255, 0, 0],
"dark_red": [139, 0, 0],
"light_green": [204, 255, 204],
"bright_green": [0, 255, 0],
"dark_green": [0, 100, 0],
"light_blue": [204, 204, 255],
"bright_blue": [0, 0, 255],
"dark_blue": [0, 0, 139],
"light_yellow": [255, 255, 204],
"bright_yellow": [255, 255, 0],
"dark_yellow": [204, 204, 0],
"light_cyan": [204, 255, 255],
"bright_cyan": [0, 255, 255],
"dark_cyan": [0, 139, 139],
"light_magenta": [255, 204, 255],
"bright_magenta": [255, 0, 255],
"dark_magenta": [139, 0, 139],
"light_orange": [255, 229, 204],
"bright_orange": [255, 165, 0],
"dark_orange": [255, 140, 0],
"light_purple": [229, 204, 255],
"bright_purple": [128, 0, 128],
"dark_purple": [102, 0, 102],
"light_pink": [255, 204, 229],
"bright_pink": [255, 105, 180],
"dark_pink": [255, 20, 147],
"light_brown": [210, 180, 140],
"medium_brown": [165, 42, 42],
"dark_brown": [101, 67, 33],
# ...
}

COLOUR_FAMILIES = {
"light_reds": [[255, 182, 193], [255, 192, 203], [255, 160, 122]],
"dark_reds": [[139, 0, 0], [178, 34, 34], [165, 42, 42]],
"light_blues": [[173, 216, 230], [135, 206, 250], [176, 224, 230]],
"dark_blues": [[0, 0, 139], [25, 25, 112], [0, 0, 128]],
"bluish_greens": [[102, 205, 170], [32, 178, 170], [72, 209, 204]],
"light_greens": [[144, 238, 144], [152, 251, 152], [143, 188, 143]],
"dark_greens": [[0, 100, 0], [34, 139, 34], [47, 79, 79]],
"yellows": [[255, 255, 0], [255, 255, 102], [255, 215, 0]],
"oranges": [[255, 165, 0], [255, 140, 0], [255, 69, 0]],
"purples": [[128, 0, 128], [147, 112, 219], [138, 43, 226]],
"pinks": [[255, 192, 203], [255, 182, 193], [255, 105, 180]],
"browns": [[165, 42, 42], [139, 69, 19], [160, 82, 45]],
"cyans": [[0, 255, 255], [0, 139, 139], [72, 209, 204]],
"greys": [[128, 128, 128], [169, 169, 169], [192, 192, 192]],
# ...
}

SIMPLIFIED_COLOURS = {
"red": [255, 0, 0],
"green": [0, 255, 0],
"blue": [0, 0, 255],
"white": [255, 255, 255],
"black": [0, 0, 0],
"yellow": [255, 255, 0],
"cyan": [0, 255, 255],
"magenta": [255, 0, 255],
"gray": [128, 128, 128],
"orange": [255, 165, 0],
"purple": [128, 0, 128],
"brown": [139, 69, 19],
"pink": [255, 182, 193],
"light blue": [173, 216, 230],
"dark green": [0, 100, 0],
"light gray": [211, 211, 211],
"dark red": [139, 0, 0],
"beige": [245, 245, 220],
"maroon": [128, 0, 0],
"olive": [128, 128, 0],
"navy": [0, 0, 128],
"lime": [50, 205, 50],
"golden": [255, 223, 0],
"teal": [0, 128, 128],
"coral": [255, 127, 80],
"salmon": [250, 128, 114],
"turquoise": [64, 224, 208],
"violet": [238, 130, 238],
"platinum": [229, 228, 226],
"ochre": [204, 119, 34],
"burntsienna": [233, 116, 81],
"chocolate": [210, 105, 30],
"tan": [210, 180, 140],
"ivory": [255, 255, 240],
"goldenrod": [218, 165, 32],
"orchid": [218, 112, 214],
"honey": [238, 220, 130]
"navy": [0, 0, 128]
}

HAIR_COLOURS = {  # renamed from RGB_HAIR_COLOURS
'midnight black': (9, 8, 6),
'off black': (44, 34, 43),
'strong dark brown': (58, 48, 36),
@@ -65,4 +186,5 @@
'white blonde': (255, 24, 225),
'platinum blonde': (202, 191, 177),
'russet red': (145, 74, 67),
'terra cotta': (181, 82, 57)
}
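
The dictionaries above are plain name-to-RGB palettes; the matching logic itself lives in the colour_estimation __init__.py, whose large diff is not rendered above. As a rough, hedged sketch of how such a palette is typically consumed (the function and import path below are illustrative assumptions, not part of this commit), a nearest-neighbour lookup could look like this:

import numpy as np

from colour_estimation.rgb import COLOURS  # import path assumed from the file layout above


def closest_colour_name(rgb, palette=COLOURS):
    # Return the palette key whose RGB value has the smallest Euclidean
    # distance to the query colour.
    names = list(palette.keys())
    values = np.asarray(list(palette.values()), dtype=float)
    distances = np.linalg.norm(values - np.asarray(rgb, dtype=float), axis=1)
    return names[int(np.argmin(distances))]

# Example: closest_colour_name([200, 120, 40]) would map to a warm tone such as "ochre".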
167 changes: 167 additions & 0 deletions common/helpers/torch_module/src/torch_module/modules/__init__.py
@@ -129,3 +129,170 @@ def forward(self, x):
        if self.sigmoid:
            x = torch.sigmoid(x)
        return x


def x2conv(in_channels, out_channels, inner_channels=None):
    inner_channels = out_channels // 2 if inner_channels is None else inner_channels
    down_conv = nn.Sequential(
        nn.Conv2d(in_channels, inner_channels, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(inner_channels),
        nn.ReLU(inplace=True),
        nn.Conv2d(inner_channels, out_channels, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True))
    return down_conv


class Encoder(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()
        self.down_conv = x2conv(in_channels, out_channels)
        self.pool = nn.MaxPool2d(kernel_size=2, ceil_mode=True)

    def forward(self, x):
        x = self.down_conv(x)
        x = self.pool(x)
        return x


class Decoder(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Decoder, self).__init__()
        self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.up_conv = x2conv(in_channels, out_channels)

    def forward(self, x_copy, x, interpolate=True):
        x = self.up(x)

        if (x.size(2) != x_copy.size(2)) or (x.size(3) != x_copy.size(3)):
            if interpolate:
                # Interpolating instead of padding
                x = F.interpolate(x, size=(x_copy.size(2), x_copy.size(3)),
                                  mode="bilinear", align_corners=True)
            else:
                # Padding in case the incoming volumes are of different sizes
                diffY = x_copy.size()[2] - x.size()[2]
                diffX = x_copy.size()[3] - x.size()[3]
                x = F.pad(x, (diffX // 2, diffX - diffX // 2,
                              diffY // 2, diffY - diffY // 2))

        # Concatenate along the channel dimension
        x = torch.cat([x_copy, x], dim=1)
        x = self.up_conv(x)
        return x


class UNetWithResnet18Encoder(nn.Module):
    class Decoder(nn.Module):
        def __init__(self, in_channels, skip_channels, out_channels):
            super(UNetWithResnet18Encoder.Decoder, self).__init__()
            self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
            self.up_conv = x2conv(out_channels + skip_channels, out_channels)

        def forward(self, x_copy, x):
            x = self.up(x)
            if x.size(2) != x_copy.size(2) or x.size(3) != x_copy.size(3):
                x = F.interpolate(x, size=(x_copy.size(2), x_copy.size(3)), mode='bilinear', align_corners=True)
            x = torch.cat((x_copy, x), dim=1)
            x = self.up_conv(x)
            return x

    def __init__(self, num_classes, in_channels=3, freeze_bn=False, sigmoid=True):
        super(UNetWithResnet18Encoder, self).__init__()
        self.sigmoid = sigmoid
        resnet18 = models.resnet18(pretrained=True)

        if in_channels != 3:
            resnet18.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)

        self.encoder1 = nn.Sequential(resnet18.conv1, resnet18.bn1, resnet18.relu)
        self.encoder2 = resnet18.layer1
        self.encoder3 = resnet18.layer2
        self.encoder4 = resnet18.layer3
        self.encoder5 = resnet18.layer4

        self.up1 = UNetWithResnet18Encoder.Decoder(512, 256, 256)
        self.up2 = UNetWithResnet18Encoder.Decoder(256, 128, 128)
        self.up3 = UNetWithResnet18Encoder.Decoder(128, 64, 64)
        self.up4 = UNetWithResnet18Encoder.Decoder(64, 64, 64)

        self.final_conv = nn.Conv2d(64, num_classes, kernel_size=1)
        self._initialize_weights()

        if freeze_bn:
            self.freeze_bn()

    def _initialize_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        x1 = self.encoder1(x)
        x2 = self.encoder2(x1)
        x3 = self.encoder3(x2)
        x4 = self.encoder4(x3)
        x5 = self.encoder5(x4)

        x = self.up1(x4, x5)
        x = self.up2(x3, x)
        x = self.up3(x2, x)
        x = self.up4(x1, x)
        x = F.interpolate(x, size=(x.size(2) * 2, x.size(3) * 2), mode='bilinear', align_corners=True)

        x = self.final_conv(x)

        if self.sigmoid:
            x = torch.sigmoid(x)
        return x

    def freeze_bn(self):
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()


class MultiLabelResNet(nn.Module):
    def __init__(self, num_labels, input_channels=3, sigmoid=True, pretrained=True):
        super(MultiLabelResNet, self).__init__()
        self.model = models.resnet18(pretrained=pretrained)
        self.sigmoid = sigmoid

        if input_channels != 3:
            self.model.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)

        num_ftrs = self.model.fc.in_features

        self.model.fc = nn.Linear(num_ftrs, num_labels)

    def forward(self, x):
        x = self.model(x)
        if self.sigmoid:
            x = torch.sigmoid(x)
        return x


class CombinedModel(nn.Module):
    def __init__(self, segment_model: nn.Module, predict_model: nn.Module, cat_layers: int = None):
        super(CombinedModel, self).__init__()
        self.segment_model = segment_model
        self.predict_model = predict_model
        self.cat_layers = cat_layers

    def forward(self, x: torch.Tensor):
        seg_masks = self.segment_model(x)

        if self.cat_layers:
            seg_masks_ = seg_masks[:, 0:self.cat_layers]
            x = torch.cat((x, seg_masks_), dim=1)
        else:
            x = torch.cat((x, seg_masks), dim=1)

        logic_outputs = self.predict_model(x)
        return seg_masks, logic_outputs
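
For orientation, the new modules compose as segment-then-classify: CombinedModel runs the segmentation network, concatenates some (or all) of its masks onto the input image, and feeds the result to the classifier. A minimal, hedged wiring sketch follows; the channel, class, and label counts are illustrative assumptions, not values taken from this commit:

# Illustrative only: 4 segmentation masks, 10 multi-label outputs.
segmenter = UNetWithResnet18Encoder(num_classes=4)                    # masks: (B, 4, H, W)
classifier = MultiLabelResNet(num_labels=10, input_channels=3 + 4)    # RGB plus the 4 concatenated masks
model = CombinedModel(segmenter, classifier, cat_layers=4)

dummy = torch.randn(1, 3, 224, 224)
masks, labels = model(dummy)    # masks: (1, 4, 224, 224), labels: (1, 10)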

5 changes: 4 additions & 1 deletion common/vision/lasr_vision_msgs/msg/FeatureWithColour.msg
@@ -1,5 +1,8 @@
# Feature name
string name

# Feature label
bool label

# Colour predictions
string[] colours  # was: lasr_vision_msgs/ColourPrediction[] colours
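
With this change the message carries plain colour-name strings rather than ColourPrediction entries. A hedged sketch of how a publisher might now fill it (the field values are illustrative; the import follows standard ROS message conventions):

from lasr_vision_msgs.msg import FeatureWithColour

msg = FeatureWithColour()
msg.name = "hair"                        # feature name
msg.label = True                         # feature present / detected
msg.colours = ["brown", "dark_brown"]    # colour names as plain strings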
@@ -11,4 +11,5 @@ string torso_mask_dtype
---

# Detection result
lasr_vision_msgs/FeatureWithColour[] detected_features
# string detected_features
