Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

incorporate tm cleaning into the hierarchy building process #45

Merged
merged 1 commit into from
Jun 23, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
188 changes: 188 additions & 0 deletions bddl/data_generation/parse_tm_cleaning_csv.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,188 @@
import csv
import json
from enum import IntEnum
from bddl.object_taxonomy import ObjectTaxonomy
import pathlib

# Specific methods for applying / removing particles
class ParticleModifyMethod(IntEnum):
    """How a modifier interacts with particles: by surface contact or by a projected volume."""

    ADJACENCY = 0   # modifies particles touching / adjacent to the object
    PROJECTION = 1  # modifies particles inside a projected region (e.g. a vacuum's cone)


# Specific condition types for applying / removing particles
class ParticleModifyCondition(IntEnum):
    """Condition types that gate when particles may be applied or removed."""

    FUNCTION = 0   # arbitrary callable condition (not parsed from the CSV)
    SATURATED = 1  # object is saturated with a given substance
    TOGGLEDON = 2  # object's toggle state is on
    GRAVITY = 3    # object is oriented so gravity drives the particles out

# Maps the first whitespace-delimited token of a CSV predicate string to its
# condition type (see parse_predicate).
PREDICATE_MAPPING = {
    "saturated": ParticleModifyCondition.SATURATED,
    "toggled_on": ParticleModifyCondition.TOGGLEDON,
    "function": ParticleModifyCondition.FUNCTION,
}

# Maps a particle-source object synset to the name of the particle system it
# emits (used when filling in particleSource kwargs in the hierarchy).
PARTICLE_SOURCE_MAPPING = {
    "bathtub.n.01": "water",
    "bidet.n.01": "water",
    "sink.n.01": "water",
    "soap_dispenser.n.01": "liquid_soap",
    "tub.n.02": "water",
    "watering_can.n.01": "water",
    "squeeze_bottle.n.01": "water",
}


def parse_predicate(predicate):
    """Convert a whitespace-delimited predicate string into a condition tuple.

    Args:
        predicate (str): e.g. "saturated water.n.06" or "toggled_on".

    Returns:
        tuple: (ParticleModifyCondition, value) where the value is the system
            category for SATURATED, or True for TOGGLEDON.

    Raises:
        ValueError: for FUNCTION (not expressible in the CSV) or any other
            unsupported condition type.
    """
    tokens = predicate.split(" ")
    pred_type = PREDICATE_MAPPING[tokens[0]]
    if pred_type == ParticleModifyCondition.SATURATED:
        # Second token is a synset like "water.n.06"; keep only the category.
        return (pred_type, tokens[1].split(".")[0])
    if pred_type == ParticleModifyCondition.TOGGLEDON:
        return (pred_type, True)
    if pred_type == ParticleModifyCondition.FUNCTION:
        raise ValueError("Not supported")
    raise ValueError(f"Unsupported condition type: {pred_type}")


def parse_conditions_entry(unparsed_conditions):
    """Parse one CSV cell into a list of particle-modify conditions.

    A purely numeric cell encodes an unconditional flag: truthy -> [] (always
    active, no conditions), zero -> None (never active). Any other cell is an
    " or "-separated list of predicate strings, each parsed by parse_predicate.

    Args:
        unparsed_conditions (str): raw cell text from the CSV.

    Returns:
        list or None: list of condition tuples ([] means unconditional),
            or None when the cell disables the modifier entirely.
    """
    print(f"Parsing: {unparsed_conditions}")
    if not unparsed_conditions.isnumeric():
        predicates = unparsed_conditions.lower().split(" or ")
        return [parse_predicate(predicate=p) for p in predicates]
    return [] if bool(int(unparsed_conditions)) else None

def parse_tm_cleaning_csv():
    """Parse the cleaning-annotation CSV and fold particle-modifier kwargs
    (particleRemover / particleSink / particleApplier / particleSource) into
    the generated object hierarchy.

    Side effects:
        - Writes generated_data/remover_synset_mapping.json.
        - Rewrites generated_data/output_hierarchy_properties.json in place.
    """
    synset_cleaning_mapping = dict()

    # All inputs/outputs live under bddl/generated_data, resolved relative to
    # this file so the script works regardless of the current working directory.
    PROP_PARAM_ANNOTS_DIR = pathlib.Path(__file__).parents[1] / "generated_data" / "prop_param_annots"
    TM_CLEANING_FILE = PROP_PARAM_ANNOTS_DIR / "tm_cleaning.csv"
    REMOVER_SYNSET_MAPPING = pathlib.Path(__file__).parents[1] / "generated_data" / "remover_synset_mapping.json"
    OUTPUT_HIERARCHY_PROPERTIES = pathlib.Path(__file__).parents[1] / "generated_data" / "output_hierarchy_properties.json"

    rows = []
    with open(TM_CLEANING_FILE) as csvfile:
        reader = csv.reader(csvfile, delimiter=",", quotechar='"')
        for row in reader:
            rows.append(row)

    # Row 0 is discarded; row 1 is the real header and data starts at row 2.
    header, rows = rows[1], rows[2:]

    # Substance columns start at "water.n.06"; the columns to its left hold
    # per-synset metadata (synset name at -4, default conditions at -2/-1).
    start_idx = 0
    for idx, head in enumerate(header):
        if head == "water.n.06":
            start_idx = idx
            break
    assert start_idx != 0

    for row in rows:
        synset_entry = row[start_idx - 4]
        synset = synset_entry.split(" ")[0]

        # An empty synset cell marks the end of the annotated rows.
        if synset == "":
            break

        # Rows explicitly flagged as non-removers are skipped.
        if "not particleremover" in synset_entry.lower():
            continue

        default_visual_conditions = parse_conditions_entry(row[start_idx - 2])
        default_physical_conditions = parse_conditions_entry(row[start_idx - 1])

        remover_kwargs = {
            "conditions": dict(),
            "default_physical_conditions": default_physical_conditions,
            "default_visual_conditions": default_visual_conditions,
            # Vacuums remove particles within a projected volume; every other
            # remover works by surface contact.
            "method": ParticleModifyMethod.PROJECTION if "vacuum" in synset.lower() else ParticleModifyMethod.ADJACENCY,
        }

        # One column per substance synset; a non-None entry means this object
        # can remove that substance under the parsed conditions.
        for idx, substance_synset in enumerate(header[start_idx:]):
            # Grab condition
            conditions = parse_conditions_entry(row[start_idx + idx])
            if conditions is not None:
                og_cat = substance_synset.split(".")[0]
                remover_kwargs["conditions"][og_cat] = conditions

        synset_cleaning_mapping[synset] = remover_kwargs

    # Push annotations (possibly made at a non-leaf taxonomy level) down to
    # the leaf synsets that actually carry the particleRemover ability.
    ot = ObjectTaxonomy()
    pruned_synset_cleaning_mapping = dict()
    for synset, remover_kwargs in synset_cleaning_mapping.items():
        if not ot.is_valid_synset(synset):
            continue
        leaf_synsets = ot.get_leaf_descendants(synset=synset)
        # A synset with no descendants is itself a leaf.
        leaf_synsets = [synset] if len(leaf_synsets) == 0 else leaf_synsets
        for leaf_synset in leaf_synsets:
            abilities = ot.get_abilities(leaf_synset)
            if "particleRemover" in abilities:
                pruned_synset_cleaning_mapping[leaf_synset] = remover_kwargs


    # NOTE(review): this dumps the UNPRUNED mapping, while the pruned mapping
    # is what gets written into the hierarchy below — confirm this is intended.
    with open(REMOVER_SYNSET_MAPPING, "w+") as f:
        json.dump(synset_cleaning_mapping, f, indent=2)


    # Modify the output hierarchy properties
    with open(OUTPUT_HIERARCHY_PROPERTIES, "r") as f:
        ohp = json.load(f)

    # Leaves declaring particleRemover but missing a CSV annotation (reported only).
    not_annotated_removers = set()

    def find_and_replace_synsets_recursively(ohp_root):
        # Depth-first walk of the hierarchy dict; on each leaf node, fill in
        # kwargs for whichever particle-modifier abilities it declares.
        # global pruned_synset_cleaning_mapping, not_annotated_removers
        if isinstance(ohp_root, dict):
            # Leaf node
            if "name" in ohp_root.keys() and "children" not in ohp_root.keys():
                name = ohp_root["name"]
                # Make sure particleRemover annotation aligns
                if "particleRemover" in ohp_root["abilities"]:
                    if name not in pruned_synset_cleaning_mapping:
                        print(f"no particleRemover annotated for {name}")
                        not_annotated_removers.add(name)
                if "particleSink" in ohp_root["abilities"]:
                    print(f"Adding particleSink kwargs for: {name}")
                    # Sinks absorb physical particles unconditionally and never
                    # visual ones (default_visual_conditions=None disables them).
                    ohp_root["abilities"]["particleSink"] = {
                        "conditions": {},
                        "default_physical_conditions": [],
                        "default_visual_conditions": None,
                    }
                if "particleApplier" in ohp_root["abilities"]:
                    print(f"Adding particleApplier kwargs for: {name}")
                    # assert len(name.split("__")) > 1
                    # Appliers are named "<system>__<object>"; the applied
                    # system is the prefix before the double underscore.
                    system_name = name.split("__")[0]
                    # Orientation-sensitive appliers pour under gravity;
                    # all others apply while toggled on.
                    ohp_root["abilities"]["particleApplier"] = {
                        "conditions": {system_name: [(ParticleModifyCondition.GRAVITY, True) if "needsOrientation" in ohp_root["abilities"] else (ParticleModifyCondition.TOGGLEDON, True)]},
                        "method": ParticleModifyMethod.PROJECTION,
                    }
                if "particleSource" in ohp_root["abilities"]:
                    print(f"Adding particleSource kwargs for: {name}")
                    # Every source synset must have a known emitted system.
                    assert name in PARTICLE_SOURCE_MAPPING
                    ohp_root["abilities"]["particleSource"] = {
                        "conditions": {PARTICLE_SOURCE_MAPPING[name]: [(ParticleModifyCondition.GRAVITY, True) if "needsOrientation" in ohp_root["abilities"] else (ParticleModifyCondition.TOGGLEDON, True)]},
                        "method": ParticleModifyMethod.PROJECTION,
                    }
            for k, v in ohp_root.items():
                if k == "name" and v in pruned_synset_cleaning_mapping:
                    # print(f"found: {v}")
                    assert "children" not in ohp_root.keys()
                    assert "particleRemover" in ohp_root["abilities"]
                    ohp_root["abilities"]["particleRemover"] = pruned_synset_cleaning_mapping[v]
                    # Annotated nodes are leaves, so no need to keep iterating.
                    break
                elif k == "children":
                    for child in v:
                        find_and_replace_synsets_recursively(ohp_root=child)
                else:
                    find_and_replace_synsets_recursively(ohp_root=v)

    find_and_replace_synsets_recursively(ohp_root=ohp)

    with open(OUTPUT_HIERARCHY_PROPERTIES, "w+") as f:
        json.dump(ohp, f, indent=2)


if __name__ == "__main__":
    parse_tm_cleaning_csv()
2 changes: 2 additions & 0 deletions bddl/data_generation/pull_sheets.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
ASSETS_SHEET_KEY = "10L8wjNDvr1XYMMHas4IYYP9ZK7TfQHu--Kzoi0qhAe4"
SYNSETS_SHEET_KEY = "1eIQn1HzUJV15nCP4MqsHvrdWAV9VrKoxOqSnQxF0_1A"
SYMSET_PARAMS_SHEET_KEY = "1GXXa0uAsXiULVMELxrhFNNgjC_cBHXvj0uxT571Jnzs"
TM_SHEET_KEY = "11PsOlRYjsJ_WZZzMc6-iOi0oT_pIgKgvzXLg3kR00KM"

ALL_SHEETS = [
(ASSETS_SHEET_KEY, "Object Category Mapping", "category_mapping.csv"),
Expand All @@ -16,6 +17,7 @@
(SYMSET_PARAMS_SHEET_KEY, "heatsource", "prop_param_annots/heatSource.csv"),
(SYMSET_PARAMS_SHEET_KEY, "coldsource", "prop_param_annots/coldSource.csv"),
(SYMSET_PARAMS_SHEET_KEY, "cooking", "prop_param_annots/cooking.csv"),
(TM_SHEET_KEY, "cleaning substance-based dirtiness", "prop_param_annots/tm_cleaning.csv"),
]

def main():
Expand Down
4 changes: 4 additions & 0 deletions bddl/data_generation/run_everything.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from bddl.data_generation.get_syn_prop_annots_canonical import create_get_save_annots_canonical, create_get_save_properties_to_synsets, create_get_save_synsets_to_descriptors
from bddl.data_generation.propagate_by_intersection import create_get_save_propagated_canonical
from bddl.data_generation.process_prop_param_annots import create_get_save_propagated_annots_params
from bddl.data_generation.parse_tm_cleaning_csv import parse_tm_cleaning_csv
import pandas as pd
import csv
import nltk
Expand Down Expand Up @@ -47,6 +48,9 @@ def main():
# Add prop-param info to hierarchy
create_get_save_hierarchy_with_properties(hierarchy)

# Add TM cleaning info to hierarchy
parse_tm_cleaning_csv()

# # Create and save activity-specific hierarchies (no getting because that will get complicated)
# create_save_activity_specific_hierarchies()

Expand Down
Loading