Skip to content

Commit

Permalink
Merge pull request #1136 from Griperis/blender-4.1.1
Browse files Browse the repository at this point in the history
Blender 4.2.1
  • Loading branch information
cornerfarmer authored Sep 12, 2024
2 parents 25cb98c + 01c8a12 commit ee197a9
Show file tree
Hide file tree
Showing 40 changed files with 215 additions and 105 deletions.
2 changes: 1 addition & 1 deletion blenderproc/external/vhacd/decompose.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def convex_decomposition(obj: "MeshObject", temp_dir: str, vhacd_path: str, reso
else:
out_file_name = os.path.join(cache_dir, str(mesh_hash) + ".obj")

bpy.ops.import_scene.obj(filepath=out_file_name, axis_forward="Y", axis_up="Z")
bpy.ops.wm.obj_import(filepath=out_file_name, forward_axis="Y", up_axis="Z")
imported = bpy.context.selected_objects

# Name and transform the loaded parts
Expand Down
2 changes: 1 addition & 1 deletion blenderproc/python/camera/LensDistortionUtility.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ def _internal_apply(input_image: np.ndarray) -> np.ndarray:
amount_of_output_channels = input_image.shape[2]
image_distorted = np.zeros((orig_res_y, orig_res_x, amount_of_output_channels))
used_dtpye = input_image.dtype
data = input_image.astype(np.float)
data = input_image.astype(np.float32)
# Forward mapping in order to distort the undistorted image coordinates
# and reshape the arrays into the image shape grid.
# The reference frame for coords is as in DLR CalDe etc. (the upper-left pixel center is at [0,0])
Expand Down
8 changes: 5 additions & 3 deletions blenderproc/python/loader/AMASSLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,13 +285,15 @@ def correct_materials(objects: List[MeshObject]):
skin_tone_fac = random.uniform(0.0, 1)
skin_tone_rgb = [value * skin_tone_fac for value in skin_tone_rgb]
principled_bsdf.inputs["Base Color"].default_value = mathutils.Vector([*skin_tone_rgb, 1.0])
principled_bsdf.inputs["Subsurface"].default_value = 0.2
principled_bsdf.inputs["Subsurface Color"].default_value = mathutils.Vector([*skin_tone_rgb, 1.0])

principled_bsdf.subsurface_method = "RANDOM_WALK_SKIN"
principled_bsdf.inputs["Subsurface Weight"].default_value = 1
principled_bsdf.inputs["Subsurface Scale"].default_value = 0.2
principled_bsdf.inputs["Subsurface Radius"].default_value = mathutils.Vector([1.0, 0.2, 0.1])
principled_bsdf.inputs["Subsurface IOR"].default_value = 2.5

# darker skin looks better when made less specular
principled_bsdf.inputs["Specular"].default_value = np.mean(skin_tone_rgb) / 255.0
principled_bsdf.inputs["Specular IOR Level"].default_value = np.mean(skin_tone_rgb) / 255.0

texture_nodes = material.get_nodes_with_type("ShaderNodeTexImage")
if texture_nodes and len(texture_nodes) > 1:
Expand Down
4 changes: 4 additions & 0 deletions blenderproc/python/loader/BopLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -347,6 +347,10 @@ def load_mesh(obj_id: int, model_p: dict, bop_dataset_name: str, scale: float =
# if the object was not previously loaded - load it, if duplication is allowed - duplicate it
duplicated = model_path in _BopLoader.CACHED_OBJECTS
objs = load_obj(model_path, cached_objects=_BopLoader.CACHED_OBJECTS)
# BOP objects come with incorrect custom normals, so remove them
for obj in objs:
obj.clear_custom_splitnormals()

assert (
len(objs) == 1
), f"Loading object from '{model_path}' returned more than one mesh"
Expand Down
2 changes: 1 addition & 1 deletion blenderproc/python/loader/CCMaterialLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def create_material(new_mat: bpy.types.Material, base_image_path: str, ambient_o
base_color = MaterialLoaderUtility.add_base_color(nodes, links, base_image_path, principled_bsdf)
collection_of_texture_nodes.append(base_color)

principled_bsdf.inputs["Specular"].default_value = 0.333
principled_bsdf.inputs["Specular IOR Level"].default_value = 0.333

ao_node = MaterialLoaderUtility.add_ambient_occlusion(nodes, links, ambient_occlusion_image_path,
principled_bsdf, base_color)
Expand Down
4 changes: 2 additions & 2 deletions blenderproc/python/loader/Front3DLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -368,11 +368,11 @@ def load_furniture_objs(data: dict, future_model_path: str, lamp_light_strength:

# Front3d .mtl files contain an emission color which makes the object mistakenly emissive
# => Reset the emission color
principled_node.inputs["Emission"].default_value[:3] = [0, 0, 0]
principled_node.inputs["Emission Color"].default_value[:3] = [0, 0, 0]

# Front3d .mtl files use Tf incorrectly, they make all materials fully transmissive
# Revert that:
principled_node.inputs["Transmission"].default_value = 0
principled_node.inputs["Transmission Weight"].default_value = 0

# For each a texture node
image_node = mat.new_node('ShaderNodeTexImage')
Expand Down
17 changes: 7 additions & 10 deletions blenderproc/python/loader/ObjectLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@
from blenderproc.python.material.MaterialLoaderUtility import create as create_material


def load_obj(filepath: str, cached_objects: Optional[Dict[str, List[MeshObject]]] = None,
use_legacy_obj_import: bool = False, **kwargs) -> List[MeshObject]:
def load_obj(filepath: str, cached_objects: Optional[Dict[str, List[MeshObject]]] = None, **kwargs) -> List[MeshObject]:
""" Import all objects for the given file and returns the loaded objects
In .obj files a list of objects can be saved in.
Expand All @@ -22,8 +21,6 @@ def load_obj(filepath: str, cached_objects: Optional[Dict[str, List[MeshObject]]
:param filepath: the filepath to the location where the data is stored
:param cached_objects: a dict of filepath to objects, which have been loaded before, to avoid reloading
(the dict is updated in this function)
:param use_legacy_obj_import: If this is true the old legacy obj importer in python is used. It is slower, but
it correctly imports the textures in the ShapeNet dataset.
:param kwargs: all other params are handed directly to the bpy loading fct. check the corresponding documentation
:return: The list of loaded mesh objects.
"""
Expand All @@ -43,11 +40,11 @@ def load_obj(filepath: str, cached_objects: Optional[Dict[str, List[MeshObject]]
# save all selected objects
previously_selected_objects = bpy.context.selected_objects
if filepath.endswith(".obj"):
# Set validate_meshes to False by default to be backwards compatible
if "validate_meshes" not in kwargs:
kwargs["validate_meshes"] = False
# load an .obj file:
if use_legacy_obj_import:
bpy.ops.import_scene.obj(filepath=filepath, **kwargs)
else:
bpy.ops.wm.obj_import(filepath=filepath, **kwargs)
bpy.ops.wm.obj_import(filepath=filepath, **kwargs)
elif filepath.endswith(".ply"):
PLY_TEXTURE_FILE_COMMENT = "comment TextureFile "
model_name = os.path.basename(filepath)
Expand Down Expand Up @@ -78,11 +75,11 @@ def load_obj(filepath: str, cached_objects: Optional[Dict[str, List[MeshObject]]
file.write(new_ply_file_content)

# Load .ply mesh
bpy.ops.import_mesh.ply(filepath=tmp_ply_file, **kwargs)
bpy.ops.wm.ply_import(filepath=tmp_ply_file, **kwargs)

else: # If no texture was given
# load a .ply mesh
bpy.ops.import_mesh.ply(filepath=filepath, **kwargs)
bpy.ops.wm.ply_import(filepath=filepath, **kwargs)
# Create default material
material = create_material('ply_material')
material.map_vertex_color()
Expand Down
3 changes: 3 additions & 0 deletions blenderproc/python/loader/ReplicaLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,9 @@ def load_replica(data_path: str, data_set_name: str, use_smooth_shading: bool =
"""
file_path = os.path.join(data_path, data_set_name, 'mesh.ply')
loaded_objects = load_obj(file_path)
# Replica comes with incorrect custom normals, so remove them
for obj in loaded_objects:
obj.clear_custom_splitnormals()

if use_smooth_shading:
for obj in loaded_objects:
Expand Down
7 changes: 5 additions & 2 deletions blenderproc/python/loader/ShapeNetLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@


def load_shapenet(data_path: str, used_synset_id: str, used_source_id: str = "",
move_object_origin: bool = True) -> MeshObject:
move_object_origin: bool = True, validate_meshes: bool = False) -> MeshObject:
""" This loads an object from ShapeNet based on the given synset_id, which specifies the category of objects to use.
From these objects one is randomly sampled and loaded.
Expand All @@ -30,6 +30,9 @@ def load_shapenet(data_path: str, used_synset_id: str, used_source_id: str = "",
:param move_object_origin: Moves the object center to the bottom of the bounding box in Z direction and also in the
middle of the X and Y plane, this does not change the `.location` of the object.
Default: True
:param validate_meshes: If set to True, imported meshes will be validated and corrected.
This might help for some ShapeNet objects to e.g. remove duplicate faces.
However, it might lead to the texturing being destroyed.
:return: The loaded mesh object.
"""
data_path = resolve_path(data_path)
Expand All @@ -39,7 +42,7 @@ def load_shapenet(data_path: str, used_synset_id: str, used_source_id: str = "",
taxonomy_file_path, data_path)
selected_obj = random.choice(files_with_fitting_synset)
# with the new version the textures are all wrong
loaded_objects = load_obj(selected_obj, use_legacy_obj_import=True)
loaded_objects = load_obj(selected_obj, validate_meshes=validate_meshes)

# In shapenet every .obj file only contains one object, make sure that is the case
if len(loaded_objects) != 1:
Expand Down
17 changes: 12 additions & 5 deletions blenderproc/python/material/Dust.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def add_dust(material: Material, strength: float, texture_nodes: List[bpy.types.
# the used dust color is a grey with a tint in orange
dust_color.inputs["Base Color"].default_value = [0.8, 0.773, 0.7, 1.0]
dust_color.inputs["Roughness"].default_value = 1.0
dust_color.inputs["Specular"].default_value = 0.0
dust_color.inputs["Specular IOR Level"].default_value = 0.0
links.new(dust_color.outputs["BSDF"], mix_shader.inputs[2])

# create the input and output nodes inside of the group
Expand All @@ -156,10 +156,17 @@ def add_dust(material: Material, strength: float, texture_nodes: List[bpy.types.
group_input.location = (x_pos + x_diff * 7, y_pos - y_diff * 0.5)

# create sockets for the outside of the group match them to the mix shader
group.outputs.new(mix_shader.outputs[0].bl_idname, mix_shader.outputs[0].name)
group.inputs.new(mix_shader.inputs[1].bl_idname, mix_shader.inputs[1].name)
group.inputs.new(multiply_node.inputs[1].bl_idname, "Dust strength")
group.inputs.new(mapping_node.inputs["Scale"].bl_idname, "Texture scale")
group.interface.new_socket(
mix_shader.outputs[0].name, in_out='OUTPUT', socket_type=mix_shader.outputs[0].bl_idname)
group.interface.new_socket(
mix_shader.inputs[1].name, in_out='INPUT', socket_type=mix_shader.inputs[1].bl_idname)
group.interface.new_socket(
"Dust strength", in_out='INPUT', socket_type=multiply_node.inputs[1].bl_idname)
# We set the socket_type='NodeSocketVector' directly instead of using
# 'mapping_node.inputs["Scale"].bl_idname', because 'Scale' has a specific bl_idname of
# 'NodeSocketVectorXYZ', but 'new_socket' expects 'NodeSocketVector'.
group.interface.new_socket(
"Texture scale", in_out='INPUT', socket_type='NodeSocketVector')

# link the input and output to the mix shader
links.new(group_input.outputs[0], mix_shader.inputs[1])
Expand Down
6 changes: 3 additions & 3 deletions blenderproc/python/material/MaterialLoaderUtility.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ def add_specular(nodes: bpy.types.Nodes, links: bpy.types.NodeLinks, specular_im
if os.path.exists(specular_image_path):
specular_texture = create_image_node(nodes, specular_image_path, True,
_x_texture_node, 0)
links.new(specular_texture.outputs["Color"], principled_bsdf.inputs["Specular"])
links.new(specular_texture.outputs["Color"], principled_bsdf.inputs["Specular IOR Level"])
return specular_texture
return None

Expand Down Expand Up @@ -347,7 +347,7 @@ def add_displacement(nodes: bpy.types.Nodes, links: bpy.types.NodeLinks, displac
_y_texture_node * -4)
displacement_node = nodes.new("ShaderNodeDisplacement")
displacement_node.inputs["Midlevel"].default_value = 0.5
displacement_node.inputs["Scale"].default_value = 0.15
displacement_node.inputs["Scale"].default_value = 0.03
displacement_node.location.x = _x_texture_node * 0.5
displacement_node.location.y = _y_texture_node * -4
links.new(displacement_texture.outputs["Color"], displacement_node.inputs["Height"])
Expand Down Expand Up @@ -482,7 +482,7 @@ def change_to_texture_less_render(use_alpha_channel):
principled_bsdf = Utility.get_the_one_node_with_type(nodes, "BsdfPrincipled")

# setting the color values for the shader
principled_bsdf.inputs['Specular'].default_value = 0.65 # specular
principled_bsdf.inputs['Specular IOR Level'].default_value = 0.65 # specular
principled_bsdf.inputs['Roughness'].default_value = 0.2 # roughness

for used_object in [obj for obj in bpy.context.scene.objects if hasattr(obj.data, 'materials')]:
Expand Down
21 changes: 14 additions & 7 deletions blenderproc/python/object/PhysicsSimulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def simulate_physics_and_fix_final_poses(min_simulation_time: float = 4.0, max_s
check_object_interval: float = 2.0,
object_stopped_location_threshold: float = 0.01,
object_stopped_rotation_threshold: float = 0.1, substeps_per_frame: int = 10,
solver_iters: int = 10, verbose: bool = False):
solver_iters: int = 10, verbose: bool = False, use_volume_com: bool = False):
""" Simulates the current scene and in the end fixes the final poses of all active objects.
The simulation is run for at least `min_simulation_time` seconds and at a maximum `max_simulation_time` seconds.
Expand All @@ -36,18 +36,21 @@ def simulate_physics_and_fix_final_poses(min_simulation_time: float = 4.0, max_s
:param substeps_per_frame: Number of simulation steps taken per frame.
:param solver_iters: Number of constraint solver iterations made per simulation step.
:param verbose: If True, more details during the physics simulation are printed.
:param use_volume_com: If True, the center of mass will be calculated by using the object volume.
This is more accurate than using the surface area (default), but requires a watertight mesh.
"""
# Undo changes made in the simulation like origin adjustment and persisting the object's scale
with UndoAfterExecution():
# Run simulation and remember poses before and after
obj_poses_before_sim = _PhysicsSimulation.get_pose()
origin_shifts = simulate_physics(min_simulation_time, max_simulation_time, check_object_interval,
object_stopped_location_threshold, object_stopped_rotation_threshold,
substeps_per_frame, solver_iters, verbose)
substeps_per_frame, solver_iters, verbose, use_volume_com)
obj_poses_after_sim = _PhysicsSimulation.get_pose()

# Make sure to remove the simulation cache as we are only interested in the final poses
bpy.ops.ptcache.free_bake({"point_cache": bpy.context.scene.rigidbody_world.point_cache})
with bpy.context.temp_override(point_cache=bpy.context.scene.rigidbody_world.point_cache):
bpy.ops.ptcache.free_bake()

# Fix the pose of all objects to their pose at the end of the simulation (also revert origin shift)
for obj in get_all_mesh_objects():
Expand Down Expand Up @@ -76,7 +79,7 @@ def simulate_physics_and_fix_final_poses(min_simulation_time: float = 4.0, max_s
def simulate_physics(min_simulation_time: float = 4.0, max_simulation_time: float = 40.0,
check_object_interval: float = 2.0, object_stopped_location_threshold: float = 0.01,
object_stopped_rotation_threshold: float = 0.1, substeps_per_frame: int = 10,
solver_iters: int = 10, verbose: bool = False) -> dict:
solver_iters: int = 10, verbose: bool = False, use_volume_com: bool = False) -> dict:
""" Simulates the current scene.
The simulation is run for at least `min_simulation_time` seconds and at a maximum `max_simulation_time` seconds.
Expand All @@ -100,14 +103,16 @@ def simulate_physics(min_simulation_time: float = 4.0, max_simulation_time: floa
:param substeps_per_frame: Number of simulation steps taken per frame.
:param solver_iters: Number of constraint solver iterations made per simulation step.
:param verbose: If True, more details during the physics simulation are printed.
:param use_volume_com: If True, the center of mass will be calculated by using the object volume.
This is more accurate than using the surface area (default), but requires a watertight mesh.
:return: A dict containing for every active object the shift that was added to their origins.
"""
# Shift the origin of all objects to their center of mass to make the simulation more realistic
origin_shift = {}
for obj in get_all_mesh_objects():
if obj.has_rigidbody_enabled():
prev_origin = obj.get_origin()
new_origin = obj.set_origin(mode="CENTER_OF_VOLUME")
new_origin = obj.set_origin(mode="ORIGIN_CENTER_OF_VOLUME" if use_volume_com else "CENTER_OF_MASS")
origin_shift[obj.get_name()] = new_origin - prev_origin

# Persist mesh scaling as having a scale != 1 can make the simulation unstable
Expand Down Expand Up @@ -184,7 +189,8 @@ def do_simulation(min_simulation_time: float, max_simulation_time: float, check_
# Simulate current interval
point_cache.frame_end = current_frame
with stdout_redirected(enabled=not verbose):
bpy.ops.ptcache.bake({"point_cache": point_cache}, bake=True)
with bpy.context.temp_override(point_cache=point_cache):
bpy.ops.ptcache.bake(bake=True)

# Go to second last frame and get poses
bpy.context.scene.frame_set(current_frame - _PhysicsSimulation.seconds_to_frames(1))
Expand All @@ -205,7 +211,8 @@ def do_simulation(min_simulation_time: float, max_simulation_time: float, check_
else:
# Free bake (this will not completely remove the simulation cache, so further simulations can
# reuse the already calculated frames)
bpy.ops.ptcache.free_bake({"point_cache": point_cache})
with bpy.context.temp_override(point_cache=point_cache):
bpy.ops.ptcache.free_bake()

@staticmethod
def get_pose() -> dict:
Expand Down
Loading

0 comments on commit ee197a9

Please sign in to comment.