From dd9bc6a06406464aca31530d0d7d668b333d5994 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Thu, 18 Nov 2021 18:34:13 +0100
Subject: [PATCH 01/46] Fix(MaterialUtility): recover references, add
created_in_func, add remove_emissive
---
blenderproc/python/types/MaterialUtility.py | 43 ++++++++++++++++-----
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/blenderproc/python/types/MaterialUtility.py b/blenderproc/python/types/MaterialUtility.py
index fc3f846a9..de71d8b48 100644
--- a/blenderproc/python/types/MaterialUtility.py
+++ b/blenderproc/python/types/MaterialUtility.py
@@ -22,6 +22,8 @@ def update_blender_ref(self, name):
:param name: The name of the instance which will be used to update its blender reference.
"""
self.blender_obj = bpy.data.materials[name]
+ self.nodes = bpy.data.materials[name].node_tree.nodes
+ self.links = bpy.data.materials[name].node_tree.links
def get_users(self) -> int:
""" Returns the number of users of the material.
@@ -37,31 +39,45 @@ def duplicate(self) -> "Material":
"""
return Material(self.blender_obj.copy())
- def get_the_one_node_with_type(self, node_type: str) -> bpy.types.Node:
+ def get_the_one_node_with_type(self, node_type: str, created_in_func: str = "") -> bpy.types.Node:
""" Returns the one node which is of the given node_type
This function will only work if there is only one of the nodes of this type.
:param node_type: The node type to look for.
+ :param created_in_func: only return node belonging to function defined by custom property 'created_in_func'
:return: The node.
"""
- return Utility.get_the_one_node_with_type(self.nodes, node_type)
+ return Utility.get_the_one_node_with_type(self.nodes, node_type, created_in_func)
- def get_nodes_with_type(self, node_type: str) -> List[bpy.types.Node]:
+ def get_nodes_with_type(self, node_type: str, created_in_func: str = "") -> List[bpy.types.Node]:
""" Returns all nodes which are of the given node_type
:param node_type: The note type to look for.
+ :param created_in_func: only return nodes belonging to function defined by custom property 'created_in_func'
:return: The list of nodes with the given type.
"""
- return Utility.get_nodes_with_type(self.nodes, node_type)
+ return Utility.get_nodes_with_type(self.nodes, node_type, created_in_func)
+
+ def get_nodes_created_in_func(self, created_in_func: str) -> List[bpy.types.Node]:
+ """ Returns all nodes which are of the given node_type
+
+ :param created_in_func: return nodes created in that function
+ :return: The list of nodes with the given type.
+ """
+ return Utility.get_nodes_created_in_func(self.nodes, created_in_func)
- def new_node(self, node_type: str) -> bpy.types.Node:
+ def new_node(self, node_type: str, created_in_func: str = "") -> bpy.types.Node:
""" Creates a new node in the material's node tree.
:param node_type: The desired type of the new node.
+ :param created_in_func: Set custom property
:return: The new node.
"""
- return self.nodes.new(node_type)
+ new_node = self.nodes.new(node_type)
+ if created_in_func:
+ new_node["created_in_func"] = created_in_func
+ return new_node
def remove_node(self, node: bpy.types.Node):
""" Removes the node from the material's node tree.
@@ -132,7 +148,12 @@ def map_vertex_color(self, layer_name: str = 'Col', active_shading: bool = True)
else:
raise Exception("Material '{}' has no node connected to the output, "
"which has as a 'Base Color' input.".format(self.blender_obj.name))
-
+ def remove_emissive(self):
+ """ Remove emissive part of the material.
+ """
+ for node in self.get_nodes_created_in_func(self.make_emissive.__name__):
+ self.remove_node(node)
+
def make_emissive(self, emission_strength: float, replace: bool = False, keep_using_base_color: bool = True,
emission_color: List[float] = None, non_emissive_color_socket: bpy.types.NodeSocket = None):
""" Makes the material emit light.
@@ -143,10 +164,12 @@ def make_emissive(self, emission_strength: float, replace: bool = False, keep_us
:param emission_color: The color of the light to emit. Is ignored if keep_using_base_color is set to True.
:param non_emissive_color_socket: An output socket that defines how the material should look like. By default that is the output of the principled shader node. Has no effect if replace is set to True.
"""
+ self.remove_emissive()
+
output_node = self.get_the_one_node_with_type("OutputMaterial")
if not replace:
- mix_node = self.new_node('ShaderNodeMixShader')
+ mix_node = self.new_node('ShaderNodeMixShader', self.make_emissive.__name__)
if non_emissive_color_socket is None:
principled_bsdf = self.get_the_one_node_with_type("BsdfPrincipled")
non_emissive_color_socket = principled_bsdf.outputs['BSDF']
@@ -156,13 +179,13 @@ def make_emissive(self, emission_strength: float, replace: bool = False, keep_us
# The light path node returns 1, if the material is hit by a ray coming from the camera, else it
# returns 0. In this way the mix shader will use the principled shader for rendering the color of
# the emitting surface itself, while using the emission shader for lighting the scene.
- light_path_node = self.new_node('ShaderNodeLightPath')
+ light_path_node = self.new_node('ShaderNodeLightPath', self.make_emissive.__name__)
self.link(light_path_node.outputs['Is Camera Ray'], mix_node.inputs['Fac'])
output_socket = mix_node.inputs[1]
else:
output_socket = output_node.inputs['Surface']
- emission_node = self.new_node('ShaderNodeEmission')
+ emission_node = self.new_node('ShaderNodeEmission', self.make_emissive.__name__)
if keep_using_base_color:
principled_bsdf = self.get_the_one_node_with_type("BsdfPrincipled")
From 25525b4586b41b735b7041c83dd8563189e74fb5 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Thu, 18 Nov 2021 18:37:06 +0100
Subject: [PATCH 02/46] Feat(Utility): get_nodes_created_in_func
---
blenderproc/python/utility/Utility.py | 23 +++++++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/blenderproc/python/utility/Utility.py b/blenderproc/python/utility/Utility.py
index 6edd23c99..2a4765c39 100644
--- a/blenderproc/python/utility/Utility.py
+++ b/blenderproc/python/utility/Utility.py
@@ -259,18 +259,22 @@ def get_node_connected_to_the_output_and_unlink_it(material: bpy.types.Material)
return node_connected_to_the_output, material_output
@staticmethod
- def get_nodes_with_type(nodes: List[bpy.types.Node], node_type: str) -> List[bpy.types.Node]:
+ def get_nodes_with_type(nodes: List[bpy.types.Node], node_type: str, created_in_func: str = "") -> List[bpy.types.Node]:
"""
Returns all nodes which are of the given node_type
:param nodes: list of nodes of the current material
:param node_type: node types
+ :param created_in_func: only return nodes belonging to function defined by custom property 'created_in_func'
:return: list of nodes, which belong to the type
"""
- return [node for node in nodes if node_type in node.bl_idname]
+ nodes_with_type = [node for node in nodes if node_type in node.bl_idname]
+ if created_in_func:
+ nodes_with_type = Utility.get_nodes_created_in_func(nodes_with_type, created_in_func)
+ return nodes_with_type
@staticmethod
- def get_the_one_node_with_type(nodes: List[bpy.types.Node], node_type: str) -> bpy.types.Node:
+ def get_the_one_node_with_type(nodes: List[bpy.types.Node], node_type: str, created_in_func: str = "") -> bpy.types.Node:
"""
Returns the one nodes which is of the given node_type
@@ -278,14 +282,25 @@ def get_the_one_node_with_type(nodes: List[bpy.types.Node], node_type: str) -> b
:param nodes: list of nodes of the current material
:param node_type: node types
+ :param created_in_func: only return node belonging to function defined by custom property 'created_in_func'
:return: node of the node type
"""
- node = Utility.get_nodes_with_type(nodes, node_type)
+ node = Utility.get_nodes_with_type(nodes, node_type, created_in_func)
if node and len(node) == 1:
return node[0]
else:
raise Exception("There is not only one node of this type: {}, there are: {}".format(node_type, len(node)))
+ @staticmethod
+ def get_nodes_created_in_func(nodes: List[bpy.types.Node], created_in_func: str) -> List[bpy.types.Node]:
+ """ Returns all nodes which are created in the given function
+
+ :param nodes: list of nodes of the current material
+ :param created_in_func: return nodes created in that function
+ :return: The list of nodes with the given type.
+ """
+ return [node for node in nodes if "created_in_func" in node and node["created_in_func"] == created_in_func]
+
@staticmethod
def read_suncg_lights_windows_materials() -> Tuple[Dict[str, Tuple[List[str], List[str]]], List[str]]:
"""
From 1db4c6b1a0b25de2787ce9c385bb9471fc925898 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Thu, 18 Nov 2021 18:37:59 +0100
Subject: [PATCH 03/46] Feat(BopWriterUtility): more informative errors
---
blenderproc/python/writer/BopWriterUtility.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blenderproc/python/writer/BopWriterUtility.py b/blenderproc/python/writer/BopWriterUtility.py
index 18fff3c34..8ba535900 100644
--- a/blenderproc/python/writer/BopWriterUtility.py
+++ b/blenderproc/python/writer/BopWriterUtility.py
@@ -202,6 +202,8 @@ def _get_frame_gt(dataset_objects: List[bpy.types.Mesh], unit_scaling: float, ig
cam_R_m2c = cam_H_m2c.to_quaternion().to_matrix()
cam_t_m2c = cam_H_m2c.to_translation()
+ assert "category_id" in obj, "{} object has no custom property 'category_id'".format(obj.get_name())
+
# ignore examples that fell through the plane
if not np.linalg.norm(list(cam_t_m2c)) > ignore_dist_thres:
cam_t_m2c = list(cam_t_m2c * unit_scaling)
From 7867db437fe1caacb57971714d29cc2418300dbe Mon Sep 17 00:00:00 2001
From: Klaus Strobl
Date: Fri, 19 Nov 2021 11:18:30 +0100
Subject: [PATCH 04/46] doc(examples/*README*): Add lens_distortion in READMEs
---
examples/README.md | 3 ++-
examples/advanced/README.md | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/examples/README.md b/examples/README.md
index 8f9149780..0db9dacec 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,6 +1,6 @@
# Examples overview
-We structure our examples into three different group, we encourage everyone to start with the [basic examples](basics/README.md).
+We structure our examples into three different groups. We encourage everyone to start with the [basic examples](basics/README.md).
Each folder contains an example, some of those require external datasets and/or resources. Each example provides a valid configuration file(s) that can be used for getting some sort of output, a description, and, optionally, some resources.
@@ -33,6 +33,7 @@ These examples introduce usage of advanced BlenderProc modules and/or of their c
* [diffuse_color_image](advanced/diffuse_color_image/README.md): How to render a scene without any lighting or shading.
* [dust](advanced/dust/README.md): How to add dust on top objects, to make them look more real.
* [entity_displacement_modifier](advanced/entity_displacement_modifier/README.md): Using displacement modifiers with different textures.
+* [camera lens distortion generation and validation](advanced/lens_distortion/README.md): Add lens distortion from camera calibration to all output images.
* [material_randomizer](advanced/material_randomizer/README.md): Randomization of materials of selected objects.
* [motion_blur_rolling_shutter](advanced/motion_blur_rolling_shutter/README.md): Generating motion blur and a rolling shutter effects.
* [object_pose_sampling](advanced/object_pose_sampling/README.md): Complex use of a 6D pose sampler.
diff --git a/examples/advanced/README.md b/examples/advanced/README.md
index e79a0d9fb..2c0549881 100644
--- a/examples/advanced/README.md
+++ b/examples/advanced/README.md
@@ -1,5 +1,5 @@
# Advanced Examples
-These examples introduce usage of advanced BlenderProc modules and/or of their combinations.
+These examples introduce the usage of advanced BlenderProc modules and/or their combinations.
* [auto_shading](auto_shading/README.md): How to change the shading mode of an object.
* [camera_depth_of_field](camera_depth_of_field/README.md): Setting an object as the camera depth of field focus point.
@@ -7,6 +7,7 @@ These examples introduce usage of advanced BlenderProc modules and/or of their c
* [diffuse_color_image](diffuse_color_image/README.md): How to render a scene without any lighting or shading.
* [dust](dust/README.md): How to add dust on top objects, to make them look more real.
* [entity_displacement_modifier](entity_displacement_modifier/README.md): Using displacement modifiers with different textures.
+* [lens_distortion](lens_distortion/README.md): Add lens distortion from camera calibration to all output images.
* [material_randomizer](material_randomizer/README.md): Randomization of materials of selected objects.
* [motion_blur_rolling_shutter](motion_blur_rolling_shutter/README.md): Generating motion blur and a rolling shutter effects.
* [object_pose_sampling](object_pose_sampling/README.md): Complex use of a 6D pose sampler.
From 4614e7da08aefa0570222c99ff3ac8bcc5290663 Mon Sep 17 00:00:00 2001
From: "Strobl, Klaus"
Date: Fri, 19 Nov 2021 13:18:21 +0100
Subject: [PATCH 05/46] fix(examples/README): Folder name
---
examples/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/README.md b/examples/README.md
index 0db9dacec..c0c95bfe1 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -33,7 +33,7 @@ These examples introduce usage of advanced BlenderProc modules and/or of their c
* [diffuse_color_image](advanced/diffuse_color_image/README.md): How to render a scene without any lighting or shading.
* [dust](advanced/dust/README.md): How to add dust on top objects, to make them look more real.
* [entity_displacement_modifier](advanced/entity_displacement_modifier/README.md): Using displacement modifiers with different textures.
-* [camera lens distortion generation and validation](advanced/lens_distortion/README.md): Add lens distortion from camera calibration to all output images.
+* [lens_distortion](advanced/lens_distortion/README.md): Add lens distortion from camera calibration to all output images.
* [material_randomizer](advanced/material_randomizer/README.md): Randomization of materials of selected objects.
* [motion_blur_rolling_shutter](advanced/motion_blur_rolling_shutter/README.md): Generating motion blur and a rolling shutter effects.
* [object_pose_sampling](advanced/object_pose_sampling/README.md): Complex use of a 6D pose sampler.
From e8529af2ee55a6caea142478b24a2cb50a25ac5b Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Mon, 22 Nov 2021 10:39:28 +0100
Subject: [PATCH 06/46] Chore(MaterialUtility): Improve documentation
---
blenderproc/python/types/MaterialUtility.py | 9 +++++----
blenderproc/python/utility/Utility.py | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/blenderproc/python/types/MaterialUtility.py b/blenderproc/python/types/MaterialUtility.py
index de71d8b48..72f4cbe10 100644
--- a/blenderproc/python/types/MaterialUtility.py
+++ b/blenderproc/python/types/MaterialUtility.py
@@ -45,7 +45,7 @@ def get_the_one_node_with_type(self, node_type: str, created_in_func: str = "")
This function will only work if there is only one of the nodes of this type.
:param node_type: The node type to look for.
- :param created_in_func: only return node belonging to function defined by custom property 'created_in_func'
+ :param created_in_func: only return node created by the specified function
:return: The node.
"""
return Utility.get_the_one_node_with_type(self.nodes, node_type, created_in_func)
@@ -54,7 +54,7 @@ def get_nodes_with_type(self, node_type: str, created_in_func: str = "") -> List
""" Returns all nodes which are of the given node_type
:param node_type: The note type to look for.
- :param created_in_func: only return nodes belonging to function defined by custom property 'created_in_func'
+ :param created_in_func: only return nodes created by the specified function
:return: The list of nodes with the given type.
"""
return Utility.get_nodes_with_type(self.nodes, node_type, created_in_func)
@@ -62,7 +62,7 @@ def get_nodes_with_type(self, node_type: str, created_in_func: str = "") -> List
def get_nodes_created_in_func(self, created_in_func: str) -> List[bpy.types.Node]:
""" Returns all nodes which are of the given node_type
- :param created_in_func: return nodes created in that function
+ :param created_in_func: return all nodes created in the given function
:return: The list of nodes with the given type.
"""
return Utility.get_nodes_created_in_func(self.nodes, created_in_func)
@@ -71,7 +71,8 @@ def new_node(self, node_type: str, created_in_func: str = "") -> bpy.types.Node:
""" Creates a new node in the material's node tree.
:param node_type: The desired type of the new node.
- :param created_in_func: Set custom property
+ :param created_in_func: Save the function name in which this node was created as a custom property.
+ Allows to later retrieve and delete specific nodes again.
:return: The new node.
"""
new_node = self.nodes.new(node_type)
diff --git a/blenderproc/python/utility/Utility.py b/blenderproc/python/utility/Utility.py
index 2a4765c39..718607e00 100644
--- a/blenderproc/python/utility/Utility.py
+++ b/blenderproc/python/utility/Utility.py
@@ -265,7 +265,7 @@ def get_nodes_with_type(nodes: List[bpy.types.Node], node_type: str, created_in_
:param nodes: list of nodes of the current material
:param node_type: node types
- :param created_in_func: only return nodes belonging to function defined by custom property 'created_in_func'
+ :param created_in_func: only return nodes created by the specified function
:return: list of nodes, which belong to the type
"""
nodes_with_type = [node for node in nodes if node_type in node.bl_idname]
@@ -282,7 +282,7 @@ def get_the_one_node_with_type(nodes: List[bpy.types.Node], node_type: str, crea
:param nodes: list of nodes of the current material
:param node_type: node types
- :param created_in_func: only return node belonging to function defined by custom property 'created_in_func'
+ :param created_in_func: only return node created by the specified function
:return: node of the node type
"""
node = Utility.get_nodes_with_type(nodes, node_type, created_in_func)
From 55a4474bf4b45a9f049597013d83a421e543f149 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Tue, 23 Nov 2021 17:42:59 +0100
Subject: [PATCH 07/46] Feat(bop_challenge): hb script
---
.../datasets/bop_challenge/main_hb_random.py | 153 ++++++++++++++++++
1 file changed, 153 insertions(+)
create mode 100644 examples/datasets/bop_challenge/main_hb_random.py
diff --git a/examples/datasets/bop_challenge/main_hb_random.py b/examples/datasets/bop_challenge/main_hb_random.py
new file mode 100644
index 000000000..401a7a2ca
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_hb_random.py
@@ -0,0 +1,153 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+import random
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
+parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
+parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
+parser.add_argument('cc_textures_path', nargs='?', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', nargs='?', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'),
+ sys_paths = args.bop_toolkit_path,
+ mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
+ model_type = 'cad',
+ sys_paths = args.bop_toolkit_path,
+ mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'),
+ sys_paths = args.bop_toolkit_path,
+ mm2m = True)
+tyol_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'),
+ sys_paths = args.bop_toolkit_path,
+ mm2m = True)
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + tyol_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from the ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(200)
+
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
+ max = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=20, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=2, replace=False))
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
+ mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+ obj.hide(False)
+
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89, uniform_volume = True)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 20,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.44,
+ radius_max = 1.42,
+ elevation_min = 5,
+ elevation_max = 89,
+ uniform_volume = True)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+        # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ dataset = args.bop_dataset_name,
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
From a70eec1fabb5abb0c7f133de8af3f8d5bccdc5fb Mon Sep 17 00:00:00 2001
From: Dominik Winkelbauer
Date: Fri, 26 Nov 2021 15:40:57 +0100
Subject: [PATCH 08/46] fix(pip): Removes cache of installed packages after
running "pip uninstall"
---
blenderproc/python/utility/SetupUtility.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/blenderproc/python/utility/SetupUtility.py b/blenderproc/python/utility/SetupUtility.py
index 2e9625fe8..3f16ac841 100644
--- a/blenderproc/python/utility/SetupUtility.py
+++ b/blenderproc/python/utility/SetupUtility.py
@@ -234,6 +234,9 @@ def uninstall_pip_packages(package_names: List[str], blender_path: str, major_ve
# Run pip uninstall
subprocess.Popen([python_bin, "-m", "pip", "uninstall"] + package_names, env=dict(os.environ, PYTHONPATH=packages_path)).wait()
+ # Clear installed packages cache
+ SetupUtility.clean_installed_packages_cache(blender_path, major_version)
+
@staticmethod
def _ensure_pip(python_bin: str, packages_path: str, pre_python_package_path: str, force_update: bool = False):
""" Make sure pip is installed and read in the already installed packages
From 18a6feb564a4845f7947d3d58e7ba0e0c82de305 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Fri, 26 Nov 2021 17:47:50 +0100
Subject: [PATCH 09/46] Feat(BopLoader): New API load_bop_objs, load_bop_scene,
load_bop_intrinsics
---
blenderproc/api/loader/__init__.py | 2 +-
blenderproc/python/loader/BopLoader.py | 282 ++++++++++--------
.../python/modules/loader/BopLoaderModule.py | 44 ++-
blenderproc/python/utility/Utility.py | 2 +-
4 files changed, 190 insertions(+), 140 deletions(-)
diff --git a/blenderproc/api/loader/__init__.py b/blenderproc/api/loader/__init__.py
index 2a0e21b62..9034e5847 100644
--- a/blenderproc/api/loader/__init__.py
+++ b/blenderproc/api/loader/__init__.py
@@ -1,6 +1,6 @@
from blenderproc.python.loader.AMASSLoader import load_AMASS
from blenderproc.python.loader.BlendLoader import load_blend
-from blenderproc.python.loader.BopLoader import load_bop
+from blenderproc.python.loader.BopLoader import load_bop_objs, load_bop_scene, load_bop_intrinsics
from blenderproc.python.loader.CCMaterialLoader import load_ccmaterials
from blenderproc.python.loader.Front3DLoader import load_front3d
from blenderproc.python.loader.HavenMaterialLoader import load_haven_mat
diff --git a/blenderproc/python/loader/BopLoader.py b/blenderproc/python/loader/BopLoader.py
index fe5be072a..0104a2c58 100644
--- a/blenderproc/python/loader/BopLoader.py
+++ b/blenderproc/python/loader/BopLoader.py
@@ -7,6 +7,7 @@
import numpy as np
from mathutils import Matrix, Vector
+from blenderproc.python.utility.SetupUtility import SetupUtility
import blenderproc.python.camera.CameraUtility as CameraUtility
from blenderproc.python.types.MeshObjectUtility import MeshObject
from blenderproc.python.utility.Utility import Utility
@@ -14,167 +15,202 @@
from blenderproc.python.types.MaterialUtility import Material
-def load_bop(bop_dataset_path: str, sys_paths: Union[List[str], str], temp_dir: str = None, model_type: str = "", cam_type: str = "", split: str = "test", scene_id: int = -1, obj_ids: list = [], sample_objects: bool = False, num_of_objs_to_sample: int = None, obj_instances_limit: int = -1, move_origin_to_x_y_plane: bool = False, source_frame: list = ["X", "-Y", "-Z"], mm2m: bool = False) -> List[MeshObject]:
- """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes
-
- - Interfaces with the bob_toolkit, allows loading of train, val and test splits
- - Relative camera poses are loaded/computed with respect to a reference model
- - Sets real camera intrinsics
+def load_bop_objs(bop_dataset_path: str, model_type: str = "", obj_ids: list = [], sample_objects: bool = False, num_of_objs_to_sample: int = None,
+ obj_instances_limit: int = -1, mm2m: bool = False, move_origin_to_x_y_plane: bool = False, temp_dir: str = None, ) -> List[MeshObject]:
+ """ Loads all or a subset of 3D models of any BOP dataset
:param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
- :param sys_paths: System paths to append. Can be a string or a list of strings.
- :param temp_dir: A temp directory which is used for writing the temporary .ply file.
- :param model_type: Optionally, specify type of BOP model. Available: [reconst, cad or eval].
- :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
- :param split: Optionally, test or val split depending on BOP dataset.
- :param scene_id: Optionally, specify BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated,
- only BOP Objects are loaded).
- :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not
- specified).
+ :param model_type: Optionally, specify type of BOP model. Available: [reconst, cad or eval].
+ :param obj_ids: List of object ids to load. Default: [] (load all objects from the given BOP dataset)
:param sample_objects: Toggles object sampling from the specified dataset.
:param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger than the dataset
actually contains, then all objects will be loaded.
:param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
+ :param mm2m: Specify whether to convert poses and models to meters.
:param move_origin_to_x_y_plane: Move center of the object to the lower side of the object, this will not work when used in combination with
pose estimation tasks! This is designed for the use-case where BOP objects are used as filler objects in
the background.
- :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
- frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
- to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
- :param mm2m: Specify whether to convert poses and models to meters.
+ :param temp_dir: A temp directory which is used for writing the temporary .ply file.
:return: The list of loaded mesh objects.
"""
- # Make sure sys_paths is a list
- if not isinstance(sys_paths, list):
- sys_paths = [sys_paths]
-
- for sys_path in sys_paths:
- if 'bop_toolkit' in sys_path:
- sys.path.append(sys_path)
+ bop_path, bop_dataset_name = BopLoader._setup_bop_toolkit(bop_dataset_path)
+
+ from bop_toolkit_lib import dataset_params
+ model_p = dataset_params.get_model_params(bop_path, bop_dataset_name, model_type=model_type if model_type else None)
+
+ # temp_dir, scale, bop_dataset_name, has_external_texture, model_p = setup_bop(bop_dataset_path, temp_dir, model_type, mm2m)
if temp_dir is None:
temp_dir = Utility.get_temporary_directory()
-
+
scale = 0.001 if mm2m else 1
- bop_dataset_name = os.path.basename(bop_dataset_path)
has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
- if obj_ids or sample_objects:
- allow_duplication = True
+ allow_duplication = obj_ids or sample_objects
+
+ obj_ids = obj_ids if obj_ids else model_p['obj_ids']
+
+ loaded_objects = []
+ # if sampling is enabled
+ if sample_objects:
+ loaded_ids = {}
+ loaded_amount = 0
+ if obj_instances_limit != -1 and len(obj_ids) * obj_instances_limit < num_of_objs_to_sample:
+ raise RuntimeError("{}'s {} split contains {} objects, {} object where requested to sample with "
+ "an instances limit of {}. Raise the limit amount or decrease the requested "
+ "amount of objects.".format(bop_dataset_path, split, len(obj_ids),
+ num_of_objs_to_sample,
+ obj_instances_limit))
+ while loaded_amount != num_of_objs_to_sample:
+ random_id = choice(obj_ids)
+ if random_id not in loaded_ids.keys():
+ loaded_ids.update({random_id: 0})
+ # if there is no limit or if there is one, but it is not reached for this particular object
+ if obj_instances_limit == -1 or loaded_ids[random_id] < obj_instances_limit:
+ cur_obj = BopLoader._load_mesh(random_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
+ loaded_ids[random_id] += 1
+ loaded_amount += 1
+ loaded_objects.append(cur_obj)
+ else:
+ print("ID {} was loaded {} times with limit of {}. Total loaded amount {} while {} are "
+ "being requested".format(random_id, loaded_ids[random_id], obj_instances_limit,
+ loaded_amount, num_of_objs_to_sample))
else:
- allow_duplication = False
-
- datasets_path = os.path.dirname(bop_dataset_path)
- dataset = os.path.basename(bop_dataset_path)
-
- print("bob: {}, dataset_path: {}".format(bop_dataset_path, datasets_path))
- print("dataset: {}".format(dataset))
+ for obj_id in obj_ids:
+ cur_obj = BopLoader._load_mesh(obj_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
+ loaded_objects.append(cur_obj)
+ # move the origin of the object to the world origin and on top of the X-Y plane
+ # makes it easier to place them later on, this does not change the `.location`
+ # This is only useful if the BOP objects are not used in a pose estimation scenario.
+ if move_origin_to_x_y_plane:
+ for obj in loaded_objects:
+ obj.move_origin_to_bottom_mean_point()
+
+ return loaded_objects
+
+def load_bop_scene(bop_dataset_path: str, scene_id: int, model_type: str = "", cam_type: str = "", split: str = "test", source_frame: list = ["X", "-Y", "-Z"], mm2m: bool = False, temp_dir: str = None) -> List[MeshObject]:
+ """ Replicate a BOP scene from the given dataset: load scene objects, object poses, camera intrinsics and extrinsics
- try:
- from bop_toolkit_lib import dataset_params, inout
- except ImportError as error:
- print('ERROR: Please download the bop_toolkit package and add it to sys_paths in config!')
- print('https://github.com/thodan/bop_toolkit')
- raise error
+ - Interfaces with the bop_toolkit, allows loading of train, val and test splits
+ - Relative camera poses are loaded/computed with respect to a reference model
+ - Sets real camera intrinsics
- model_p = dataset_params.get_model_params(datasets_path, dataset, model_type=model_type if model_type else None)
- cam_p = dataset_params.get_camera_params(datasets_path, dataset, cam_type=cam_type if cam_type else None)
+ :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
+ :param scene_id: Specify the BOP dataset scene to synthetically replicate; its objects,
+ object poses and camera parameters are loaded.
+ :param model_type: Optionally, specify type of BOP model. Available: [reconst, cad or eval].
+ :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
+ :param split: Optionally, test or val split depending on BOP dataset.
+ :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
+ frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
+ to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
+ :param mm2m: Specify whether to convert poses and models to meters.
+ :param temp_dir: A temp directory which is used for writing the temporary .ply file.
+ :return: The list of loaded mesh objects.
+ """
+ bop_path, bop_dataset_name = BopLoader._setup_bop_toolkit(bop_dataset_path)
+
+ from bop_toolkit_lib import dataset_params, inout
+ model_p = dataset_params.get_model_params(bop_path, bop_dataset_name, model_type=model_type if model_type else None)
try:
- split_p = dataset_params.get_split_params(datasets_path, dataset, split=split)
+ split_p = dataset_params.get_split_params(bop_path, bop_dataset_name, split=split, split_type=cam_type if cam_type else None)
except ValueError:
- raise Exception("Wrong path or {} split does not exist in {}.".format(split, dataset))
+ raise Exception("Wrong path or {} split does not exist in {}.".format(split, bop_dataset_name))
+ sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
+ sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
+
+ if temp_dir is None:
+ temp_dir = Utility.get_temporary_directory()
+
+ scale = 0.001 if mm2m else 1
+ has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
+
+ for i, (cam_id, insts) in enumerate(sc_gt.items()):
+ cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(sc_camera, cam_id, insts, scale)
- bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
- bpy.context.scene.render.resolution_y = cam_p['im_size'][1]
+ if i == 0:
+ # define world = first camera
+ cam_H_m2w_ref = cam_H_m2c_ref.copy()
- loaded_objects = []
+ cur_objs = []
+ # load scene objects and set their poses
+ for inst in insts:
+ cur_objs.append(BopLoader._load_mesh(inst['obj_id'], model_p, bop_dataset_name, has_external_texture, temp_dir, False, scale))
+ BopLoader.set_object_pose(cur_objs[-1], inst, scale)
- # only load all/selected objects here, use other modules for setting poses
- # e.g. camera.CameraSampler / object.ObjectPoseSampler
- if scene_id == -1:
+ cam_H_c2w = BopLoader._compute_camera_to_world_trafo(cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
+ # set camera intrinsics
+ CameraUtility.set_intrinsics_from_K_matrix(cam_K, split_p['im_size'][0], split_p['im_size'][1])
- # TLESS exception because images are cropped
- if bop_dataset_name in ['tless']:
- cam_p['K'][0, 2] = split_p['im_size'][0] / 2
- cam_p['K'][1, 2] = split_p['im_size'][1] / 2
+ # set camera extrinsics as next frame
+ frame_id = CameraUtility.add_camera_pose(cam_H_c2w)
- # set camera intrinsics
- CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1])
-
- obj_ids = obj_ids if obj_ids else model_p['obj_ids']
- # if sampling is enabled
- if sample_objects:
- loaded_ids = {}
- loaded_amount = 0
- if obj_instances_limit != -1 and len(obj_ids) * obj_instances_limit < num_of_objs_to_sample:
- raise RuntimeError("{}'s {} split contains {} objects, {} object where requested to sample with "
- "an instances limit of {}. Raise the limit amount or decrease the requested "
- "amount of objects.".format(bop_dataset_path, split, len(obj_ids),
- num_of_objs_to_sample,
- obj_instances_limit))
- while loaded_amount != num_of_objs_to_sample:
- random_id = choice(obj_ids)
- if random_id not in loaded_ids.keys():
- loaded_ids.update({random_id: 0})
- # if there is no limit or if there is one, but it is not reached for this particular object
- if obj_instances_limit == -1 or loaded_ids[random_id] < obj_instances_limit:
- cur_obj = BopLoader._load_mesh(random_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
- loaded_ids[random_id] += 1
- loaded_amount += 1
- loaded_objects.append(cur_obj)
- else:
- print("ID {} was loaded {} times with limit of {}. Total loaded amount {} while {} are "
- "being requested".format(random_id, loaded_ids[random_id], obj_instances_limit,
- loaded_amount, num_of_objs_to_sample))
- else:
- for obj_id in obj_ids:
- cur_obj = BopLoader._load_mesh(obj_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
- loaded_objects.append(cur_obj)
+ # Add key frame for camera shift, as it changes from frame to frame in the tless replication
+ cam = bpy.context.scene.camera.data
+ cam.keyframe_insert(data_path='shift_x', frame=frame_id)
+ cam.keyframe_insert(data_path='shift_y', frame=frame_id)
- # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
- else:
- sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
- sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
- for i, (cam_id, insts) in enumerate(sc_gt.items()):
- cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(sc_camera, cam_id, insts, scale)
+ # Copy object poses to key frame (to be sure)
+ for cur_obj in cur_objs:
+ BopLoader._insert_key_frames(cur_obj, frame_id)
- if i == 0:
- # define world = first camera
- cam_H_m2w_ref = cam_H_m2c_ref.copy()
+ return cur_objs
- cur_objs = []
- # load scene objects and set their poses
- for inst in insts:
- cur_objs.append(BopLoader._load_mesh(inst['obj_id'], model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale))
- BopLoader.set_object_pose(cur_objs[-1], inst, scale)
+def load_bop_intrinsics(bop_dataset_path: str, split: str = "test", cam_type: str = "") -> tuple[np.ndarray, int, int]:
+ """
+ Load and set the camera matrix and image resolution of a specified BOP dataset
- cam_H_c2w = BopLoader._compute_camera_to_world_trafo(cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
- # set camera intrinsics
- CameraUtility.set_intrinsics_from_K_matrix(cam_K, split_p['im_size'][0], split_p['im_size'][1])
+ :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
+ :param split: Optionally, train, test or val split depending on BOP dataset, defaults to "test"
+ :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
+ :returns: camera matrix K, W, H
+ """
+
+ bop_path, bop_dataset_name = BopLoader._setup_bop_toolkit(bop_dataset_path)
+
+ from bop_toolkit_lib import dataset_params
+ cam_p = dataset_params.get_camera_params(bop_path, bop_dataset_name, cam_type=cam_type if cam_type else None)
+
+ try:
+ split_p = dataset_params.get_split_params(bop_path, bop_dataset_name, split=split, split_type=cam_type if cam_type else None)
+ except ValueError:
+ raise Exception("Wrong path or {} split does not exist in {}.".format(split, bop_dataset_name))
+
+ # TLESS exception because images are cropped
+ if bop_dataset_name in ['tless']:
+ cam_p['K'][0, 2] = split_p['im_size'][0] / 2
+ cam_p['K'][1, 2] = split_p['im_size'][1] / 2
+
+ # set camera intrinsics
+ CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1])
+
+ return cam_p['K'], split_p['im_size'][0], split_p['im_size'][1]
- # set camera extrinsics as next frame
- frame_id = CameraUtility.add_camera_pose(cam_H_c2w)
+class BopLoader:
- # Add key frame for camera shift, as it changes from frame to frame in the tless replication
- cam = bpy.context.scene.camera.data
- cam.keyframe_insert(data_path='shift_x', frame=frame_id)
- cam.keyframe_insert(data_path='shift_y', frame=frame_id)
+ @staticmethod
+ def _setup_bop_toolkit(bop_dataset_path: str) -> tuple[str, str]:
+ """
+ Install the bop_toolkit from Github and set an environment variable pointing to the BOP datasets
- # Copy object poses to key frame (to be sure)
- for cur_obj in cur_objs:
- BopLoader._insert_key_frames(cur_obj, frame_id)
+ :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
+ :return (bop_path, bop_dataset_name): Path to BOP datasets and BOP dataset name
+ """
- # move the origin of the object to the world origin and on top of the X-Y plane
- # makes it easier to place them later on, this does not change the `.location`
- # This is only useful if the BOP objects are not used in a pose estimation scenario.
- if move_origin_to_x_y_plane:
- for obj in loaded_objects:
- obj.move_origin_to_bottom_mean_point()
+ bop_dataset_name = os.path.basename(bop_dataset_path)
+ bop_path = os.path.dirname(bop_dataset_path)
- return loaded_objects
+ print("bob: {}, dataset_path: {}".format(bop_dataset_path, bop_path))
+ print("dataset: {}".format(bop_dataset_name))
+ if not os.path.exists(bop_path):
+ raise ValueError("It seems the BOP dataset does not exist under the given path {}".format(bop_dataset_path))
-class BopLoader:
+ # Install bop_toolkit_lib
+ SetupUtility.setup_pip(["git+https://github.com/thodan/bop_toolkit"])
+ os.environ["BOP_PATH"] = bop_path
+
+ return bop_path, bop_dataset_name
@staticmethod
def _compute_camera_to_world_trafo(cam_H_m2w_ref: np.array, cam_H_m2c_ref: np.array, source_frame: list) -> np.ndarray:
diff --git a/blenderproc/python/modules/loader/BopLoaderModule.py b/blenderproc/python/modules/loader/BopLoaderModule.py
index 576f67370..8db9b2d87 100644
--- a/blenderproc/python/modules/loader/BopLoaderModule.py
+++ b/blenderproc/python/modules/loader/BopLoaderModule.py
@@ -1,5 +1,5 @@
from blenderproc.python.modules.loader.LoaderInterface import LoaderInterface
-from blenderproc.python.loader.BopLoader import load_bop
+from blenderproc.python.loader.BopLoader import load_bop_objs, load_bop_scene, load_bop_intrinsics
class BopLoaderModule(LoaderInterface):
@@ -80,21 +80,35 @@ def run(self):
num_of_objs_to_sample = None
obj_instances_limit = -1
- loaded_objects = load_bop(
+ scene_id = self.config.get_int("scene_id", -1)
+
+ if scene_id == -1:
+ loaded_objects = load_bop_objs(
+ bop_dataset_path=self.config.get_string("bop_dataset_path"),
+ model_type=self.config.get_string("model_type", ""),
+ obj_ids=self.config.get_list("obj_ids", []),
+ sample_objects=sample_objects,
+ num_of_objs_to_sample=num_of_objs_to_sample,
+ obj_instances_limit=obj_instances_limit,
+ move_origin_to_x_y_plane=self.config.get_bool("move_origin_to_x_y_plane", False),
+ mm2m=self.config.get_bool("mm2m", False),
+ temp_dir=self._temp_dir
+ )
+ load_bop_intrinsics(
bop_dataset_path=self.config.get_string("bop_dataset_path"),
- temp_dir=self._temp_dir,
- sys_paths=self.config.get_list("sys_paths"),
- model_type=self.config.get_string("model_type", ""),
- cam_type=self.config.get_string("cam_type", ""),
split=self.config.get_string("split", "test"),
- scene_id=self.config.get_int("scene_id", -1),
- obj_ids=self.config.get_list("obj_ids", []),
- sample_objects=sample_objects,
- num_of_objs_to_sample=num_of_objs_to_sample,
- obj_instances_limit=obj_instances_limit,
- move_origin_to_x_y_plane=self.config.get_bool("move_origin_to_x_y_plane", False),
- source_frame=self.config.get_list("source_frame", ["X", "-Y", "-Z"]),
- mm2m=self.config.get_bool("mm2m", False)
- )
+ cam_type=self.config.get_string("cam_type", "")
+ )
+ else:
+ loaded_objects = load_bop_scene(
+ bop_dataset_path=self.config.get_string("bop_dataset_path"),
+ scene_id=self.config.get_int("scene_id", -1),
+ model_type=self.config.get_string("model_type", ""),
+ cam_type=self.config.get_string("cam_type", ""),
+ split=self.config.get_string("split", "test"),
+ source_frame=self.config.get_list("source_frame", ["X", "-Y", "-Z"]),
+ mm2m=self.config.get_bool("mm2m", False),
+ temp_dir=self._temp_dir
+ )
self._set_properties(loaded_objects)
\ No newline at end of file
diff --git a/blenderproc/python/utility/Utility.py b/blenderproc/python/utility/Utility.py
index 718607e00..4adbcefef 100644
--- a/blenderproc/python/utility/Utility.py
+++ b/blenderproc/python/utility/Utility.py
@@ -296,7 +296,7 @@ def get_nodes_created_in_func(nodes: List[bpy.types.Node], created_in_func: str)
""" Returns all nodes which are created in the given function
:param nodes: list of nodes of the current material
- :param created_in_func: return nodes created in that function
+ :param created_in_func: return all nodes created in the given function
:return: The list of nodes with the given type.
"""
return [node for node in nodes if "created_in_func" in node and node["created_in_func"] == created_in_func]
From 923ef2f10a6fb5f906d87f7c79038b05c7128d6a Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Fri, 26 Nov 2021 17:49:24 +0100
Subject: [PATCH 10/46] Feat(BOP): Automatic bop_toolkit installation, remove
path
---
.../bop_object_on_surface_sampling/README.md | 21 ++++++++-----------
.../bop_object_on_surface_sampling/main.py | 13 ++++++------
.../bop_object_physics_positioning/README.md | 15 ++++++-------
.../bop_object_physics_positioning/main.py | 13 ++++++------
.../bop_object_pose_sampling/README.md | 15 ++++++-------
.../datasets/bop_object_pose_sampling/main.py | 7 ++++---
.../datasets/bop_scene_replication/README.md | 10 ++++-----
.../datasets/bop_scene_replication/main.py | 4 +---
8 files changed, 44 insertions(+), 54 deletions(-)
diff --git a/examples/datasets/bop_object_on_surface_sampling/README.md b/examples/datasets/bop_object_on_surface_sampling/README.md
index 8bedf2f43..7f947f40e 100644
--- a/examples/datasets/bop_object_on_surface_sampling/README.md
+++ b/examples/datasets/bop_object_on_surface_sampling/README.md
@@ -18,7 +18,6 @@ blenderproc download cc_textures
blenderproc run examples/datasets/bop_object_on_surface_sampling/main.py
-
resources/cctextures
examples/datasets/bop_object_on_surface_sampling/output
```
@@ -26,7 +25,6 @@ blenderproc run examples/datasets/bop_object_on_surface_sampling/main.py
* `examples/datasets/bop_object_on_surface_sampling/main.py`: path to the python file with pipeline configuration.
* ``: path to a folder containing BOP datasets.
* ``: name of BOP dataset for which ground truth should be saved, e.g. lm
-* ``: path to a bop_toolkit folder.
* `resources/cctextures`: path to CCTextures folder
* `examples/datasets/bop_object_on_surface_sampling/output`: path to an output folder.
@@ -35,9 +33,9 @@ To aggregate data and labels over multiple scenes, simply run the script multipl
## Steps
-* Load T-LESS BOP models: `bproc.loader.load_bop()`.
-* Load LM BOP models: `bproc.loader.load_bop`.
-* Load `` (YCB-V) BOP models: `bproc.loader.load_bop`.
+* Load T-LESS BOP models: `bproc.loader.load_bop_objs()`.
+* Load LM BOP models: `bproc.loader.load_bop_objs`.
+* Load `` (YCB-V) BOP models: `bproc.loader.load_bop_objs`.
* Sample colors for T-LESS models: `mat.set_principled_shader_value()`.
* Sample roughness and specular values for all objects: `mat.set_principled_shader_value()`.
* Construct planes: `bproc.object.create_primiative()`.
@@ -57,32 +55,31 @@ To aggregate data and labels over multiple scenes, simply run the script multipl
```python
# load distractor bop objects
-distractor_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
+distractor_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
model_type = 'cad',
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
```
```python
-distractor_bop_objs += bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
- sys_paths = args.bop_toolkit_path,
+distractor_bop_objs += bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
```
```python
# load a random sample of bop objects into the scene
-sampled_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+sampled_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 10)
+
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
```
* Here we are sampling BOP objects from 3 different datasets.
* We load 3 random objects from LM and T-LESS datasets, and 10 objects from the dataset given by `args.bop_dataset_name` (e.g. ycbv in this case).
-* Note that each loader loads the camera intrinsics and resolutions of each dataset, thus each subsequent `bproc.loader.load_bop` overwrites these intrinsics. In this example, `args.bop_dataset_name`(ycbv) dataset intrinsics are used when rendering. If required, they can be overwritten by setting `resolution_x, resolution_y, cam_K` in `bproc.camera.set_intrinsics_from_K_matrix()`.
+* In this example, `args.bop_dataset_name`(ycbv) dataset intrinsics are used when rendering by `bproc.loader.load_bop_intrinsics()`
### Material Manipulator
diff --git a/examples/datasets/bop_object_on_surface_sampling/main.py b/examples/datasets/bop_object_on_surface_sampling/main.py
index 9270a7e1b..d97324823 100644
--- a/examples/datasets/bop_object_on_surface_sampling/main.py
+++ b/examples/datasets/bop_object_on_surface_sampling/main.py
@@ -6,7 +6,6 @@
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
-parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
parser.add_argument('cc_textures_path', nargs='?', default="resources/cctextures", help="Path to downloaded cc textures")
parser.add_argument('output_dir', nargs='?', default="examples/bop_object_on_surface_sampling/output", help="Path to where the final files will be saved ")
args = parser.parse_args()
@@ -14,25 +13,25 @@
bproc.init()
# load a random sample of bop objects into the scene
-sampled_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+sampled_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 10)
# load distractor bop objects
-distractor_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
+distractor_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
model_type = 'cad',
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
-distractor_bop_objs += bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
- sys_paths = args.bop_toolkit_path,
+distractor_bop_objs += bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
+
# set shading and physics properties and randomize PBR materials
for j, obj in enumerate(sampled_bop_objs + distractor_bop_objs):
obj.set_shading_mode('auto')
diff --git a/examples/datasets/bop_object_physics_positioning/README.md b/examples/datasets/bop_object_physics_positioning/README.md
index 99e189c27..5107462bc 100644
--- a/examples/datasets/bop_object_physics_positioning/README.md
+++ b/examples/datasets/bop_object_physics_positioning/README.md
@@ -18,7 +18,6 @@ blenderproc download cc_textures
blenderproc run examples/datasets/bop_object_physics_positioning/main.py
-
resources/cctextures
examples/datasets/bop_object_physics_positioning/output
```
@@ -26,7 +25,6 @@ blenderproc run examples/datasets/bop_object_physics_positioning/main.py
* `examples/datasets/bop_object_physics_positioning/main.py`: path to the python file with pipeline configuration.
* ``: path to a folder containing BOP datasets
* ``: name of BOP dataset for which ground truth should be saved, e.g. ycbv
-* ``: path to the bop_toolkit folder
* `resources/cctextures`: path to CCTextures folder
* `examples/datasets/bop_object_physics_positioning/output`: path to an output folder
@@ -35,9 +33,9 @@ To aggregate data and labels over multiple scenes, simply run the script multipl
## Steps
-* Load T-LESS BOP models: `bproc.loader.load_bop()`.
-* Load LM BOP models: `bproc.loader.load_bop`.
-* Load `` (YCB-V) BOP models: `bproc.loader.load_bop`.
+* Load T-LESS BOP models: `bproc.loader.load_bop_objs()`.
+* Load LM BOP models: `bproc.loader.load_bop_objs`.
+* Load `` (YCB-V) BOP models: `bproc.loader.load_bop_objs`.
* Sample colors for T-LESS models: `mat.set_principled_shader_value()`.
* Sample roughness and specular values for all objects: `mat.set_principled_shader_value()`.
* Construct planes: `bproc.object.create_primiative()`.
@@ -61,14 +59,12 @@ To aggregate data and labels over multiple scenes, simply run the script multipl
# load distractor bop objects
distractor_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
model_type = 'cad',
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
```
```python
distractor_bop_objs += bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
@@ -76,16 +72,17 @@ distractor_bop_objs += bproc.loader.load_bop(bop_dataset_path = os.path.join(arg
```python
# load a random sample of bop objects into the scene
sampled_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 10)
+
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
```
* Here we are sampling BOP objects from 3 different datasets.
* We load 3 random objects from LM and T-LESS datasets, and 10 objects from the dataset given by `""` (e.g. ycbv in this case).
* `"obj.set_shading_mode('auto')"` sets the shading for these corresponding objects to auto smoothing. This looks more realistic for meshes that have both sharp edges and curved surfaces like in YCB-V.
-* Note that each loader loads the camera intrinsics and resolutions of each dataset, thus each subsequent `BopLoader` overwrites these intrinsics. In this example, `""`(ycbv) dataset intrinsics are used when rendering. If required, they can be overwritten by setting `resolution_x, resolution_y, cam_K` in the camera sampler or global config.
+* In this example, `args.bop_dataset_name`(ycbv) dataset intrinsics are used when rendering by `bproc.loader.load_bop_intrinsics()`
### Material Manipulator
diff --git a/examples/datasets/bop_object_physics_positioning/main.py b/examples/datasets/bop_object_physics_positioning/main.py
index 877641f86..61331d621 100644
--- a/examples/datasets/bop_object_physics_positioning/main.py
+++ b/examples/datasets/bop_object_physics_positioning/main.py
@@ -6,7 +6,6 @@
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
-parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
parser.add_argument('cc_textures_path', nargs='?', default="resources/cctextures", help="Path to downloaded cc textures")
parser.add_argument('output_dir', nargs='?', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
args = parser.parse_args()
@@ -14,26 +13,26 @@
bproc.init()
# load a random sample of bop objects into the scene
-sampled_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+sampled_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 10)
# load distractor bop objects
-distractor_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
+distractor_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
model_type = 'cad',
- sys_paths = args.bop_toolkit_path,
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3)
-distractor_bop_objs += bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
- sys_paths = args.bop_toolkit_path,
+distractor_bop_objs += bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'),
mm2m = True,
sample_objects = True,
num_of_objs_to_sample = 3,
obj_instances_limit = 1)
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
+
# set shading and physics properties and randomize PBR materials
for j, obj in enumerate(sampled_bop_objs + distractor_bop_objs):
obj.enable_rigidbody(True, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
diff --git a/examples/datasets/bop_object_pose_sampling/README.md b/examples/datasets/bop_object_pose_sampling/README.md
index 160c4604e..be9e688dd 100644
--- a/examples/datasets/bop_object_pose_sampling/README.md
+++ b/examples/datasets/bop_object_pose_sampling/README.md
@@ -6,19 +6,18 @@ This example shows how to load BOP objects and alternatingly sample light poses,
## Usage
-First make sure that you have downloaded a [BOP dataset](https://bop.felk.cvut.cz/datasets/) in the original folder structure. Also please clone the [BOP toolkit](https://github.com/thodan/bop_toolkit).
+First make sure that you have downloaded a [BOP dataset](https://bop.felk.cvut.cz/datasets/) in the original folder structure.
In [examples/datasets/bop_object_pose_sampling/main.py](main.py) set the `blender_install_path` where Blender is or should be installed.
Execute in the BlenderProc main directory:
```
-blenderproc run examples/datasets/bop_object_pose_sampling/main.py <bop_parent_path> <bop_dataset_name> <bop_toolkit_path> examples/datasets/bop_object_pose_sampling/output
+blenderproc run examples/datasets/bop_object_pose_sampling/main.py <bop_parent_path> <bop_dataset_name> examples/datasets/bop_object_pose_sampling/output
```
* `examples/datasets/bop_object_pose_sampling/main.py`: path to the python file with pipeline configuration.
* `<bop_parent_path>`: path to a folder containing BOP datasets.
* `<bop_dataset_name>`: name of BOP dataset, e.g. lm
-* `<bop_toolkit_path>`: path to the BOP toolkit containing dataset parameters, etc.
* `examples/datasets/bop_object_pose_sampling/output`: path to the output directory.
## Visualization
@@ -35,7 +34,7 @@ blenderproc vis coco /path/to/output_dir
## Steps
-* Loads object models and camera intrinsics from specified BOP dataset: `bproc.loader.load_bop()`.
+* Loads object models and camera intrinsics from specified BOP dataset: `bproc.loader.load_bop_objs()`, `bproc.loader.load_bop_intrinsics()`.
* Creates a point light sampled inside a shell
* Loops over five times:
* Sample Object Poses inside a cube with collision checks
@@ -48,14 +47,16 @@ blenderproc vis coco /path/to/output_dir
### BopLoader
-If `scene_id` is not specified (default = -1), `loader.BopLoader` simply loads all or the specified `obj_ids` from the BOP dataset given by `bop_dataset_path`.
+`bproc.loader.load_bop_objs()` simply loads all or the specified `obj_ids` from the BOP dataset given by `bop_dataset_path`.
+`bproc.loader.load_bop_intrinsics()` sets the intrinsics of the BOP dataset.
```python
-bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
split = 'val', # careful, some BOP datasets only have test sets
obj_ids = [1, 1, 3])
+
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
```
### CameraObjectSampler
diff --git a/examples/datasets/bop_object_pose_sampling/main.py b/examples/datasets/bop_object_pose_sampling/main.py
index fe57d62c7..1ab2f27f0 100644
--- a/examples/datasets/bop_object_pose_sampling/main.py
+++ b/examples/datasets/bop_object_pose_sampling/main.py
@@ -6,19 +6,20 @@
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
-parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
parser.add_argument('output_dir', nargs='?', default="examples/bop_object_pose_sampling/output", help="Path to where the final files will be saved ")
args = parser.parse_args()
bproc.init()
# load specified bop objects into the scene
-bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
split = 'val', # careful, some BOP datasets only have test sets
obj_ids = [1, 1, 3])
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
+
# set shading
for j, obj in enumerate(bop_objs):
obj.set_shading_mode('auto')
diff --git a/examples/datasets/bop_scene_replication/README.md b/examples/datasets/bop_scene_replication/README.md
index cfaa1ca16..c72e0f069 100644
--- a/examples/datasets/bop_scene_replication/README.md
+++ b/examples/datasets/bop_scene_replication/README.md
@@ -6,19 +6,18 @@ This example shows how to synthetically recreate BOP scenes.
## Usage
-First make sure that you have downloaded a [BOP dataset](https://bop.felk.cvut.cz/datasets/) in the original folder structure. Also please clone the [BOP toolkit](https://github.com/thodan/bop_toolkit).
+First make sure that you have downloaded a [BOP dataset](https://bop.felk.cvut.cz/datasets/) in the original folder structure.
In [examples/datasets/bop_scene_replication/main.py](main.py) set the `blender_install_path` where Blender is or should be installed.
Execute in the BlenderProc main directory:
```
-blenderproc run examples/datasets/bop_scene_replication/main.py <bop_parent_path> <bop_dataset_name> <bop_toolkit_path> examples/datasets/bop_scene_replication/output
+blenderproc run examples/datasets/bop_scene_replication/main.py <bop_parent_path> <bop_dataset_name> examples/datasets/bop_scene_replication/output
```
* `examples/datasets/bop_scene_replication/main.py`: path to the python file with pipeline configuration.
* `<bop_parent_path>`: path to a folder containing BOP datasets.
* `<bop_dataset_name>`: name of BOP dataset, e.g. tless
-* `<bop_toolkit_path>`: path to the BOP toolkit containing dataset parameters, etc.
* `examples/datasets/bop_scene_replication/output`: path to the output directory.
## Visualization
@@ -31,7 +30,7 @@ blenderproc vis hdf5 example/bop_scene_replication/0.hdf5
## Steps
-* Loads BOP scene with object models, object poses, camera poses and camera intrinsics: `bproc.loader.load_bop()`.
+* Loads BOP scene with object models, object poses, camera poses and camera intrinsics: `bproc.loader.load_bop_scene()`.
* Creates a point light sampled inside a shell.
* Renders rgb: `bproc.renderer()`.
* Renders instance segmentation masks: `bproc.renderer()`.
@@ -42,8 +41,7 @@ blenderproc vis hdf5 example/bop_scene_replication/0.hdf5
### BopLoader
```python
-bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+bop_objs = bproc.loader.load_bop_scene(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
scene_id = 1,
split = 'test') # careful, some BOP datasets only have labeled 'val' sets
diff --git a/examples/datasets/bop_scene_replication/main.py b/examples/datasets/bop_scene_replication/main.py
index 267d6cf9a..24c67a416 100644
--- a/examples/datasets/bop_scene_replication/main.py
+++ b/examples/datasets/bop_scene_replication/main.py
@@ -5,15 +5,13 @@
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
-parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
parser.add_argument('output_dir', nargs='?', default="examples/bop_scene_replication/output", help="Path to where the final files will be saved ")
args = parser.parse_args()
bproc.init()
# load specified bop objects into the scene
-bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
- sys_paths = args.bop_toolkit_path,
+bop_objs = bproc.loader.load_bop_scene(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
scene_id = 1,
split = 'test') # careful, some BOP datasets only have labeled 'val' sets
From cc5899819c7cbd819e18ff7c271e7ec70fc7b149 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Fri, 26 Nov 2021 17:51:07 +0100
Subject: [PATCH 11/46] Feat(BopChallenge): BlenderProc2 python scripts for all
BOP datasets
---
README_BlenderProc4BOP.md | 5 +
docs/tutorials/loader.md | 4 +-
examples/datasets/bop_challenge/README.md | 84 ++++++++--
.../datasets/bop_challenge/main_hb_random.py | 58 +++----
.../bop_challenge/main_icbin_random.py | 145 +++++++++++++++++
.../bop_challenge/main_itodd_random.py | 148 +++++++++++++++++
.../datasets/bop_challenge/main_lm_upright.py | 149 +++++++++++++++++
.../bop_challenge/main_tless_random.py | 151 ++++++++++++++++++
.../bop_challenge/main_tudl_random.py | 147 +++++++++++++++++
.../bop_challenge/main_ycbv_random.py | 147 +++++++++++++++++
10 files changed, 993 insertions(+), 45 deletions(-)
create mode 100644 examples/datasets/bop_challenge/main_icbin_random.py
create mode 100644 examples/datasets/bop_challenge/main_itodd_random.py
create mode 100644 examples/datasets/bop_challenge/main_lm_upright.py
create mode 100644 examples/datasets/bop_challenge/main_tless_random.py
create mode 100644 examples/datasets/bop_challenge/main_tudl_random.py
create mode 100644 examples/datasets/bop_challenge/main_ycbv_random.py
diff --git a/README_BlenderProc4BOP.md b/README_BlenderProc4BOP.md
index 4ae3385e6..e2fa59a5c 100644
--- a/README_BlenderProc4BOP.md
+++ b/README_BlenderProc4BOP.md
@@ -39,9 +39,14 @@ The cameras are positioned to cover the distribution of the ground-truth object
* [bop_object_pose_sampling](datasets/examples/datasets/bop_object_pose_sampling): Loads BOP objects and samples the camera, light poses and object poses in a free space.
+## Results
+
+Results of the BOP Challenge 2020 and the superiority of training with BlenderProc images over ordinary OpenGL images are shown in our paper `BOP Challenge 2020 on 6D Object Localization` [5].
+
## References
[1] Hodaň, Michel et al.: [BOP: Benchmark for 6D Object Pose Estimation](http://cmp.felk.cvut.cz/~hodanto2/data/hodan2018bop.pdf), ECCV 2018.
[2] Hodaň et al.: [Photorealistic Image Synthesis for Object Instance Detection](https://arxiv.org/abs/1902.03334), ICIP 2019.
[3] Denninger, Sundermeyer et al.: [BlenderProc](https://arxiv.org/pdf/1911.01911.pdf), arXiv 2019.
[4] Pitteri, Ramamonjisoa et al.: [On Object Symmetries and 6D Pose Estimation from Images](https://arxiv.org/abs/1908.07640), CVPR 2020.
+[5] Hodan, Sundermeyer et al.: [BOP Challenge 2020 on 6D Object Localization](https://arxiv.org/pdf/2009.07378.pdf), ECCVW 2020.
\ No newline at end of file
diff --git a/docs/tutorials/loader.md b/docs/tutorials/loader.md
index 764bdbecf..8c18c2946 100644
--- a/docs/tutorials/loader.md
+++ b/docs/tutorials/loader.md
@@ -28,7 +28,9 @@ objs = bproc.loader.load_obj("mymesh.obj")
### Dataset-specific loaders:
* `bproc.loader.load_AMASS`: Loads objects from the AMASS Dataset.
-* `bproc.loader.load_bop`: Loads the 3D models of any BOP dataset and allows replicating BOP scenes.
+* `bproc.loader.load_bop_objs`: Loads the 3D models of any BOP dataset and allows replicating BOP scenes.
+* `bproc.loader.load_bop_scene`: Loads any real BOP scenes using 3D models.
+* `bproc.loader.load_bop_intrinsics`: Loads intrinsics of specified BOP dataset.
* `bproc.loader.load_front3d`: Loads 3D-Front scenes.
* `bproc.loader.load_ikea`: Loads objects from the IKEA dataset.
* `bproc.loader.load_pix3d`: Loads Pix3D objects.
diff --git a/examples/datasets/bop_challenge/README.md b/examples/datasets/bop_challenge/README.md
index 55b192d06..cc047bb4c 100644
--- a/examples/datasets/bop_challenge/README.md
+++ b/examples/datasets/bop_challenge/README.md
@@ -6,13 +6,80 @@
-Here you find the official BlenderProc configs that we used to generate the [provided synthetic data](https://bop.felk.cvut.cz/datasets/) for the BOP Challenge 2020 (7 core datasets). The output datasets are saved in [BOP Format](https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md) in chunks of 1000 images.
+Here you find the official BlenderProc implementations that we used to generate the [provided synthetic data](https://bop.felk.cvut.cz/datasets/) for the BOP Challenge (7 core datasets). The output datasets are saved in [BOP Format](https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md) in chunks of 1000 images.
-The prerendered datasets with 50K images each are available [here](https://bop.felk.cvut.cz/datasets/), where they are called "PBR-BlenderProc4BOP training images". We ran every config file 2000 times with 25 random cameras per scene.
+The prerendered datasets with 50K images each are available [here](https://bop.felk.cvut.cz/datasets/), where they are called "PBR-BlenderProc4BOP training images".
-For LineMOD, the objects are placed upright on a plane based on the [bop_object_on_surface_sampling](../bop_object_on_surface_sampling/README.md) example. All other datasets are created by dropping objects using physics based on the [bop_object_physics_positioning](../bop_object_physics_positioning/README.md) example. Make sure to read through them if you want to understand and adapt the configs.
+For LineMOD, the objects are placed upright on a plane based on the [bop_object_on_surface_sampling](../bop_object_on_surface_sampling/README.md) example. All other datasets are created by dropping objects using physics based on the [bop_object_physics_positioning](../bop_object_physics_positioning/README.md) example.
-## Usage
+## Instructions to generate the data
+
+Here, we explain the usage with the new python format introduced in BlenderProc2, for the original config files, check [below](#original-config-file-usage).
+
+Download the necessary [BOP datasets](https://bop.felk.cvut.cz/datasets/).
+
+Execute in the BlenderProc main directory:
+
+```
+blenderproc download cc_textures
+```
+
+The following command creates 50K training images in BOP format for the given dataset
+```
+blenderproc run examples/datasets/bop_challenge/main_<bop_dataset_name>_<random/upright>.py
+ <bop_parent_path>
+ resources/cctextures
+ examples/datasets/bop_challenge/output
+```
+
+* `examples/datasets/bop_challenge/main_<bop_dataset_name>_<random/upright>.py`: path to the python file.
+* `<bop_parent_path>`: path to a folder containing BOP datasets.
+* `resources/cctextures`: path to CCTextures folder
+* `examples/datasets/bop_challenge/output`: path to an output folder where the bop_data will be saved
+
+Tip: If you have access to multiple GPUs, you can speed up the process by dividing the 2000 iterations of the outer loop into a multiple of 40 iterations. Then run the script multiple times with different output folders. At the end, rename and merge the scenes in a joint folder. For example, if you have 10 GPUs, set the outer iterations to 200 and run the script 10 times with different output folders.
+
+### Complete the BlenderProc4BOP datasets
+
+To save some time and not copy functionality we use the bop_toolkit to generate the [masks](
+https://github.com/thodan/bop_toolkit/blob/master/scripts/calc_gt_masks.py), [scene_gt_info](https://github.com/thodan/bop_toolkit/blob/master/scripts/calc_gt_info.py) and [scene_gt_coco](https://github.com/thodan/bop_toolkit/blob/master/scripts/calc_gt_coco.py)
+
+To install the `bop_toolkit` run
+
+```bash
+git clone https://github.com/thodan/bop_toolkit
+cd bop_toolkit
+pip install -r requirements.txt -e .
+```
+
+Then at the top of the scripts mentioned above set the following parameters (keep other parameters unchanged):
+```python
+p = {
+ # See dataset_params.py for options.
+ 'dataset': '<bop_dataset_name>',
+
+ # Dataset split. Options: 'train', 'val', 'test'.
+ 'dataset_split': 'train',
+
+ # Dataset split type. None = default. See dataset_params.py for options.
+ 'dataset_split_type': 'pbr',
+
+ # Folder containing the BOP datasets.
+ 'datasets_path': '<bop_parent_path>',
+}
+```
+
+To complete your BOP datasets, finally run:
+
+```bash
+python scripts/calc_gt_masks.py
+python scripts/calc_gt_info.py
+python scripts/calc_gt_coco.py
+```
+
+## Original Config file usage
+
+Instead of running the python script once, we ran every config file 2000 times with 25 random cameras per scene. This has the disadvantage that objects need to be loaded at each run.
Download the necessary [BOP datasets](https://bop.felk.cvut.cz/datasets/) and the [bop-toolkit](https://github.com/thodan/bop_toolkit).
@@ -38,11 +105,4 @@ blenderpoc run examples/datasets/bop_challenge/
* `resources/cctextures`: path to CCTextures folder
* `examples/datasets/bop_challenge/output`: path to an output folder where the bop_data will be saved
-This creates 25 images of a single scene. To create a whole dataset, simply run the command multiple times.
-
-### Note
-
-To save some time and not copy functionality we use the bop_toolkit to generate the [masks](
-https://github.com/thodan/bop_toolkit/blob/master/scripts/calc_gt_masks.py) and also the [scene_gt_info](https://github.com/thodan/bop_toolkit/blob/master/scripts/calc_gt_info.py). There, you will also find a Bop2coco annotations converter.
-
-Don't forget to set the paths to your generated BOP dataset in bop_toolkit_lib/config.py.
+This creates 25 images of a single scene. To create a whole dataset, simply run the command multiple times.
\ No newline at end of file
diff --git a/examples/datasets/bop_challenge/main_hb_random.py b/examples/datasets/bop_challenge/main_hb_random.py
index 401a7a2ca..2661c453c 100644
--- a/examples/datasets/bop_challenge/main_hb_random.py
+++ b/examples/datasets/bop_challenge/main_hb_random.py
@@ -2,34 +2,25 @@
import argparse
import os
import numpy as np
-import random
parser = argparse.ArgumentParser()
-parser.add_argument('bop_parent_path', nargs='?', help="Path to the bop datasets parent directory")
-parser.add_argument('bop_dataset_name', nargs='?', help="Main BOP dataset")
-parser.add_argument('bop_toolkit_path', nargs='?', help="Path to bop toolkit")
-parser.add_argument('cc_textures_path', nargs='?', default="resources/cctextures", help="Path to downloaded cc textures")
-parser.add_argument('output_dir', nargs='?', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
args = parser.parse_args()
bproc.init()
# load bop objects into the scene
-target_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'),
- sys_paths = args.bop_toolkit_path,
- mm2m = True)
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
# load distractor bop objects
-tless_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'),
- model_type = 'cad',
- sys_paths = args.bop_toolkit_path,
- mm2m = True)
-ycbv_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'),
- sys_paths = args.bop_toolkit_path,
- mm2m = True)
-tyol_dist_bop_objs = bproc.loader.load_bop(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'),
- sys_paths = args.bop_toolkit_path,
- mm2m = True)
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+tyol_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'))
# set shading and hide objects
for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + tyol_dist_bop_objs):
@@ -54,6 +45,7 @@
light_point = bproc.types.Light()
light_point.set_energy(200)
+# load cc_textures
cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
# Define a function that samples 6-DoF poses
@@ -63,17 +55,19 @@ def sample_pose_func(obj: bproc.types.MeshObject):
obj.set_location(np.random.uniform(min, max))
obj.set_rotation_euler(bproc.sampler.uniformSO3())
-# activate depth rendering and set amount of samples for color rendering
+# activate depth rendering without antialiasing and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
bproc.renderer.set_samples(50)
for i in range(2000):
+ # Sample bop objects for a scene
sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=20, replace=False))
sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=2, replace=False))
sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=2, replace=False))
sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=2, replace=False))
+ # Randomize materials and set physics
for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
mat = obj.get_materials()[0]
if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
@@ -83,14 +77,14 @@ def sample_pose_func(obj: bproc.types.MeshObject):
mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
obj.hide(False)
-
+
+ # Sample two light sources
light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
light_plane.replace_materials(light_plane_material)
-
light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
- elevation_min = 5, elevation_max = 89, uniform_volume = True)
+ elevation_min = 5, elevation_max = 89)
light_point.set_location(location)
# sample CC Texture and assign to room planes
@@ -121,10 +115,9 @@ def sample_pose_func(obj: bproc.types.MeshObject):
radius_min = 0.44,
radius_max = 1.42,
elevation_min = 5,
- elevation_max = 89,
- uniform_volume = True)
+ elevation_max = 89)
# Determine point of interest in scene as the object closest to the mean of a subset of objects
- poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15))
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
# Compute rotation based on vector going from location towards poi
rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
# Add homog cam pose based on location an rotation
@@ -141,12 +134,13 @@ def sample_pose_func(obj: bproc.types.MeshObject):
# Write data in bop format
bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
- dataset = args.bop_dataset_name,
- depth_scale = 0.1,
- depths = data["depth"],
- colors = data["colors"],
- color_file_format = "JPEG",
- ignore_dist_thres = 10)
+ target_objects = sampled_target_bop_objs,
+ dataset = 'hb',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
obj.disable_rigidbody()
diff --git a/examples/datasets/bop_challenge/main_icbin_random.py b/examples/datasets/bop_challenge/main_icbin_random.py
new file mode 100644
index 000000000..729319a7a
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_icbin_random.py
@@ -0,0 +1,145 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'icbin'), mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+hb_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'icbin'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + hb_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(200)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ obj.set_location(np.random.uniform([-0.2, -0.2, 0.0], [0.2, 0.2, 0.6]))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=20, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(hb_dist_bop_objs, size=2, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
+ mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 20,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.45,
+ radius_max = 1.08,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+ # Add homog cam pose based on location an rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+ # Check that obstacles are at least 0.3 meter away from the camera and make sure the view interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'icbin',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
diff --git a/examples/datasets/bop_challenge/main_itodd_random.py b/examples/datasets/bop_challenge/main_itodd_random.py
new file mode 100644
index 000000000..ba866217c
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_itodd_random.py
@@ -0,0 +1,148 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'itodd'), mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'itodd'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(20)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
+ max = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=25, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=5, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.7)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 0.5))
+ if obj.get_cp("bop_dataset_name") == 'itodd':
+ mat.set_principled_shader_value("Specular", np.random.uniform(0.3, 1.0))
+ mat.set_principled_shader_value("Metallic", np.random.uniform(0, 1.0))
+ if obj.get_cp("bop_dataset_name") == 'tless':
+ mat.set_principled_shader_value("Metallic", np.random.uniform(0, 0.5))
+
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99, collision_margin=0.0005)
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(0.1,0.5),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 0.5, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 50,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.64,
+ radius_max = 0.78,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+    # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'itodd',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
diff --git a/examples/datasets/bop_challenge/main_lm_upright.py b/examples/datasets/bop_challenge/main_lm_upright.py
new file mode 100644
index 000000000..daf85c238
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_lm_upright.py
@@ -0,0 +1,149 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'), mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+tyol_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'lm'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + tyol_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(200)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
+ max = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=15, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=3, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=3, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=3, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
+ mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Define a function that samples the initial pose of a given object above the ground
+ def sample_initial_pose(obj: bproc.types.MeshObject):
+ obj.set_location(bproc.sampler.upper_region(objects_to_sample_on=room_planes[0:1],
+ min_height=1, max_height=4, face_sample_range=[0.4, 0.6]))
+ obj.set_rotation_euler(np.random.uniform([0, 0, 0], [0, 0, np.pi * 2]))
+
+ # Sample objects on the given surface
+ placed_objects = bproc.object.sample_poses_on_surface(objects_to_sample=sampled_target_bop_objs + sampled_distractor_bop_objs,
+ surface=room_planes[0],
+ sample_pose_func=sample_initial_pose,
+ min_distance=0.01,
+ max_distance=0.2)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.35,
+ radius_max = 1.5,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=10, replace=False))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-0.7854, 0.7854))
+    # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'lm',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.hide(True)
\ No newline at end of file
diff --git a/examples/datasets/bop_challenge/main_tless_random.py b/examples/datasets/bop_challenge/main_tless_random.py
new file mode 100644
index 000000000..020d4fa23
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_tless_random.py
@@ -0,0 +1,151 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+
+# load distractor bop objects
+itodd_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'itodd'), mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+hb_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + itodd_dist_bop_objs + ycbv_dist_bop_objs + hb_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(100)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
+ max = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=20))
+ sampled_distractor_bop_objs = list(np.random.choice(itodd_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(hb_dist_bop_objs, size=2, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 0.5))
+ if obj.get_cp("bop_dataset_name") == 'itodd':
+ mat.set_principled_shader_value("Metallic", np.random.uniform(0.5, 1.0))
+ if obj.get_cp("bop_dataset_name") == 'tless':
+ mat.set_principled_shader_value("Specular", np.random.uniform(0.3, 1.0))
+ mat.set_principled_shader_value("Metallic", np.random.uniform(0, 0.5))
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 20,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.65,
+ radius_max = 0.94,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+    # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'tless',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
diff --git a/examples/datasets/bop_challenge/main_tudl_random.py b/examples/datasets/bop_challenge/main_tudl_random.py
new file mode 100644
index 000000000..73dd9baf9
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_tudl_random.py
@@ -0,0 +1,147 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tudl'), mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+hb_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'tudl'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + hb_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(200)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.6, -0.6, 0.0], [-0.4, -0.4, 0.0])
+ max = np.random.uniform([0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=3, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=7, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=7, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(hb_dist_bop_objs, size=7, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
+ mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 20,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.85,
+ radius_max = 2.02,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=3))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+    # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'tudl',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
diff --git a/examples/datasets/bop_challenge/main_ycbv_random.py b/examples/datasets/bop_challenge/main_ycbv_random.py
new file mode 100644
index 000000000..b215117cb
--- /dev/null
+++ b/examples/datasets/bop_challenge/main_ycbv_random.py
@@ -0,0 +1,147 @@
+import blenderproc as bproc
+import argparse
+import os
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
+parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
+parser.add_argument('output_dir', default="examples/bop_object_physics_positioning/output", help="Path to where the final files will be saved ")
+args = parser.parse_args()
+
+bproc.init()
+
+# load bop objects into the scene
+target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
+
+# load distractor bop objects
+tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
+hb_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
+tyol_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'), mm2m = True)
+
+# load BOP dataset intrinsics
+bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'))
+
+# set shading and hide objects
+for obj in (target_bop_objs + tless_dist_bop_objs + hb_dist_bop_objs + tyol_dist_bop_objs):
+ obj.set_shading_mode('auto')
+ obj.hide(True)
+
+# create room
+room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
+ bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
+for plane in room_planes:
+ plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+
+# sample light color and strength from ceiling
+light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
+light_plane.set_name('light_plane')
+light_plane_material = bproc.material.create('light_material')
+
+# sample point light on shell
+light_point = bproc.types.Light()
+light_point.set_energy(200)
+
+# load cc_textures
+cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
+
+# Define a function that samples 6-DoF poses
+def sample_pose_func(obj: bproc.types.MeshObject):
+ min = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
+ max = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
+ obj.set_location(np.random.uniform(min, max))
+ obj.set_rotation_euler(bproc.sampler.uniformSO3())
+
+# activate depth rendering without antialiasing and set amount of samples for color rendering
+bproc.renderer.enable_depth_output(activate_antialiasing=False)
+bproc.renderer.set_samples(50)
+
+for i in range(2000):
+
+ # Sample bop objects for a scene
+ sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=21, replace=False))
+ sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(hb_dist_bop_objs, size=2, replace=False))
+ sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=2, replace=False))
+
+ # Randomize materials and set physics
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ mat = obj.get_materials()[0]
+ if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
+ grey_col = np.random.uniform(0.1, 0.9)
+ mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
+ mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
+ mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
+ obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
+ obj.hide(False)
+
+ # Sample two light sources
+ light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
+ emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
+ light_plane.replace_materials(light_plane_material)
+ light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
+ location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
+ elevation_min = 5, elevation_max = 89)
+ light_point.set_location(location)
+
+ # sample CC Texture and assign to room planes
+ random_cc_texture = np.random.choice(cc_textures)
+ for plane in room_planes:
+ plane.replace_materials(random_cc_texture)
+
+
+ # Sample object poses and check collisions
+ bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
+ sample_pose_func = sample_pose_func,
+ max_tries = 1000)
+
+ # Physics Positioning
+ bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
+ max_simulation_time=10,
+ check_object_interval=1,
+ substeps_per_frame = 20,
+ solver_iters=25)
+
+ # BVH tree used for camera obstacle checks
+ bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
+
+ cam_poses = 0
+ while cam_poses < 25:
+ # Sample location
+ location = bproc.sampler.shell(center = [0, 0, 0],
+ radius_min = 0.61,
+ radius_max = 1.24,
+ elevation_min = 5,
+ elevation_max = 89)
+ # Determine point of interest in scene as the object closest to the mean of a subset of objects
+ poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
+ # Compute rotation based on vector going from location towards poi
+ rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
+        # Add homog cam pose based on location and rotation
+ cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
+
+        # Check that obstacles are at least 0.3 meter away from the camera and make sure the view is interesting enough
+ if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
+ # Persist camera pose
+ bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
+ cam_poses += 1
+
+ # render the whole pipeline
+ data = bproc.renderer.render()
+
+ # Write data in bop format
+ bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
+ target_objects = sampled_target_bop_objs,
+ dataset = 'ycbv',
+ depth_scale = 0.1,
+ depths = data["depth"],
+ colors = data["colors"],
+ color_file_format = "JPEG",
+ ignore_dist_thres = 10)
+
+ for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
+ obj.disable_rigidbody()
+ obj.hide(True)
From b6060832291f41552e9272f52d6473b16fe78be7 Mon Sep 17 00:00:00 2001
From: Martin Sundermeyer
Date: Fri, 26 Nov 2021 17:51:59 +0100
Subject: [PATCH 12/46] Feat(BopWriter): Allow setting target objects for
write_bop
---
.../python/modules/writer/BopWriter.py | 14 +++++++-----
blenderproc/python/writer/BopWriterUtility.py | 22 ++++++++++---------
2 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/blenderproc/python/modules/writer/BopWriter.py b/blenderproc/python/modules/writer/BopWriter.py
index 15e88da8d..74cb986d6 100644
--- a/blenderproc/python/modules/writer/BopWriter.py
+++ b/blenderproc/python/modules/writer/BopWriter.py
@@ -1,5 +1,6 @@
from blenderproc.python.modules.writer.WriterInterface import WriterInterface
from blenderproc.python.writer.BopWriterUtility import write_bop
+from blenderproc.python.type.MeshObjectUtility import MeshObject
import os
@@ -68,10 +69,11 @@ def run(self):
if self._avoid_output:
print("Avoid output is on, no output produced!")
else:
+ MeshObject.get_all_mesh_objects()
write_bop(output_dir = os.path.join(self._determine_output_dir(False), 'bop_data'),
- dataset = self._dataset,
- append_to_existing_output = self._append_to_existing_output,
- depth_scale = self._depth_scale,
- save_world2cam = self._save_world2cam,
- ignore_dist_thres = self._ignore_dist_thres,
- m2mm = self._mm2m)
\ No newline at end of file
+ dataset = self._dataset,
+ append_to_existing_output = self._append_to_existing_output,
+ depth_scale = self._depth_scale,
+ save_world2cam = self._save_world2cam,
+ ignore_dist_thres = self._ignore_dist_thres,
+ m2mm = self._mm2m)
\ No newline at end of file
diff --git a/blenderproc/python/writer/BopWriterUtility.py b/blenderproc/python/writer/BopWriterUtility.py
index 8ba535900..811d6eec0 100644
--- a/blenderproc/python/writer/BopWriterUtility.py
+++ b/blenderproc/python/writer/BopWriterUtility.py
@@ -1,3 +1,4 @@
+from blenderproc.python.types.MeshObjectUtility import MeshObject
import json
import os
import glob
@@ -15,13 +16,14 @@
from blenderproc.python.writer.WriterUtility import WriterUtility
-def write_bop(output_dir: str, depths: Optional[List[np.ndarray]] = None, colors: Optional[List[np.ndarray]] = None,
- color_file_format: str = "PNG", dataset: str = "", append_to_existing_output: bool = True,
- depth_scale: float = 1.0, jpg_quality: int = 95, save_world2cam: bool = True,
+def write_bop(output_dir: str, target_objects: Optional[List[MeshObject]] = None, depths: Optional[List[np.ndarray]] = None,
+ colors: Optional[List[np.ndarray]] = None, color_file_format: str = "PNG", dataset: str = "",
+ append_to_existing_output: bool = True, depth_scale: float = 1.0, jpg_quality: int = 95, save_world2cam: bool = True,
ignore_dist_thres: float = 100., m2mm: bool = True, frames_per_chunk: int = 1000):
"""Write the BOP data
:param output_dir: Path to the output directory.
+    :param target_objects: Objects for which to save ground truth poses in BOP format. Default: save all objects, or those from the specified dataset
:param depths: List of depth images in m to save
:param colors: List of color images to save
:param color_file_format: File type to save color images. Available: "PNG", "JPEG"
@@ -53,17 +55,17 @@ def write_bop(output_dir: str, depths: Optional[List[np.ndarray]] = None, colors
elif not append_to_existing_output:
raise Exception("The output folder already exists: {}.".format(dataset_dir))
- all_mesh_objects = get_all_blender_mesh_objects()
-
- # Select objects from the specified dataset.
- if dataset:
+ # Select target objects or objects from the specified dataset or all objects
+ if target_objects is not None:
+ dataset_objects = [t_obj.blender_obj for t_obj in target_objects]
+ elif dataset:
dataset_objects = []
- for obj in all_mesh_objects:
- if "bop_dataset_name" in obj:
+ for obj in get_all_blender_mesh_objects():
+ if "bop_dataset_name" in obj and not obj.hide_render:
if obj["bop_dataset_name"] == dataset:
dataset_objects.append(obj)
else:
- dataset_objects = all_mesh_objects
+ dataset_objects = get_all_blender_mesh_objects()
# Check if there is any object from the specified dataset.
if not dataset_objects:
From 46db849f956ae4d7382554e1d8f0392805dadb86 Mon Sep 17 00:00:00 2001
From: MartinSmeyer
Date: Sat, 27 Nov 2021 16:07:47 +0100
Subject: [PATCH 13/46] Fix(camera_object_pose): fct arguments
---
blenderproc/python/modules/writer/BopWriter.py | 2 --
examples/basics/camera_object_pose/README.md | 2 +-
examples/basics/camera_object_pose/main.py | 2 +-
3 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/blenderproc/python/modules/writer/BopWriter.py b/blenderproc/python/modules/writer/BopWriter.py
index 74cb986d6..8fde2591f 100644
--- a/blenderproc/python/modules/writer/BopWriter.py
+++ b/blenderproc/python/modules/writer/BopWriter.py
@@ -1,6 +1,5 @@
from blenderproc.python.modules.writer.WriterInterface import WriterInterface
from blenderproc.python.writer.BopWriterUtility import write_bop
-from blenderproc.python.type.MeshObjectUtility import MeshObject
import os
@@ -69,7 +68,6 @@ def run(self):
if self._avoid_output:
print("Avoid output is on, no output produced!")
else:
- MeshObject.get_all_mesh_objects()
write_bop(output_dir = os.path.join(self._determine_output_dir(False), 'bop_data'),
dataset = self._dataset,
append_to_existing_output = self._append_to_existing_output,
diff --git a/examples/basics/camera_object_pose/README.md b/examples/basics/camera_object_pose/README.md
index 88b338c18..1dad01cb8 100644
--- a/examples/basics/camera_object_pose/README.md
+++ b/examples/basics/camera_object_pose/README.md
@@ -80,7 +80,7 @@ bproc.camera.add_camera_pose(cam2world)
data = bproc.renderer.render()
# Write object poses, color and depth in bop format
-bproc.writer.write_bop(args.output_dir, data["depth"], data["colors"], m2mm=True, append_to_existing_output=True)
+bproc.writer.write_bop(args.output_dir, [obj], data["depth"], data["colors"], m2mm=True, append_to_existing_output=True)
```
* Saves all pose and camera information that is provided in BOP datasets.
diff --git a/examples/basics/camera_object_pose/main.py b/examples/basics/camera_object_pose/main.py
index 3faf6d65b..f99ba1496 100644
--- a/examples/basics/camera_object_pose/main.py
+++ b/examples/basics/camera_object_pose/main.py
@@ -60,4 +60,4 @@
data = bproc.renderer.render()
# Write object poses, color and depth in bop format
-bproc.writer.write_bop(args.output_dir, data["depth"], data["colors"], m2mm=True, append_to_existing_output=True)
+bproc.writer.write_bop(args.output_dir, [obj], data["depth"], data["colors"], m2mm=True, append_to_existing_output=True)
From 1d0e1ecc3ff059924e1655539f6bb766f75d62d0 Mon Sep 17 00:00:00 2001
From: MartinSmeyer
Date: Fri, 3 Dec 2021 11:21:36 +0100
Subject: [PATCH 14/46] Fix(load_bop_objs): remove split arg
---
examples/datasets/bop_object_pose_sampling/README.md | 1 -
examples/datasets/bop_object_pose_sampling/main.py | 1 -
2 files changed, 2 deletions(-)
diff --git a/examples/datasets/bop_object_pose_sampling/README.md b/examples/datasets/bop_object_pose_sampling/README.md
index be9e688dd..3c607f085 100644
--- a/examples/datasets/bop_object_pose_sampling/README.md
+++ b/examples/datasets/bop_object_pose_sampling/README.md
@@ -53,7 +53,6 @@ blenderproc vis coco /path/to/output_dir
```python
bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
- split = 'val', # careful, some BOP datasets only have test sets
obj_ids = [1, 1, 3])
bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name))
diff --git a/examples/datasets/bop_object_pose_sampling/main.py b/examples/datasets/bop_object_pose_sampling/main.py
index 1ab2f27f0..762bbfa76 100644
--- a/examples/datasets/bop_object_pose_sampling/main.py
+++ b/examples/datasets/bop_object_pose_sampling/main.py
@@ -14,7 +14,6 @@
# load specified bop objects into the scene
bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, args.bop_dataset_name),
mm2m = True,
- split = 'val', # careful, some BOP datasets only have test sets
obj_ids = [1, 1, 3])
# load BOP datset intrinsics
From 3eadcd3b67801703029446da39ddb95307092937 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:15:19 +0100
Subject: [PATCH 15/46] feat(blender_3.0.0): add blender 3.0.0
deactivate auto tile size, as this is not used anymore -> change iteration over devices to new API
---
blenderproc/python/renderer/RendererUtility.py | 3 ++-
blenderproc/python/utility/Initializer.py | 5 ++---
blenderproc/python/utility/InstallUtility.py | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index 51c4be883..b9cdc4677 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -100,7 +100,8 @@ def toggle_auto_tile_size(enable: bool):
:param enable: True, if it should be enabled.
"""
- bpy.context.scene.ats_settings.is_enabled = enable
+ #bpy.context.scene.ats_settings.is_enabled = enable
+ pass
def set_tile_size(tile_x: int, tile_y: int):
diff --git a/blenderproc/python/utility/Initializer.py b/blenderproc/python/utility/Initializer.py
index 39c4961de..bf11d4948 100644
--- a/blenderproc/python/utility/Initializer.py
+++ b/blenderproc/python/utility/Initializer.py
@@ -56,9 +56,8 @@ def init(horizon_color: list = [0.05, 0.05, 0.05], compute_device: str = "GPU",
if found:
break
# make sure that all visible GPUs are used
- for group in prefs.get_devices():
- for d in group:
- d.use = True
+ for device in prefs.devices:
+ device.use = True
# Set the Experimental features on/off
if use_experimental_features:
diff --git a/blenderproc/python/utility/InstallUtility.py b/blenderproc/python/utility/InstallUtility.py
index 58f1f1bc3..1009ce5a7 100644
--- a/blenderproc/python/utility/InstallUtility.py
+++ b/blenderproc/python/utility/InstallUtility.py
@@ -80,7 +80,7 @@ def make_sure_blender_is_installed(custom_blender_path: str, blender_install_pat
# Determine configured version
# right new only support blender-2.93
- major_version = "2.93"
+ major_version = "3.0"
minor_version = "0"
blender_version = "blender-{}.{}".format(major_version, minor_version)
if platform == "linux" or platform == "linux2":
From 9510b640900f085a078ce13f6ff0a6d5bd5b6bfa Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:16:44 +0100
Subject: [PATCH 16/46] chore(set_samples): rename set_samples to
set_max_amount_of_samples
---
blenderproc/api/renderer/__init__.py | 2 +-
blenderproc/python/modules/renderer/RendererInterface.py | 2 +-
blenderproc/python/renderer/FlowRendererUtility.py | 2 +-
blenderproc/python/renderer/NOCSRendererUtility.py | 2 +-
blenderproc/python/renderer/RendererUtility.py | 4 ++--
blenderproc/python/renderer/SegMapRendererUtility.py | 2 +-
blenderproc/python/utility/Initializer.py | 2 +-
docs/tutorials/renderer.md | 2 +-
examples/advanced/camera_depth_of_field/main.py | 2 +-
examples/advanced/coco_annotations/main.py | 2 +-
examples/advanced/diffuse_color_image/main.py | 2 +-
examples/advanced/dust/main.py | 2 +-
examples/advanced/entity_displacement_modifier/main.py | 2 +-
examples/advanced/lens_distortion/main.py | 2 +-
examples/advanced/lens_distortion/main_callab.py | 2 +-
examples/advanced/material_randomizer/main.py | 2 +-
.../advanced/motion_blur_rolling_shutter/main_motion_blur.py | 2 +-
.../motion_blur_rolling_shutter/main_rolling_shutter.py | 2 +-
examples/advanced/object_pose_sampling/main.py | 2 +-
examples/advanced/on_surface_object_sampling/main.py | 2 +-
examples/advanced/random_room_constructor/main.py | 2 +-
examples/basics/basic/README.md | 2 +-
examples/basics/basic/main.py | 2 +-
examples/basics/camera_object_pose/main.py | 2 +-
examples/basics/camera_sampling/main.py | 2 +-
examples/basics/entity_manipulation/main.py | 2 +-
examples/basics/material_manipulation/main.py | 2 +-
examples/datasets/amass_human_poses/main.py | 2 +-
examples/datasets/blenderkit/main.py | 2 +-
examples/datasets/bop_object_on_surface_sampling/README.md | 2 +-
examples/datasets/bop_object_on_surface_sampling/main.py | 2 +-
examples/datasets/bop_object_physics_positioning/README.md | 2 +-
examples/datasets/bop_object_physics_positioning/main.py | 2 +-
examples/datasets/bop_object_pose_sampling/main.py | 2 +-
examples/datasets/bop_scene_replication/main.py | 2 +-
examples/datasets/front_3d/main.py | 2 +-
examples/datasets/front_3d_with_improved_mat/main.py | 2 +-
examples/datasets/haven/main.py | 2 +-
examples/datasets/ikea/main.py | 2 +-
examples/datasets/pix3d/main.py | 2 +-
examples/datasets/scenenet/main.py | 2 +-
examples/datasets/scenenet_with_cctextures/main.py | 2 +-
examples/datasets/shapenet/README.md | 3 ++-
examples/datasets/shapenet/main.py | 2 +-
examples/datasets/shapenet_with_scenenet/main.py | 2 +-
examples/datasets/shapenet_with_suncg/main.py | 2 +-
examples/datasets/suncg_with_improved_mat/main.py | 2 +-
47 files changed, 49 insertions(+), 48 deletions(-)
diff --git a/blenderproc/api/renderer/__init__.py b/blenderproc/api/renderer/__init__.py
index df732700f..86e7ee739 100644
--- a/blenderproc/api/renderer/__init__.py
+++ b/blenderproc/api/renderer/__init__.py
@@ -1,7 +1,7 @@
from blenderproc.python.renderer.RendererUtility import set_denoiser, set_light_bounces, toggle_auto_tile_size, \
set_tile_size, set_cpu_threads, toggle_stereo, set_simplify_subdivision_render, set_adaptive_sampling, \
- set_samples, enable_distance_output, enable_depth_output, enable_normals_output, enable_diffuse_color_output,\
+ set_max_amount_of_samples, enable_distance_output, enable_depth_output, enable_normals_output, enable_diffuse_color_output,\
map_file_format_to_file_ending, render, set_output_format, enable_motion_blur, set_world_background
from blenderproc.python.renderer.SegMapRendererUtility import render_segmap
from blenderproc.python.renderer.FlowRendererUtility import render_optical_flow
diff --git a/blenderproc/python/modules/renderer/RendererInterface.py b/blenderproc/python/modules/renderer/RendererInterface.py
index 93d7161d4..9e2a990a6 100644
--- a/blenderproc/python/modules/renderer/RendererInterface.py
+++ b/blenderproc/python/modules/renderer/RendererInterface.py
@@ -137,7 +137,7 @@ def _configure_renderer(self, default_samples: int = 256, use_denoiser: bool = F
:param default_denoiser: Either "Intel" or "Blender", "Intel" performs much better in most cases
"""
RendererUtility._render_init()
- RendererUtility.set_samples(self.config.get_int("samples", default_samples))
+ RendererUtility.set_max_amount_of_samples(self.config.get_int("samples", default_samples))
if self.config.has_param("use_adaptive_sampling"):
RendererUtility.set_adaptive_sampling(self.config.get_float("use_adaptive_sampling"))
diff --git a/blenderproc/python/renderer/FlowRendererUtility.py b/blenderproc/python/renderer/FlowRendererUtility.py
index b3e087f8e..b8c5fec99 100644
--- a/blenderproc/python/renderer/FlowRendererUtility.py
+++ b/blenderproc/python/renderer/FlowRendererUtility.py
@@ -42,7 +42,7 @@ def render_optical_flow(output_dir: str = None, temp_dir: str = None, get_forwar
with Utility.UndoAfterExecution():
RendererUtility._render_init()
- RendererUtility.set_samples(1)
+ RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_adaptive_sampling(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
diff --git a/blenderproc/python/renderer/NOCSRendererUtility.py b/blenderproc/python/renderer/NOCSRendererUtility.py
index b61602826..a4bdd8bdf 100644
--- a/blenderproc/python/renderer/NOCSRendererUtility.py
+++ b/blenderproc/python/renderer/NOCSRendererUtility.py
@@ -44,7 +44,7 @@ def render_nocs(output_dir: Optional[str] = None, file_prefix: str = "nocs_", ou
# Set all fast rendering parameters with only one ray per pixel
RendererUtility._render_init()
- RendererUtility.set_samples(1)
+ RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_adaptive_sampling(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index b9cdc4677..1450590cb 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -166,7 +166,7 @@ def set_adaptive_sampling(adaptive_threshold: float):
bpy.context.scene.cycles.use_adaptive_sampling = False
-def set_samples(samples: int):
+def set_max_amount_of_samples(samples: int):
""" Sets the number of samples to render for each pixel.
:param samples: The number of samples per pixel
@@ -617,4 +617,4 @@ def set_world_background(color: List[float], strength: float = 1):
links.remove(nodes.get("Background").inputs['Color'].links[0])
nodes.get("Background").inputs['Strength'].default_value = strength
- nodes.get("Background").inputs['Color'].default_value = color + [1]
\ No newline at end of file
+ nodes.get("Background").inputs['Color'].default_value = color + [1]
diff --git a/blenderproc/python/renderer/SegMapRendererUtility.py b/blenderproc/python/renderer/SegMapRendererUtility.py
index c99168f40..4474689e5 100644
--- a/blenderproc/python/renderer/SegMapRendererUtility.py
+++ b/blenderproc/python/renderer/SegMapRendererUtility.py
@@ -47,7 +47,7 @@ def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = No
with Utility.UndoAfterExecution():
RendererUtility._render_init()
- RendererUtility.set_samples(1)
+ RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_adaptive_sampling(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
diff --git a/blenderproc/python/utility/Initializer.py b/blenderproc/python/utility/Initializer.py
index bf11d4948..449f1113c 100644
--- a/blenderproc/python/utility/Initializer.py
+++ b/blenderproc/python/utility/Initializer.py
@@ -116,7 +116,7 @@ def set_default_parameters():
# Init renderer
RendererUtility._render_init()
- RendererUtility.set_samples(DefaultConfig.samples)
+ RendererUtility.set_max_amount_of_samples(DefaultConfig.samples)
addon_utils.enable("render_auto_tile_size")
RendererUtility.toggle_auto_tile_size(True)
diff --git a/docs/tutorials/renderer.md b/docs/tutorials/renderer.md
index 38a1fe26c..1e6a04d3d 100644
--- a/docs/tutorials/renderer.md
+++ b/docs/tutorials/renderer.md
@@ -43,7 +43,7 @@ In [depth images](https://en.wikipedia.org/wiki/Depth_map), each pixel contains
As blender uses a raytracer, the number of rays influences the required amount of computation and the noise in the rendered image.
The more rays are computed, the longer the rendering takes, but the more accurate and less noisy the resulting image is.
-The number of rays can be controlled by using `bproc.renderer.set_samples(num_samples)`.
+The number of rays can be controlled by using `bproc.renderer.set_max_amount_of_samples(num_samples)`.
Hereby, `num_samples` sets the number of rays that are traced per pixel.
For more information about how blenders renderer works visit the [blender docu](https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html).
diff --git a/examples/advanced/camera_depth_of_field/main.py b/examples/advanced/camera_depth_of_field/main.py
index c8bbc3e96..1677b1c10 100644
--- a/examples/advanced/camera_depth_of_field/main.py
+++ b/examples/advanced/camera_depth_of_field/main.py
@@ -42,7 +42,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/coco_annotations/main.py b/examples/advanced/coco_annotations/main.py
index 6ffc42948..b596695b6 100644
--- a/examples/advanced/coco_annotations/main.py
+++ b/examples/advanced/coco_annotations/main.py
@@ -38,7 +38,7 @@
bproc.renderer.enable_normals_output()
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/diffuse_color_image/main.py b/examples/advanced/diffuse_color_image/main.py
index f73a8597c..e482d2c64 100644
--- a/examples/advanced/diffuse_color_image/main.py
+++ b/examples/advanced/diffuse_color_image/main.py
@@ -36,7 +36,7 @@
# Also enable the diffuse color image, which describes the base color of the textures
bproc.renderer.enable_diffuse_color_output()
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/dust/main.py b/examples/advanced/dust/main.py
index 668524d08..dbf6521f3 100644
--- a/examples/advanced/dust/main.py
+++ b/examples/advanced/dust/main.py
@@ -40,7 +40,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/entity_displacement_modifier/main.py b/examples/advanced/entity_displacement_modifier/main.py
index d2daf60e5..694ddd6cd 100644
--- a/examples/advanced/entity_displacement_modifier/main.py
+++ b/examples/advanced/entity_displacement_modifier/main.py
@@ -44,7 +44,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/lens_distortion/main.py b/examples/advanced/lens_distortion/main.py
index a8d75b772..1c9840bb1 100644
--- a/examples/advanced/lens_distortion/main.py
+++ b/examples/advanced/lens_distortion/main.py
@@ -48,7 +48,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_distance_output(activate_antialiasing=True)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/lens_distortion/main_callab.py b/examples/advanced/lens_distortion/main_callab.py
index 8d2597a2d..c9a5ee26a 100644
--- a/examples/advanced/lens_distortion/main_callab.py
+++ b/examples/advanced/lens_distortion/main_callab.py
@@ -46,7 +46,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_distance_output(activate_antialiasing=True)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(20)
+bproc.renderer.set_max_amount_of_samples(20)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/material_randomizer/main.py b/examples/advanced/material_randomizer/main.py
index cd025ef9d..08362bce0 100644
--- a/examples/advanced/material_randomizer/main.py
+++ b/examples/advanced/material_randomizer/main.py
@@ -39,7 +39,7 @@
obj.set_material(i, random.choice(materials))
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py b/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
index 157f3a3a3..473eb7f15 100644
--- a/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
+++ b/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
@@ -38,7 +38,7 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py b/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
index c83af8c13..cc6c4ef71 100644
--- a/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
+++ b/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
@@ -42,7 +42,7 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/object_pose_sampling/main.py b/examples/advanced/object_pose_sampling/main.py
index f90c26b01..a3f9eacbd 100644
--- a/examples/advanced/object_pose_sampling/main.py
+++ b/examples/advanced/object_pose_sampling/main.py
@@ -45,7 +45,7 @@ def sample_pose(obj: bproc.types.MeshObject):
# activate normal rendering
bproc.renderer.enable_normals_output()
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/on_surface_object_sampling/main.py b/examples/advanced/on_surface_object_sampling/main.py
index 2ada66873..7246519a5 100644
--- a/examples/advanced/on_surface_object_sampling/main.py
+++ b/examples/advanced/on_surface_object_sampling/main.py
@@ -58,7 +58,7 @@ def sample_pose(obj: bproc.types.MeshObject):
bproc.camera.add_camera_pose(matrix_world)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/random_room_constructor/main.py b/examples/advanced/random_room_constructor/main.py
index 600f52454..6a891e802 100644
--- a/examples/advanced/random_room_constructor/main.py
+++ b/examples/advanced/random_room_constructor/main.py
@@ -47,7 +47,7 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
bproc.renderer.set_light_bounces(max_bounces=200, diffuse_bounces=200, glossy_bounces=200, transmission_bounces=200, transparent_max_bounces=200)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/basic/README.md b/examples/basics/basic/README.md
index 1c420e138..ab414f10e 100644
--- a/examples/basics/basic/README.md
+++ b/examples/basics/basic/README.md
@@ -111,7 +111,7 @@ location_x location_y location_z rotation_euler_x rotation_euler_y rotation_eul
bproc.renderer.enable_depth_output(activate_antialiasing=False)
bproc.renderer.enable_normals_output()
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/basic/main.py b/examples/basics/basic/main.py
index 8851f2115..5d9b63d45 100644
--- a/examples/basics/basic/main.py
+++ b/examples/basics/basic/main.py
@@ -33,7 +33,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/camera_object_pose/main.py b/examples/basics/camera_object_pose/main.py
index 3faf6d65b..fd68b4897 100644
--- a/examples/basics/camera_object_pose/main.py
+++ b/examples/basics/camera_object_pose/main.py
@@ -54,7 +54,7 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(100)
+bproc.renderer.set_max_amount_of_samples(100)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/camera_sampling/main.py b/examples/basics/camera_sampling/main.py
index a516cfb62..74d60ed41 100644
--- a/examples/basics/camera_sampling/main.py
+++ b/examples/basics/camera_sampling/main.py
@@ -34,7 +34,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/entity_manipulation/main.py b/examples/basics/entity_manipulation/main.py
index 45999359f..5403bdda1 100644
--- a/examples/basics/entity_manipulation/main.py
+++ b/examples/basics/entity_manipulation/main.py
@@ -35,7 +35,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/material_manipulation/main.py b/examples/basics/material_manipulation/main.py
index 3deee82e1..146a788a0 100644
--- a/examples/basics/material_manipulation/main.py
+++ b/examples/basics/material_manipulation/main.py
@@ -46,7 +46,7 @@
mat.set_principled_shader_value("Base Color", image)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/amass_human_poses/main.py b/examples/datasets/amass_human_poses/main.py
index 5e43315b1..afcb791bd 100644
--- a/examples/datasets/amass_human_poses/main.py
+++ b/examples/datasets/amass_human_poses/main.py
@@ -40,7 +40,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/blenderkit/main.py b/examples/datasets/blenderkit/main.py
index 7e4f0ea6c..8ded78333 100644
--- a/examples/datasets/blenderkit/main.py
+++ b/examples/datasets/blenderkit/main.py
@@ -34,7 +34,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_on_surface_sampling/README.md b/examples/datasets/bop_object_on_surface_sampling/README.md
index 8bedf2f43..1358a0009 100644
--- a/examples/datasets/bop_object_on_surface_sampling/README.md
+++ b/examples/datasets/bop_object_on_surface_sampling/README.md
@@ -218,7 +218,7 @@ while poses < 10:
```python
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
```
* Renders RGB using 50 `"samples"` and also outputs depth images.
diff --git a/examples/datasets/bop_object_on_surface_sampling/main.py b/examples/datasets/bop_object_on_surface_sampling/main.py
index 9270a7e1b..6a5b05888 100644
--- a/examples/datasets/bop_object_on_surface_sampling/main.py
+++ b/examples/datasets/bop_object_on_surface_sampling/main.py
@@ -113,7 +113,7 @@ def sample_initial_pose(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_physics_positioning/README.md b/examples/datasets/bop_object_physics_positioning/README.md
index 99e189c27..9dcd5e41f 100644
--- a/examples/datasets/bop_object_physics_positioning/README.md
+++ b/examples/datasets/bop_object_physics_positioning/README.md
@@ -235,7 +235,7 @@ while poses < 10:
```python
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
```
* Renders RGB using 50 `"samples"` and also outputs depth images.
diff --git a/examples/datasets/bop_object_physics_positioning/main.py b/examples/datasets/bop_object_physics_positioning/main.py
index 877641f86..ef5727765 100644
--- a/examples/datasets/bop_object_physics_positioning/main.py
+++ b/examples/datasets/bop_object_physics_positioning/main.py
@@ -123,7 +123,7 @@ def sample_pose_func(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_pose_sampling/main.py b/examples/datasets/bop_object_pose_sampling/main.py
index fe57d62c7..29ba278bd 100644
--- a/examples/datasets/bop_object_pose_sampling/main.py
+++ b/examples/datasets/bop_object_pose_sampling/main.py
@@ -37,7 +37,7 @@ def sample_pose_func(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# Render five different scenes
for _ in range(5):
diff --git a/examples/datasets/bop_scene_replication/main.py b/examples/datasets/bop_scene_replication/main.py
index 267d6cf9a..2aecfa2b5 100644
--- a/examples/datasets/bop_scene_replication/main.py
+++ b/examples/datasets/bop_scene_replication/main.py
@@ -29,7 +29,7 @@
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_samples(50)
+bproc.renderer.set_max_amount_of_samples(50)
# render the cameras of the current scene
data = bproc.renderer.render()
diff --git a/examples/datasets/front_3d/main.py b/examples/datasets/front_3d/main.py
index c5641755e..2d21e2675 100644
--- a/examples/datasets/front_3d/main.py
+++ b/examples/datasets/front_3d/main.py
@@ -70,7 +70,7 @@ def check_name(name):
# Also render normals
bproc.renderer.enable_normals_output()
# set the sample amount to 350
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/front_3d_with_improved_mat/main.py b/examples/datasets/front_3d_with_improved_mat/main.py
index 10787a137..99bbb81c5 100644
--- a/examples/datasets/front_3d_with_improved_mat/main.py
+++ b/examples/datasets/front_3d_with_improved_mat/main.py
@@ -101,7 +101,7 @@ def check_name(name):
# Also render normals
bproc.renderer.enable_normals_output()
# set the sample amount to 350
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/haven/main.py b/examples/datasets/haven/main.py
index ad6a06416..64d90940d 100644
--- a/examples/datasets/haven/main.py
+++ b/examples/datasets/haven/main.py
@@ -38,7 +38,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/ikea/main.py b/examples/datasets/ikea/main.py
index a05ba46b0..df2bc835d 100644
--- a/examples/datasets/ikea/main.py
+++ b/examples/datasets/ikea/main.py
@@ -33,7 +33,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/pix3d/main.py b/examples/datasets/pix3d/main.py
index 23117611d..4d48d5752 100644
--- a/examples/datasets/pix3d/main.py
+++ b/examples/datasets/pix3d/main.py
@@ -33,7 +33,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/scenenet/main.py b/examples/datasets/scenenet/main.py
index 9df87a2d6..d6dc7a878 100644
--- a/examples/datasets/scenenet/main.py
+++ b/examples/datasets/scenenet/main.py
@@ -77,7 +77,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/scenenet_with_cctextures/main.py b/examples/datasets/scenenet_with_cctextures/main.py
index 3ef4866a6..4c7a8e523 100644
--- a/examples/datasets/scenenet_with_cctextures/main.py
+++ b/examples/datasets/scenenet_with_cctextures/main.py
@@ -94,7 +94,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet/README.md b/examples/datasets/shapenet/README.md
index f26375845..1c612efa0 100644
--- a/examples/datasets/shapenet/README.md
+++ b/examples/datasets/shapenet/README.md
@@ -77,12 +77,13 @@ Each cameras rotation is such that it looks directly at the object and the camer
## RGB Renderer
+
```python
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet/main.py b/examples/datasets/shapenet/main.py
index 91f36f61d..f0d809fab 100644
--- a/examples/datasets/shapenet/main.py
+++ b/examples/datasets/shapenet/main.py
@@ -31,7 +31,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet_with_scenenet/main.py b/examples/datasets/shapenet_with_scenenet/main.py
index c51cb4021..5fa44a33b 100644
--- a/examples/datasets/shapenet_with_scenenet/main.py
+++ b/examples/datasets/shapenet_with_scenenet/main.py
@@ -87,7 +87,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(150)
+bproc.renderer.set_max_amount_of_samples(150)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet_with_suncg/main.py b/examples/datasets/shapenet_with_suncg/main.py
index d521b2c75..2ef01cc6a 100644
--- a/examples/datasets/shapenet_with_suncg/main.py
+++ b/examples/datasets/shapenet_with_suncg/main.py
@@ -54,7 +54,7 @@
bproc.camera.add_camera_pose(cam2world_matrix)
# set the number of samples to render for each object
-bproc.renderer.set_samples(150)
+bproc.renderer.set_max_amount_of_samples(150)
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
diff --git a/examples/datasets/suncg_with_improved_mat/main.py b/examples/datasets/suncg_with_improved_mat/main.py
index d7428cc0a..aa32f8bda 100644
--- a/examples/datasets/suncg_with_improved_mat/main.py
+++ b/examples/datasets/suncg_with_improved_mat/main.py
@@ -69,7 +69,7 @@
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_samples(350)
+bproc.renderer.set_max_amount_of_samples(350)
bproc.material.add_alpha_channel_to_textures(blurry_edges=True)
From 8f34699dabadc6d0390c0da655214ca5f8e82320 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:26:14 +0100
Subject: [PATCH 17/46] chore(set_noise_threshold): rename
set_adaptive_sampling fct. to better reflect its use-case.
---
blenderproc/api/renderer/__init__.py | 2 +-
blenderproc/python/modules/renderer/RendererInterface.py | 2 +-
blenderproc/python/renderer/FlowRendererUtility.py | 2 +-
blenderproc/python/renderer/NOCSRendererUtility.py | 2 +-
blenderproc/python/renderer/RendererUtility.py | 9 ++++-----
blenderproc/python/renderer/SegMapRendererUtility.py | 2 +-
6 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/blenderproc/api/renderer/__init__.py b/blenderproc/api/renderer/__init__.py
index 86e7ee739..061e42314 100644
--- a/blenderproc/api/renderer/__init__.py
+++ b/blenderproc/api/renderer/__init__.py
@@ -1,6 +1,6 @@
from blenderproc.python.renderer.RendererUtility import set_denoiser, set_light_bounces, toggle_auto_tile_size, \
- set_tile_size, set_cpu_threads, toggle_stereo, set_simplify_subdivision_render, set_adaptive_sampling, \
+ set_tile_size, set_cpu_threads, toggle_stereo, set_simplify_subdivision_render, set_noise_threshold, \
set_max_amount_of_samples, enable_distance_output, enable_depth_output, enable_normals_output, enable_diffuse_color_output,\
map_file_format_to_file_ending, render, set_output_format, enable_motion_blur, set_world_background
from blenderproc.python.renderer.SegMapRendererUtility import render_segmap
diff --git a/blenderproc/python/modules/renderer/RendererInterface.py b/blenderproc/python/modules/renderer/RendererInterface.py
index 9e2a990a6..0d9428c47 100644
--- a/blenderproc/python/modules/renderer/RendererInterface.py
+++ b/blenderproc/python/modules/renderer/RendererInterface.py
@@ -140,7 +140,7 @@ def _configure_renderer(self, default_samples: int = 256, use_denoiser: bool = F
RendererUtility.set_max_amount_of_samples(self.config.get_int("samples", default_samples))
if self.config.has_param("use_adaptive_sampling"):
- RendererUtility.set_adaptive_sampling(self.config.get_float("use_adaptive_sampling"))
+ RendererUtility.set_noise_threshold(self.config.get_float("use_adaptive_sampling"))
if self.config.get_bool("auto_tile_size", True):
RendererUtility.toggle_auto_tile_size(True)
diff --git a/blenderproc/python/renderer/FlowRendererUtility.py b/blenderproc/python/renderer/FlowRendererUtility.py
index b8c5fec99..1791d0ca8 100644
--- a/blenderproc/python/renderer/FlowRendererUtility.py
+++ b/blenderproc/python/renderer/FlowRendererUtility.py
@@ -43,7 +43,7 @@ def render_optical_flow(output_dir: str = None, temp_dir: str = None, get_forwar
with Utility.UndoAfterExecution():
RendererUtility._render_init()
RendererUtility.set_max_amount_of_samples(1)
- RendererUtility.set_adaptive_sampling(0)
+ RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
diff --git a/blenderproc/python/renderer/NOCSRendererUtility.py b/blenderproc/python/renderer/NOCSRendererUtility.py
index a4bdd8bdf..5e8d532b9 100644
--- a/blenderproc/python/renderer/NOCSRendererUtility.py
+++ b/blenderproc/python/renderer/NOCSRendererUtility.py
@@ -45,7 +45,7 @@ def render_nocs(output_dir: Optional[str] = None, file_prefix: str = "nocs_", ou
# Set all fast rendering parameters with only one ray per pixel
RendererUtility._render_init()
RendererUtility.set_max_amount_of_samples(1)
- RendererUtility.set_adaptive_sampling(0)
+ RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
bpy.context.scene.cycles.filter_width = 0.0
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index 1450590cb..2b84a12eb 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -152,16 +152,15 @@ def set_simplify_subdivision_render(simplify_subdivision_render: int):
bpy.context.scene.render.use_simplify = False
-def set_adaptive_sampling(adaptive_threshold: float):
- """ Configures adaptive sampling.
-
+def set_noise_threshold(noise_threshold: float):
+ """ Configures the adaptive sampling, the noise threshold is typically between 0.1 and 0.001.
Adaptive sampling automatically decreases the number of samples per pixel based on estimated level of noise.
:param adaptive_threshold: Noise level to stop sampling at. If 0 is given, adaptive sampling is disabled.
"""
- if adaptive_threshold > 0:
+ if noise_threshold > 0:
bpy.context.scene.cycles.use_adaptive_sampling = True
- bpy.context.scene.cycles.adaptive_threshold = adaptive_threshold
+ bpy.context.scene.cycles.adaptive_threshold = noise_threshold
else:
bpy.context.scene.cycles.use_adaptive_sampling = False
diff --git a/blenderproc/python/renderer/SegMapRendererUtility.py b/blenderproc/python/renderer/SegMapRendererUtility.py
index 4474689e5..9fc16b3ce 100644
--- a/blenderproc/python/renderer/SegMapRendererUtility.py
+++ b/blenderproc/python/renderer/SegMapRendererUtility.py
@@ -48,7 +48,7 @@ def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = No
with Utility.UndoAfterExecution():
RendererUtility._render_init()
RendererUtility.set_max_amount_of_samples(1)
- RendererUtility.set_adaptive_sampling(0)
+ RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)
From 66dd14ab4608ea27d32c2deccdaae6e9b7cf5a90 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:26:32 +0100
Subject: [PATCH 18/46] doc(set_noise_threshold): improve documentation
---
blenderproc/python/renderer/RendererUtility.py | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index 2b84a12eb..9e322f053 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -156,7 +156,13 @@ def set_noise_threshold(noise_threshold: float):
""" Configures the adaptive sampling, the noise threshold is typically between 0.1 and 0.001.
Adaptive sampling automatically decreases the number of samples per pixel based on estimated level of noise.
- :param adaptive_threshold: Noise level to stop sampling at. If 0 is given, adaptive sampling is disabled.
+ We do not recommend setting the noise threshold value to zero and therefore turning off the adaptive sampling.
+
+ For more information see the official documentation:
+ https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html#adaptive-sampling
+
+ :param noise_threshold: Noise level to stop sampling at. If 0 is given, adaptive sampling is disabled and only the
+ max amount of samples is used.
"""
if noise_threshold > 0:
bpy.context.scene.cycles.use_adaptive_sampling = True
@@ -166,12 +172,15 @@ def set_noise_threshold(noise_threshold: float):
def set_max_amount_of_samples(samples: int):
- """ Sets the number of samples to render for each pixel.
+ """ Sets the maximum number of samples to render for each pixel.
+ This maximum amount is usually not reached if the noise threshold is low enough.
+ If the noise threshold was set to 0, then only the maximum number of samples is used (We do not recommend this).
- :param samples: The number of samples per pixel
+ :param samples: The maximum number of samples per pixel
"""
bpy.context.scene.cycles.samples = samples
+
def enable_distance_output(activate_antialiasing: bool, output_dir: Optional[str] = None, file_prefix: str = "distance_",
output_key: str = "distance", antialiasing_distance_max: float = None,
convert_to_depth: bool = False):
From 5a0ff1360266f3ee4da33f96abfef5dfacb1c239 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:35:11 +0100
Subject: [PATCH 19/46] feat(auto_tile): remove the auto tile addon as it is
 no longer needed with Cycles X, all functions related to it are also removed
---
blenderproc/README.md | 8 +++----
blenderproc/api/renderer/__init__.py | 4 ++--
.../modules/renderer/RendererInterface.py | 17 --------------
.../python/renderer/RendererUtility.py | 22 -------------------
blenderproc/python/utility/DefaultConfig.py | 5 +++--
blenderproc/python/utility/Initializer.py | 3 +--
6 files changed, 10 insertions(+), 49 deletions(-)
diff --git a/blenderproc/README.md b/blenderproc/README.md
index b6ca7f16f..bc1f42bf0 100644
--- a/blenderproc/README.md
+++ b/blenderproc/README.md
@@ -67,14 +67,14 @@ Config file:
"config": {
"global": {
"output_dir": "/tmp/",
- "auto_tile_size": False
+ "max_bounces": False
}
}
},
{
"module": "renderer.NewRenderer",
"config": {
- "auto_tile_size": True,
+ "max_bounces": True,
"cycles": {
"samples": 255
}
@@ -96,13 +96,13 @@ self.get_float("pixel_aspect_x")
self.get_string("output_dir", "output/")
# -> /tmp/ this value is drawn from the GlobalStorage
-self.get_bool("auto_tile_size")
+self.get_bool("max_bounces")
# -> True
self.config.get_int("resolution_x", 512)
# -> 512
-self.config.get_int("tile_x")
+self.config.get_int("example_value")
# -> throws an error
```
diff --git a/blenderproc/api/renderer/__init__.py b/blenderproc/api/renderer/__init__.py
index 061e42314..9840cb919 100644
--- a/blenderproc/api/renderer/__init__.py
+++ b/blenderproc/api/renderer/__init__.py
@@ -1,6 +1,6 @@
-from blenderproc.python.renderer.RendererUtility import set_denoiser, set_light_bounces, toggle_auto_tile_size, \
- set_tile_size, set_cpu_threads, toggle_stereo, set_simplify_subdivision_render, set_noise_threshold, \
+from blenderproc.python.renderer.RendererUtility import set_denoiser, set_light_bounces, \
+ set_cpu_threads, toggle_stereo, set_simplify_subdivision_render, set_noise_threshold, \
set_max_amount_of_samples, enable_distance_output, enable_depth_output, enable_normals_output, enable_diffuse_color_output,\
map_file_format_to_file_ending, render, set_output_format, enable_motion_blur, set_world_background
from blenderproc.python.renderer.SegMapRendererUtility import render_segmap
diff --git a/blenderproc/python/modules/renderer/RendererInterface.py b/blenderproc/python/modules/renderer/RendererInterface.py
index 0d9428c47..283b3f9f1 100644
--- a/blenderproc/python/modules/renderer/RendererInterface.py
+++ b/blenderproc/python/modules/renderer/RendererInterface.py
@@ -33,16 +33,6 @@ class RendererInterface(Module):
This means pixel is sampled until the noise level is smaller than specified or the maximum amount of
samples were reached. Do not use this with Non-RGB-Renders! Only used if specified" in config. Default: 0.0
- float
- * - auto_tile_size
- - If true, then the number of render tiles is set automatically using the render_auto_tile_size addon.
- Default: True.
- - bool
- * - tile_x
- - The number of separate render tiles to use along the x-axis. Ignored if auto_tile_size is set to true.
- - int
- * - tile_y
- - The number of separate render tiles to use along the y-axis. Ignored if auto_tile_size is set to true.
- - int
* - simplify_subdivision_render
- Global maximum subdivision level during rendering. Speeds up rendering. Default: 3
- int
@@ -125,7 +115,6 @@ class RendererInterface(Module):
def __init__(self, config: Config):
Module.__init__(self, config)
- addon_utils.enable("render_auto_tile_size")
def _configure_renderer(self, default_samples: int = 256, use_denoiser: bool = False,
default_denoiser: str = "Intel"):
@@ -142,12 +131,6 @@ def _configure_renderer(self, default_samples: int = 256, use_denoiser: bool = F
if self.config.has_param("use_adaptive_sampling"):
RendererUtility.set_noise_threshold(self.config.get_float("use_adaptive_sampling"))
- if self.config.get_bool("auto_tile_size", True):
- RendererUtility.toggle_auto_tile_size(True)
- else:
- RendererUtility.toggle_auto_tile_size(False)
- RendererUtility.set_tile_size(self.config.get_int("tile_x"), self.config.get_int("tile_y"))
-
# Set number of cpu cores used for rendering (1 thread is always used for coordination => 1
# cpu thread means GPU-only rendering)
RendererUtility.set_cpu_threads(self.config.get_int("cpu_threads", 0))
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index 9e322f053..cf04d0169 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -95,28 +95,6 @@ def set_light_bounces(diffuse_bounces: Optional[int] = None, glossy_bounces: Opt
bpy.context.scene.cycles.volume_bounces = volume_bounces
-def toggle_auto_tile_size(enable: bool):
- """ Enables/Disables the automatic tile size detection via the render_auto_tile_size addon.
-
- :param enable: True, if it should be enabled.
- """
- #bpy.context.scene.ats_settings.is_enabled = enable
- pass
-
-
-def set_tile_size(tile_x: int, tile_y: int):
- """ Sets the rendering tile size.
-
- This will automatically disable the automatic tile size detection.
-
- :param tile_x: The horizontal tile size in pixels.
- :param tile_y: The vertical tile size in pixels.
- """
- toggle_auto_tile_size(False)
- bpy.context.scene.render.tile_x = tile_x
- bpy.context.scene.render.tile_y = tile_y
-
-
def set_cpu_threads(num_threads: int):
""" Sets the number of CPU cores to use simultaneously while rendering.
diff --git a/blenderproc/python/utility/DefaultConfig.py b/blenderproc/python/utility/DefaultConfig.py
index f9e5514bc..d9fb1a0ea 100644
--- a/blenderproc/python/utility/DefaultConfig.py
+++ b/blenderproc/python/utility/DefaultConfig.py
@@ -22,7 +22,8 @@ class DefaultConfig:
color_depth = 8
enable_transparency = False
jpg_quality = 95
- samples = 100
+ samples = 1024
+ sampling_noise_threshold = 0.01
cpu_threads = 1
denoiser = "INTEL"
simplify_subdivision_render = 3
@@ -37,4 +38,4 @@ class DefaultConfig:
# Setup
default_pip_packages = ["wheel", "pyyaml==5.1.2", "imageio==2.9.0", "gitpython==3.1.18", "scikit-image==0.18.3", "pypng==0.0.20", "scipy==1.7.1",
- "matplotlib==3.4.3", "pytz==2021.1", "h5py==3.4.0", "Pillow==8.3.2", "opencv-contrib-python==4.5.3.56", "scikit-learn==0.24.2"]
\ No newline at end of file
+ "matplotlib==3.4.3", "pytz==2021.1", "h5py==3.4.0", "Pillow==8.3.2", "opencv-contrib-python==4.5.3.56", "scikit-learn==0.24.2"]
diff --git a/blenderproc/python/utility/Initializer.py b/blenderproc/python/utility/Initializer.py
index 449f1113c..7238dbd10 100644
--- a/blenderproc/python/utility/Initializer.py
+++ b/blenderproc/python/utility/Initializer.py
@@ -117,8 +117,7 @@ def set_default_parameters():
# Init renderer
RendererUtility._render_init()
RendererUtility.set_max_amount_of_samples(DefaultConfig.samples)
- addon_utils.enable("render_auto_tile_size")
- RendererUtility.toggle_auto_tile_size(True)
+ RendererUtility.set_noise_threshold(DefaultConfig.sampling_noise_threshold)
# Set number of cpu cores used for rendering (1 thread is always used for coordination => 1
# cpu thread means GPU-only rendering)
From 79df0c97a6ea1ca0e2626f6fad2693829652fdef Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:45:10 +0100
Subject: [PATCH 20/46] doc(set_max_amount_of_samples): improve tutorial
introduction to this fct.
---
blenderproc/python/renderer/FlowRendererUtility.py | 1 +
blenderproc/python/renderer/NOCSRendererUtility.py | 1 +
blenderproc/python/renderer/SegMapRendererUtility.py | 1 +
docs/tutorials/renderer.md | 12 +++++++-----
4 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/blenderproc/python/renderer/FlowRendererUtility.py b/blenderproc/python/renderer/FlowRendererUtility.py
index 1791d0ca8..de77284f0 100644
--- a/blenderproc/python/renderer/FlowRendererUtility.py
+++ b/blenderproc/python/renderer/FlowRendererUtility.py
@@ -42,6 +42,7 @@ def render_optical_flow(output_dir: str = None, temp_dir: str = None, get_forwar
with Utility.UndoAfterExecution():
RendererUtility._render_init()
+ # the amount of samples must be one and there can not be any noise threshold
RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
diff --git a/blenderproc/python/renderer/NOCSRendererUtility.py b/blenderproc/python/renderer/NOCSRendererUtility.py
index 5e8d532b9..9931b969c 100644
--- a/blenderproc/python/renderer/NOCSRendererUtility.py
+++ b/blenderproc/python/renderer/NOCSRendererUtility.py
@@ -44,6 +44,7 @@ def render_nocs(output_dir: Optional[str] = None, file_prefix: str = "nocs_", ou
# Set all fast rendering parameters with only one ray per pixel
RendererUtility._render_init()
+ # the amount of samples must be one and there can not be any noise threshold
RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
diff --git a/blenderproc/python/renderer/SegMapRendererUtility.py b/blenderproc/python/renderer/SegMapRendererUtility.py
index 9fc16b3ce..d184c0f8e 100644
--- a/blenderproc/python/renderer/SegMapRendererUtility.py
+++ b/blenderproc/python/renderer/SegMapRendererUtility.py
@@ -47,6 +47,7 @@ def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = No
with Utility.UndoAfterExecution():
RendererUtility._render_init()
+ # the amount of samples must be one and there can not be any noise threshold
RendererUtility.set_max_amount_of_samples(1)
RendererUtility.set_noise_threshold(0)
RendererUtility.set_denoiser(None)
diff --git a/docs/tutorials/renderer.md b/docs/tutorials/renderer.md
index 1e6a04d3d..7236af784 100644
--- a/docs/tutorials/renderer.md
+++ b/docs/tutorials/renderer.md
@@ -43,16 +43,18 @@ In [depth images](https://en.wikipedia.org/wiki/Depth_map), each pixel contains
As blender uses a raytracer, the number of rays influences the required amount of computation and the noise in the rendered image.
The more rays are computed, the longer the rendering takes, but the more accurate and less noisy the resulting image is.
-The number of rays can be controlled by using `bproc.renderer.set_max_amount_of_samples(num_samples)`.
-Hereby, `num_samples` sets the number of rays that are traced per pixel.
+The noise level can be controlled by using `bproc.renderer.set_noise_threshold(noise_threshold)`.
+This means that for each pixel only so many rays are used to get below this noise threshold.
+Hereby, `noise_threshold` is a float value above `0` and below `0.1`.
+A higher value means more noise per pixel, a lower value results in less noise but longer computation time.
+You can influence the maximum amount of samples per pixel with the `bproc.renderer.set_max_amount_of_samples(max_amount_of_samples)` function.
For more information about how blenders renderer works visit the [blender docu](https://docs.blender.org/manual/en/latest/render/cycles/render_settings/sampling.html).
-The required amount of samples is unfortunately quite high to achieve a smooth result and therefore rendering can take quite long.
-To reduce the number of required samples, blender offers Denoiser to reduce the noise in the resulting image.
+The required noise level is unfortunately quite low to achieve a smooth result and therefore rendering can take quite long.
+To reduce the number of required samples per pixel, blender offers Denoiser to reduce the noise in the resulting image.
Set them via `bproc.renderer.set_denoiser`:
* `bproc.renderer.set_denoiser("INTEL")`: Activates Intels [Open Image Denoiser](https://www.openimagedenoise.org/)
-* `bproc.renderer.set_denoiser("BLENDER")`: Uses blenders built-in denoiser.
* `bproc.renderer.set_denoiser(None)`: Deactivates any denoiser.
Per default "INTEL" is used.
From 4e5ed38f1eddb40aec4ab4479b892bb4f3b86b07 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:45:37 +0100
Subject: [PATCH 21/46] fix(RenderUtility): remove the BLENDER denoiser as it
is no longer available in blender 3.0.0
---
blenderproc/python/renderer/RendererUtility.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/blenderproc/python/renderer/RendererUtility.py b/blenderproc/python/renderer/RendererUtility.py
index cf04d0169..c69b182eb 100644
--- a/blenderproc/python/renderer/RendererUtility.py
+++ b/blenderproc/python/renderer/RendererUtility.py
@@ -19,7 +19,7 @@ def set_denoiser(denoiser: Optional[str]):
Automatically disables all previously activated denoiser.
- :param denoiser: The name of the denoiser which should be enabled. Options are "INTEL", "OPTIX", "BLENDER" and None. \
+ :param denoiser: The name of the denoiser which should be enabled. Options are "INTEL", "OPTIX" and None. \
If None is given, then no denoiser will be active.
"""
# Make sure there is no denoiser active
@@ -54,10 +54,6 @@ def set_denoiser(denoiser: Optional[str]):
links.new(render_layer_node.outputs['DiffCol'], denoise_node.inputs['Albedo'])
links.new(render_layer_node.outputs['Normal'], denoise_node.inputs['Normal'])
- elif denoiser.upper() == "BLENDER":
- bpy.context.scene.cycles.use_denoising = True
- bpy.context.view_layer.cycles.use_denoising = True
- bpy.context.scene.cycles.denoiser = "NLM"
else:
raise Exception("No such denoiser: " + denoiser)
From d69155acaeae827f048825a30a77754e808b0902 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:51:29 +0100
Subject: [PATCH 22/46] chore(set_max_amount_of_samples): remove from all
examples except the basic one, as the default value should work in all
examples
---
examples/advanced/camera_depth_of_field/main.py | 2 --
examples/advanced/coco_annotations/main.py | 3 ---
examples/advanced/diffuse_color_image/main.py | 2 --
examples/advanced/dust/main.py | 2 --
examples/advanced/entity_displacement_modifier/main.py | 2 --
examples/advanced/lens_distortion/main.py | 2 --
examples/advanced/lens_distortion/main_callab.py | 2 --
examples/advanced/material_randomizer/main.py | 3 ---
.../advanced/motion_blur_rolling_shutter/main_motion_blur.py | 2 --
.../motion_blur_rolling_shutter/main_rolling_shutter.py | 2 --
examples/advanced/object_pose_sampling/main.py | 2 --
examples/advanced/on_surface_object_sampling/main.py | 3 ---
examples/advanced/random_room_constructor/main.py | 2 --
examples/basics/basic/README.md | 2 --
examples/basics/basic/main.py | 2 --
examples/basics/camera_object_pose/main.py | 2 --
examples/basics/camera_sampling/main.py | 2 --
examples/basics/entity_manipulation/main.py | 2 --
examples/basics/material_manipulation/main.py | 3 ---
examples/datasets/amass_human_poses/main.py | 2 --
examples/datasets/blenderkit/main.py | 2 --
examples/datasets/bop_object_on_surface_sampling/README.md | 3 +--
examples/datasets/bop_object_on_surface_sampling/main.py | 1 -
examples/datasets/bop_object_physics_positioning/README.md | 3 +--
examples/datasets/bop_object_physics_positioning/main.py | 1 -
examples/datasets/bop_object_pose_sampling/main.py | 1 -
examples/datasets/bop_scene_replication/main.py | 1 -
examples/datasets/front_3d/main.py | 2 --
examples/datasets/front_3d_with_improved_mat/main.py | 2 --
examples/datasets/haven/main.py | 2 --
examples/datasets/ikea/main.py | 2 --
examples/datasets/pix3d/main.py | 2 --
examples/datasets/scenenet/main.py | 2 --
examples/datasets/scenenet_with_cctextures/main.py | 2 --
examples/datasets/shapenet/README.md | 2 --
examples/datasets/shapenet/main.py | 2 --
examples/datasets/shapenet_with_scenenet/main.py | 2 --
examples/datasets/shapenet_with_suncg/main.py | 3 ---
examples/datasets/suncg_with_improved_mat/main.py | 2 --
39 files changed, 2 insertions(+), 79 deletions(-)
diff --git a/examples/advanced/camera_depth_of_field/main.py b/examples/advanced/camera_depth_of_field/main.py
index 1677b1c10..9954d8f4b 100644
--- a/examples/advanced/camera_depth_of_field/main.py
+++ b/examples/advanced/camera_depth_of_field/main.py
@@ -41,8 +41,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/coco_annotations/main.py b/examples/advanced/coco_annotations/main.py
index b596695b6..054eb8ca2 100644
--- a/examples/advanced/coco_annotations/main.py
+++ b/examples/advanced/coco_annotations/main.py
@@ -37,9 +37,6 @@
# activate normal rendering
bproc.renderer.enable_normals_output()
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(50)
-
# render the whole pipeline
data = bproc.renderer.render()
seg_data = bproc.renderer.render_segmap(map_by=["instance", "class", "name"])
diff --git a/examples/advanced/diffuse_color_image/main.py b/examples/advanced/diffuse_color_image/main.py
index e482d2c64..567bd1895 100644
--- a/examples/advanced/diffuse_color_image/main.py
+++ b/examples/advanced/diffuse_color_image/main.py
@@ -35,8 +35,6 @@
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# Also enable the diffuse color image, which describes the base color of the textures
bproc.renderer.enable_diffuse_color_output()
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/dust/main.py b/examples/advanced/dust/main.py
index dbf6521f3..c703522e3 100644
--- a/examples/advanced/dust/main.py
+++ b/examples/advanced/dust/main.py
@@ -39,8 +39,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/entity_displacement_modifier/main.py b/examples/advanced/entity_displacement_modifier/main.py
index 694ddd6cd..59bdfb7bb 100644
--- a/examples/advanced/entity_displacement_modifier/main.py
+++ b/examples/advanced/entity_displacement_modifier/main.py
@@ -43,8 +43,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/lens_distortion/main.py b/examples/advanced/lens_distortion/main.py
index 1c9840bb1..0d512fa63 100644
--- a/examples/advanced/lens_distortion/main.py
+++ b/examples/advanced/lens_distortion/main.py
@@ -47,8 +47,6 @@
# activate normal and distance rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_distance_output(activate_antialiasing=True)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/lens_distortion/main_callab.py b/examples/advanced/lens_distortion/main_callab.py
index c9a5ee26a..ad9369e9a 100644
--- a/examples/advanced/lens_distortion/main_callab.py
+++ b/examples/advanced/lens_distortion/main_callab.py
@@ -45,8 +45,6 @@
# activate normal and distance rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_distance_output(activate_antialiasing=True)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(20)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/material_randomizer/main.py b/examples/advanced/material_randomizer/main.py
index 08362bce0..2a700327e 100644
--- a/examples/advanced/material_randomizer/main.py
+++ b/examples/advanced/material_randomizer/main.py
@@ -38,9 +38,6 @@
# Replace the material with a random one
obj.set_material(i, random.choice(materials))
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
-
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py b/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
index 473eb7f15..9688b53b7 100644
--- a/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
+++ b/examples/advanced/motion_blur_rolling_shutter/main_motion_blur.py
@@ -37,8 +37,6 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py b/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
index cc6c4ef71..a7c4232ef 100644
--- a/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
+++ b/examples/advanced/motion_blur_rolling_shutter/main_rolling_shutter.py
@@ -41,8 +41,6 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/object_pose_sampling/main.py b/examples/advanced/object_pose_sampling/main.py
index a3f9eacbd..61ce6ecf5 100644
--- a/examples/advanced/object_pose_sampling/main.py
+++ b/examples/advanced/object_pose_sampling/main.py
@@ -44,8 +44,6 @@ def sample_pose(obj: bproc.types.MeshObject):
# activate normal rendering
bproc.renderer.enable_normals_output()
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/on_surface_object_sampling/main.py b/examples/advanced/on_surface_object_sampling/main.py
index 7246519a5..3390ca2db 100644
--- a/examples/advanced/on_surface_object_sampling/main.py
+++ b/examples/advanced/on_surface_object_sampling/main.py
@@ -57,9 +57,6 @@ def sample_pose(obj: bproc.types.MeshObject):
matrix_world = bproc.math.build_transformation_mat(position, euler_rotation)
bproc.camera.add_camera_pose(matrix_world)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
-
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/advanced/random_room_constructor/main.py b/examples/advanced/random_room_constructor/main.py
index 6a891e802..3889a1b18 100644
--- a/examples/advanced/random_room_constructor/main.py
+++ b/examples/advanced/random_room_constructor/main.py
@@ -46,8 +46,6 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
bproc.renderer.set_light_bounces(max_bounces=200, diffuse_bounces=200, glossy_bounces=200, transmission_bounces=200, transparent_max_bounces=200)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/basic/README.md b/examples/basics/basic/README.md
index ab414f10e..4207a17c3 100644
--- a/examples/basics/basic/README.md
+++ b/examples/basics/basic/README.md
@@ -110,8 +110,6 @@ location_x location_y location_z rotation_euler_x rotation_euler_y rotation_eul
# activate normal and depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
bproc.renderer.enable_normals_output()
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/basic/main.py b/examples/basics/basic/main.py
index 5d9b63d45..b4b8ad78e 100644
--- a/examples/basics/basic/main.py
+++ b/examples/basics/basic/main.py
@@ -32,8 +32,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/camera_object_pose/main.py b/examples/basics/camera_object_pose/main.py
index fd68b4897..2ecb5ba05 100644
--- a/examples/basics/camera_object_pose/main.py
+++ b/examples/basics/camera_object_pose/main.py
@@ -53,8 +53,6 @@
# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(100)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/camera_sampling/main.py b/examples/basics/camera_sampling/main.py
index 74d60ed41..b7af11558 100644
--- a/examples/basics/camera_sampling/main.py
+++ b/examples/basics/camera_sampling/main.py
@@ -33,8 +33,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/entity_manipulation/main.py b/examples/basics/entity_manipulation/main.py
index 5403bdda1..4e5450c66 100644
--- a/examples/basics/entity_manipulation/main.py
+++ b/examples/basics/entity_manipulation/main.py
@@ -34,8 +34,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/basics/material_manipulation/main.py b/examples/basics/material_manipulation/main.py
index 146a788a0..1c95483a7 100644
--- a/examples/basics/material_manipulation/main.py
+++ b/examples/basics/material_manipulation/main.py
@@ -45,9 +45,6 @@
# Set it as base color of the current material
mat.set_principled_shader_value("Base Color", image)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
-
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/amass_human_poses/main.py b/examples/datasets/amass_human_poses/main.py
index afcb791bd..2fcd7a215 100644
--- a/examples/datasets/amass_human_poses/main.py
+++ b/examples/datasets/amass_human_poses/main.py
@@ -39,8 +39,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/blenderkit/main.py b/examples/datasets/blenderkit/main.py
index 8ded78333..4a0fa1776 100644
--- a/examples/datasets/blenderkit/main.py
+++ b/examples/datasets/blenderkit/main.py
@@ -33,8 +33,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_on_surface_sampling/README.md b/examples/datasets/bop_object_on_surface_sampling/README.md
index 1358a0009..1a7eedcd0 100644
--- a/examples/datasets/bop_object_on_surface_sampling/README.md
+++ b/examples/datasets/bop_object_on_surface_sampling/README.md
@@ -218,9 +218,8 @@ while poses < 10:
```python
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
```
-* Renders RGB using 50 `"samples"` and also outputs depth images.
+* Renders an RGB image and also outputs depth images.
### Bop Writer
diff --git a/examples/datasets/bop_object_on_surface_sampling/main.py b/examples/datasets/bop_object_on_surface_sampling/main.py
index 6a5b05888..969fc3214 100644
--- a/examples/datasets/bop_object_on_surface_sampling/main.py
+++ b/examples/datasets/bop_object_on_surface_sampling/main.py
@@ -113,7 +113,6 @@ def sample_initial_pose(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_physics_positioning/README.md b/examples/datasets/bop_object_physics_positioning/README.md
index 9dcd5e41f..42a5333a4 100644
--- a/examples/datasets/bop_object_physics_positioning/README.md
+++ b/examples/datasets/bop_object_physics_positioning/README.md
@@ -235,9 +235,8 @@ while poses < 10:
```python
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
```
-* Renders RGB using 50 `"samples"` and also outputs depth images.
+* Renders an RGB image and also outputs depth images.
### Bop Writer
diff --git a/examples/datasets/bop_object_physics_positioning/main.py b/examples/datasets/bop_object_physics_positioning/main.py
index ef5727765..2ba7f11ee 100644
--- a/examples/datasets/bop_object_physics_positioning/main.py
+++ b/examples/datasets/bop_object_physics_positioning/main.py
@@ -123,7 +123,6 @@ def sample_pose_func(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/bop_object_pose_sampling/main.py b/examples/datasets/bop_object_pose_sampling/main.py
index 29ba278bd..d9a78cea5 100644
--- a/examples/datasets/bop_object_pose_sampling/main.py
+++ b/examples/datasets/bop_object_pose_sampling/main.py
@@ -37,7 +37,6 @@ def sample_pose_func(obj: bproc.types.MeshObject):
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
# Render five different scenes
for _ in range(5):
diff --git a/examples/datasets/bop_scene_replication/main.py b/examples/datasets/bop_scene_replication/main.py
index 2aecfa2b5..2d2970e79 100644
--- a/examples/datasets/bop_scene_replication/main.py
+++ b/examples/datasets/bop_scene_replication/main.py
@@ -29,7 +29,6 @@
# activate depth rendering and set amount of samples for color rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-bproc.renderer.set_max_amount_of_samples(50)
# render the cameras of the current scene
data = bproc.renderer.render()
diff --git a/examples/datasets/front_3d/main.py b/examples/datasets/front_3d/main.py
index 2d21e2675..99bb66049 100644
--- a/examples/datasets/front_3d/main.py
+++ b/examples/datasets/front_3d/main.py
@@ -69,8 +69,6 @@ def check_name(name):
# Also render normals
bproc.renderer.enable_normals_output()
-# set the sample amount to 350
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/front_3d_with_improved_mat/main.py b/examples/datasets/front_3d_with_improved_mat/main.py
index 99bbb81c5..87116d58c 100644
--- a/examples/datasets/front_3d_with_improved_mat/main.py
+++ b/examples/datasets/front_3d_with_improved_mat/main.py
@@ -100,8 +100,6 @@ def check_name(name):
# Also render normals
bproc.renderer.enable_normals_output()
-# set the sample amount to 350
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/haven/main.py b/examples/datasets/haven/main.py
index 64d90940d..738317c2b 100644
--- a/examples/datasets/haven/main.py
+++ b/examples/datasets/haven/main.py
@@ -37,8 +37,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/ikea/main.py b/examples/datasets/ikea/main.py
index df2bc835d..e4f709b73 100644
--- a/examples/datasets/ikea/main.py
+++ b/examples/datasets/ikea/main.py
@@ -32,8 +32,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/pix3d/main.py b/examples/datasets/pix3d/main.py
index 4d48d5752..e96836b44 100644
--- a/examples/datasets/pix3d/main.py
+++ b/examples/datasets/pix3d/main.py
@@ -32,8 +32,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/scenenet/main.py b/examples/datasets/scenenet/main.py
index d6dc7a878..8e03efc01 100644
--- a/examples/datasets/scenenet/main.py
+++ b/examples/datasets/scenenet/main.py
@@ -76,8 +76,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/scenenet_with_cctextures/main.py b/examples/datasets/scenenet_with_cctextures/main.py
index 4c7a8e523..59969ef5b 100644
--- a/examples/datasets/scenenet_with_cctextures/main.py
+++ b/examples/datasets/scenenet_with_cctextures/main.py
@@ -93,8 +93,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet/README.md b/examples/datasets/shapenet/README.md
index 1c612efa0..8caf6e275 100644
--- a/examples/datasets/shapenet/README.md
+++ b/examples/datasets/shapenet/README.md
@@ -82,8 +82,6 @@ Each cameras rotation is such that it looks directly at the object and the camer
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet/main.py b/examples/datasets/shapenet/main.py
index f0d809fab..bd6f11f28 100644
--- a/examples/datasets/shapenet/main.py
+++ b/examples/datasets/shapenet/main.py
@@ -30,8 +30,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet_with_scenenet/main.py b/examples/datasets/shapenet_with_scenenet/main.py
index 5fa44a33b..05e56132e 100644
--- a/examples/datasets/shapenet_with_scenenet/main.py
+++ b/examples/datasets/shapenet_with_scenenet/main.py
@@ -86,8 +86,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(150)
# render the whole pipeline
data = bproc.renderer.render()
diff --git a/examples/datasets/shapenet_with_suncg/main.py b/examples/datasets/shapenet_with_suncg/main.py
index 2ef01cc6a..f108a50e7 100644
--- a/examples/datasets/shapenet_with_suncg/main.py
+++ b/examples/datasets/shapenet_with_suncg/main.py
@@ -53,9 +53,6 @@
cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
bproc.camera.add_camera_pose(cam2world_matrix)
-# set the number of samples to render for each object
-bproc.renderer.set_max_amount_of_samples(150)
-
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
diff --git a/examples/datasets/suncg_with_improved_mat/main.py b/examples/datasets/suncg_with_improved_mat/main.py
index aa32f8bda..bb84fe66d 100644
--- a/examples/datasets/suncg_with_improved_mat/main.py
+++ b/examples/datasets/suncg_with_improved_mat/main.py
@@ -68,8 +68,6 @@
# activate normal and depth rendering
bproc.renderer.enable_normals_output()
bproc.renderer.enable_depth_output(activate_antialiasing=False)
-# set the amount of samples, which should be used for the color rendering
-bproc.renderer.set_max_amount_of_samples(350)
bproc.material.add_alpha_channel_to_textures(blurry_edges=True)
From 300675d4c5e6a8d0fce63da9908f4c8fa7000d1b Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 15:54:38 +0100
Subject: [PATCH 23/46] chore(.gitignore): add resources/AMASS to git ignore
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index 2734e09e9..6f359ae99 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ blender
.vscode/
output/
debug/
+resources/AMASS
resources/cctextures*
resources/scenenet/*zip
resources/scenenet/SceneNetData
From 0ed0142905e43200cc1ef3214fdfdbea777982e6 Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 16:03:18 +0100
Subject: [PATCH 24/46] chore(set_max_amount_of_samples): clean up all uses of
word samples
---
blenderproc/README.md | 4 ++--
blenderproc/python/modules/renderer/FlowRenderer.py | 2 +-
blenderproc/python/modules/renderer/RendererInterface.py | 6 +++---
blenderproc/python/modules/renderer/SegMapRenderer.py | 2 +-
examples/basics/basic/README.md | 8 +++++---
examples/datasets/bop_object_on_surface_sampling/main.py | 2 +-
examples/datasets/bop_object_physics_positioning/main.py | 2 +-
examples/datasets/bop_object_pose_sampling/main.py | 2 +-
examples/datasets/bop_scene_replication/main.py | 2 +-
9 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/blenderproc/README.md b/blenderproc/README.md
index bc1f42bf0..5b1ea3320 100644
--- a/blenderproc/README.md
+++ b/blenderproc/README.md
@@ -76,7 +76,7 @@ Config file:
"config": {
"max_bounces": True,
"cycles": {
- "samples": 255
+ "value": 255
}
}
}
@@ -87,7 +87,7 @@ Config file:
Inside the `renderer.NewRenderer` module:
```python
-self.get_int("cycles/samples", 42)
+self.get_int("cycles/value", 42)
# -> 255
self.get_float("pixel_aspect_x")
diff --git a/blenderproc/python/modules/renderer/FlowRenderer.py b/blenderproc/python/modules/renderer/FlowRenderer.py
index 38f88d07f..d4548ccbe 100644
--- a/blenderproc/python/modules/renderer/FlowRenderer.py
+++ b/blenderproc/python/modules/renderer/FlowRenderer.py
@@ -46,7 +46,7 @@ def __init__(self, config):
def run(self):
with Utility.UndoAfterExecution():
- self._configure_renderer(default_samples=1)
+ self._configure_renderer(max_amount_of_samples=1)
if not self._avoid_output:
render_optical_flow(
diff --git a/blenderproc/python/modules/renderer/RendererInterface.py b/blenderproc/python/modules/renderer/RendererInterface.py
index 283b3f9f1..aac7c7803 100644
--- a/blenderproc/python/modules/renderer/RendererInterface.py
+++ b/blenderproc/python/modules/renderer/RendererInterface.py
@@ -116,17 +116,17 @@ class RendererInterface(Module):
def __init__(self, config: Config):
Module.__init__(self, config)
- def _configure_renderer(self, default_samples: int = 256, use_denoiser: bool = False,
+ def _configure_renderer(self, max_amount_of_samples: int = 1024, use_denoiser: bool = False,
default_denoiser: str = "Intel"):
"""
Sets many different render parameters which can be adjusted via the config.
- :param default_samples: Default number of samples to render for each pixel
+ :param max_amount_of_samples: Default maximum number of samples to render for each pixel
:param use_denoiser: If true, a denoiser is used, only use this on color information
:param default_denoiser: Either "Intel" or "Blender", "Intel" performs much better in most cases
"""
RendererUtility._render_init()
- RendererUtility.set_max_amount_of_samples(self.config.get_int("samples", default_samples))
+ RendererUtility.set_max_amount_of_samples(self.config.get_int("samples", max_amount_of_samples))
if self.config.has_param("use_adaptive_sampling"):
RendererUtility.set_noise_threshold(self.config.get_float("use_adaptive_sampling"))
diff --git a/blenderproc/python/modules/renderer/SegMapRenderer.py b/blenderproc/python/modules/renderer/SegMapRenderer.py
index 2ece43315..e93ec3969 100644
--- a/blenderproc/python/modules/renderer/SegMapRenderer.py
+++ b/blenderproc/python/modules/renderer/SegMapRenderer.py
@@ -123,7 +123,7 @@ def run(self):
default_values = self.config.get_raw_dict("default_values", {})
with Utility.UndoAfterExecution():
- self._configure_renderer(default_samples=1)
+ self._configure_renderer(max_amount_of_samples=1)
if not self._avoid_output:
render_segmap(
diff --git a/examples/basics/basic/README.md b/examples/basics/basic/README.md
index 4207a17c3..090108e4f 100644
--- a/examples/basics/basic/README.md
+++ b/examples/basics/basic/README.md
@@ -110,14 +110,16 @@ location_x location_y location_z rotation_euler_x rotation_euler_y rotation_eul
# activate normal and depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
bproc.renderer.enable_normals_output()
+bproc.renderer.set_noise_threshold(0.01) # this is the default value
# render the whole pipeline
data = bproc.renderer.render()
```
-First we enable that `blenderproc` also generates the `normals` and the `depth` for each color image, and then we set the amount of samples used for generating the color image.
-A higher sample amount will reduce the noise in the image, but increase the rendering time.
-The correct value depends on the complexity of your scene and the GPU budget you can spare.
+First we enable that `blenderproc` also generates the `normals` and the `distance` for each color image.
+Furthermore, we set the desired noise threshold in our image.
+A lower noise threshold will reduce the noise in the image, but increase the rendering time.
+The default value is `0.01`, this should work for most applications.
=> Creates the files `rgb_0000.png` and `rgb_0001.png` in the temp folder.
diff --git a/examples/datasets/bop_object_on_surface_sampling/main.py b/examples/datasets/bop_object_on_surface_sampling/main.py
index 969fc3214..c0a04ab99 100644
--- a/examples/datasets/bop_object_on_surface_sampling/main.py
+++ b/examples/datasets/bop_object_on_surface_sampling/main.py
@@ -111,7 +111,7 @@ def sample_initial_pose(obj: bproc.types.MeshObject):
bproc.camera.add_camera_pose(cam2world_matrix)
poses += 1
-# activate depth rendering and set amount of samples for color rendering
+# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# render the whole pipeline
diff --git a/examples/datasets/bop_object_physics_positioning/main.py b/examples/datasets/bop_object_physics_positioning/main.py
index 2ba7f11ee..bd01a7f4a 100644
--- a/examples/datasets/bop_object_physics_positioning/main.py
+++ b/examples/datasets/bop_object_physics_positioning/main.py
@@ -121,7 +121,7 @@ def sample_pose_func(obj: bproc.types.MeshObject):
bproc.camera.add_camera_pose(cam2world_matrix)
poses += 1
-# activate depth rendering and set amount of samples for color rendering
+# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# render the whole pipeline
diff --git a/examples/datasets/bop_object_pose_sampling/main.py b/examples/datasets/bop_object_pose_sampling/main.py
index d9a78cea5..1cc33e306 100644
--- a/examples/datasets/bop_object_pose_sampling/main.py
+++ b/examples/datasets/bop_object_pose_sampling/main.py
@@ -35,7 +35,7 @@ def sample_pose_func(obj: bproc.types.MeshObject):
obj.set_location(np.random.uniform([-0.2, -0.2, -0.2],[0.2, 0.2, 0.2]))
obj.set_rotation_euler(bproc.sampler.uniformSO3())
-# activate depth rendering and set amount of samples for color rendering
+# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# Render five different scenes
diff --git a/examples/datasets/bop_scene_replication/main.py b/examples/datasets/bop_scene_replication/main.py
index 2d2970e79..1c36f8ce2 100644
--- a/examples/datasets/bop_scene_replication/main.py
+++ b/examples/datasets/bop_scene_replication/main.py
@@ -27,7 +27,7 @@
light_point.set_energy(1000)
light_point.set_location([0, 0, -0.8])
-# activate depth rendering and set amount of samples for color rendering
+# activate depth rendering
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# render the cameras of the current scene
From 1236038ec258be53efc0230fe0d1b4cb2b96f60c Mon Sep 17 00:00:00 2001
From: Maximilian Denninger
Date: Mon, 6 Dec 2021 17:40:45 +0100
Subject: [PATCH 25/46] fix(lens_distortion): change jpg image to png image
---
.../advanced/lens_distortion/main_callab.py | 35 ++++++++++--------
images/lens_img1_real.jpg | Bin 103520 -> 0 bytes
images/lens_img1_real.png | Bin 0 -> 361645 bytes
3 files changed, 20 insertions(+), 15 deletions(-)
delete mode 100644 images/lens_img1_real.jpg
create mode 100644 images/lens_img1_real.png
diff --git a/examples/advanced/lens_distortion/main_callab.py b/examples/advanced/lens_distortion/main_callab.py
index ad9369e9a..cfe0fe594 100644
--- a/examples/advanced/lens_distortion/main_callab.py
+++ b/examples/advanced/lens_distortion/main_callab.py
@@ -5,7 +5,7 @@
import numpy as np
from mathutils import Matrix
-import os # path
+import os # path
from skimage.feature import match_template
from PIL import Image
@@ -13,7 +13,8 @@
parser = argparse.ArgumentParser()
parser.add_argument('scene', help="Path to the scene.obj file, should be examples/resources/scene.obj")
parser.add_argument('config_file', help="Path to the camera calibration config file.")
-parser.add_argument('output_dir', help="Path to where the final files, will be saved, could be examples/basics/basic/output")
+parser.add_argument('output_dir',
+ help="Path to where the final files, will be saved, could be examples/basics/basic/output")
args = parser.parse_args()
bproc.init()
@@ -21,12 +22,12 @@
# load the objects into the scene
basename = os.path.basename(args.scene)
filename, file_extension = os.path.splitext(basename)
-if file_extension=='.blend':
+if file_extension == '.blend':
objs = bproc.loader.load_blend(args.scene)
obj = bproc.filter.one_by_attr(objs, "name", filename)
obj.set_location([0, 0, 0])
obj.set_rotation_euler([0, 0, 0])
-elif file_extension=='.obj':
+elif file_extension == '.obj':
objs = bproc.loader.load_obj(args.scene)
objs[0].set_location([0, 0, 0])
objs[0].set_rotation_euler([0, 0, 0])
@@ -40,7 +41,8 @@
light.set_energy(30)
# setup the lens distortion and adapt intrinsics so that it can be later used in the PostProcessing
-orig_res_x, orig_res_y, mapping_coords = bproc.camera.set_camera_parameters_from_config_file(args.config_file, read_the_extrinsics=True)
+orig_res_x, orig_res_y, mapping_coords = bproc.camera.set_camera_parameters_from_config_file(args.config_file,
+ read_the_extrinsics=True)
# activate normal and distance rendering
bproc.renderer.enable_normals_output()
@@ -61,22 +63,25 @@
# test: compare generated image with real image
if "img1" in os.path.basename(args.config_file):
- real_path = "./images/lens_img1_real.jpg"
- norm_corr_limit = 0.660 # low since the real background is large and different
+ real_path = os.path.join(os.path.dirname(__file__), "..", "..", "..", "images", "lens_img1_real.png")
+ norm_corr_limit = 0.660 # low since the real background is large and different
elif "img2" in os.path.basename(args.config_file):
- real_path = "./images/lens_img2_real.png"
- norm_corr_limit = 0.890 # less background
+ real_path = os.path.join(os.path.dirname(__file__), "..", "..", "..", "images", "lens_img2_real.png")
+ norm_corr_limit = 0.890 # less background
else:
raise Exception("Reference real image not found.")
img_gene = np.asarray(Image.fromarray(data['colors'][0]).convert('L'))
img_real = np.asarray(Image.open(real_path).convert('RGB').convert('L'))
assert img_gene.shape == img_real.shape
-result = match_template(img_gene, img_real[3:-3,3:-3], pad_input=False)
-if result.argmax()==24: # center of the (7,7) correlation window
+result = match_template(img_gene, img_real[3:-3, 3:-3], pad_input=False)
+if result.argmax() == 24: # center of the (7,7) correlation window
print(f"The generated image is not biased w.r.t. the reference real image.")
- if result.max()>norm_corr_limit:
- print(f"The norm. correlation index between generated and real images is {np.round(result.max(),3)}, which is fine.")
+ if result.max() > norm_corr_limit:
+ print(f"The norm. correlation index between generated and real images is {np.round(result.max(), 3)}, "
+ f"which is fine.")
else:
- raise Exception("The norm. correlation index between generated and real image is too low. The images do not match. Choose other object or config file.")
+ raise Exception("The norm. correlation index between generated and real image is too low. The images do "
+ "not match. Choose other object or config file.")
else:
- raise Exception("The generated calibration pattern image and the reference real image do not match. Choose other object or config file.")
+ raise Exception("The generated calibration pattern image and the reference real image do not match. Choose other "
+ f"object or config file: {result.argmax()}.")
diff --git a/images/lens_img1_real.jpg b/images/lens_img1_real.jpg
deleted file mode 100644
index 4b868e9c54fea16dfceadb8b3fa70a44f9cb40f0..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 103520
zcmbTdXH-*P&^{WZgY@2!DqVUf2+{=vl+cTGklsQUP&z?CK#Bwfq!Z~Kr1##7bU}JT
zjS#{=zxQ41-Y@sVy?1Awy^?kIIcJ_TvuF0qe)6#Nun8d7R@YJoU||6OSeOssVHKbX
zc!c#|`>%iWU&F!vugAr~!N$SE#l!ob1D}up51$Yp508MDfRO0FhWYlGnCS6;KmNPP
z|6ckC7aJRw2pj>K=XpCQx-l#-T_l~aA8rmmr>rET>3jWNi?)XdJ_!O_Xt
z#nso(KOhhs6de8GV@zyZd_r3K=Zws(FWEWY3yX?>l$4g0*VffHG&VK2wD$D&^$!dV
z4Uf#s&do0@E-kNYZSU;<+1vknaELg+xV*Z)LH@h_4;K~y`+s3!`u|5<6c}8O{?h^e
zf4Hz71!4|t3LM-gqIi_g4e)KfsW`+U38+<4ztwaTa*7+mY2Nxw6VY->Y;hz0gZ4kj
z{@($M`u~OO{{r@Z<5~ec#>T=Z51Rr21l;3ZV8gWcm3Un2EZ|jta_nm&6L7DXQ~sEb
zN(9LqK;{jRTtDl)h}3nmp%VN&!o?J$+f
z!zC7_e@_u#W`S^V!Uw?n#bv4i$H=TGqMPb3X0KXg;-z+gB18YMuOnx7Tj379FrJk+
zS&?(t(YKTlMEl{y>qXMn%ba;cSJsJ2tfo)q>&40Y;yKtk*`12PPQ3#A4b9Kl`R&4r
zD55`k-wO~(6+612G@(X+0~sQx>>{C6?y4Nasr78>QEplA@BqrHyUHwgB%5!!3ltXDUJV0T9n^0n=Df_hEQdv9sSBwQd^X(JGNdZY=R5)pzhJ1IEt9llX>!bS1KrA9;>PciS?
z#@hD>u-(-6G7Oct2>)Y7`2$stVLVa|j@D66TsO)WwqFaonYRu?dGBPAUE3hYGj4q|{U>!=<3^rE
z1k$e8<=ouT(oB|{2M`^UOz0Fl&oRJp6~lV0?^>}
zN!|5X38f|(ix=7WJ&jKIrr4Eul^tMpWY|1F9l
zKG8R1+XN?%Jk=*W5A?^l~H(yz_7iQyxVlz)D{w(>~|g6JP(YJIN8T#v%^
z%MzMtaQ1tKzu>ZrM%5UFoX9y98t(xwSFL%-Gr2j9?3&{kCrXb)QTInMk-_8`Q
z$}^N3neA36!4X7_%Wl8e96k-i7wY6ka_G4EznObiCk5qW8%(0+t^GL=Hm~oB94?xZ
zkpEZjt|A=+=62rIpywa)dX7@_ddt@m%(_lg`<44QTNsM!{@d=)Z1;~No97(@2WR*w
zy$%E_B@3Nf_l2xv3@C>A
zAN5bJO&DJ1ck$4p-zOjke+7Kkr(Mmo`1~nN`FijQ_T$%fu{l|gqQO6O;Cl!YbqPyR
z@YUFtk673;Ct892t>n}f5Ldop7gJ?+$X9;(_PQ`d0V}sEeVzqbSVC~aK*pw`>-7v?
zrpe_&0>7}{nc(J5ryQCFk##(dC>M!Q5?J`&3Qym#I0AjD6k|#e7+$g;8W<328^do}
zV~brzEBx%7Zx;Pbeuw2_(ll1l3X3{-z-2M8qN6vu*3tIg1K_G+9Q{nkXI-ql
zzD*^~d~AB?gHL6Ni_s|oCmd#qG^ILVrL1enp*5_*z6n!znKm#f9<4QIeKLB;r?`9S
z->;KFytLOoxYPPMhb*is*6cDjq`CJ`ZgRu^21}oh2K6vXJ_w~7?9U5sn7VLL8wg>F
zz}6dTzzL8~^2%U1xp2A-_y2ouM?jzu2opi_dZqY#5!@|kYfXlY0Lx%HT0W%H1KwuS
zS&{@_=JjOaxp(zG^wO)|uZx=Hs6VL*@}6THbYVy_%Fn1~v6-dOdHi#bdbP1nO(27Z
zN9)6vE13$~{!DImyFH%r>^DX10rV9VqJ(*yxH~k^4o)k_?Ay^wRPeNdWT=pTGWP59
zh=l8fsc}Zg?uSvdwwq2bkWq;d+23A;-GA*n7IY_?Zr_Vu?qm*slhny#KD4X?$xAh5
zJo~lr82Me$tyQRUAK$Ir#P*W27>xL;H(0r)xB6v+^>V3jacAC^U8wJcG_bD(MF10C
z@UP<4Y)IRAJuh^!qsG1NqVkcoD7`;mUGYA%pkumlHhr5d{zTAVlvu~oVdUw+v$peB
zzDtV~WXB)jkcq8o&-!>6MnflzUytjW7Wdwfe*Ngqzt(6hn#{G)k7QLvm~T(^-sS$5
z@+1Z~E*zQsoGWaZT^JwBP9@bCYcYVdr_OYq&0OgyhG;e{N(=|CTA6Ejdu*&H&&ffD
z68=pZAid!a0M%J1_&p@s#kICibD+ij(DNKEjd3Ro8lRJ;UAqsmtXochV+V
z%O6>SF8!y2mh^IKH!mltv_-$Sc-V&8Oy{r!Xho3=a}F!r3;gNzd;k#K^pWUj@_uX+
z&ZhroO$uaRLJ1t}CkKQayPxf-5Y=exMCV5ohkio@>bnbU({%ob|Iilw*n9rd#VrMh
zXWpE_sEIpszX2sVm%Nbv?oX$~=Dk2$?Ykt=8=Bn*0R9z))Ms0=3t^@;Q0c*01_)bz
zwpyTA{?NftYHcBLG(1cVN5HGVuH}c>Pth{_YXzh0GG@}4ZyhgTyLggMw3zo73c%e0
zkTKurGo!O^fi>Tksq`@`iWm7;(zf;^6Qs{H)c5KdLKPl+e-cpNq+r6cu$3tjxQ~E&WUClTyCM2qi?mX(x}r%VzMeY^
zLkYh*0d~8MV)uiKAfKmpNKl>{LTp7*_nl2>-e&&%^52})R${()ImmzGBOsu95@Te^
z4pT$4PQY8NRV%j2Lt^13oc?oT6*AVV18bU4U0P_7+_11%%h1Vzm8g5D=PT<++LhU|
zw~GN-R;>!}{&0T?RAhW9f&KgWy#Nmq0E=8KsAybCa?*zw_I_qL5;0wPSCx@t9ydo?
zlt*-{CC|BS7DB&LA*Ryb_IU9lddl5n@memyo?&_xf7&bJ+VzTzD%233{3RsF{|iGL
zdt7eNy?6rr48kfsi&Epsk*Lo$CbXbUeFpm^7ybrp8jj|n|L~O-V$@G(OowH-P2;e7
zVCAr_D>ksFN|$w>pX)Z&uJ2LnVz9=^{B!@j8V^1BO0*rWV!?N;c(hXSZP#`-soJ}>
z6S9z|kP|jKpz%Dv-PqTam8Xt@ik3Bbq19>bTVrbG?a(fylHXj{uD2G|L|c$yb+q1o
zppqM?_;ljCxy4=;%AsQQeU_B$>LOk>GGrSk)%CFb
z=%+wrz9b~Pb-o+tuGqNX=DLgLKIptpL!C4H)YPmglSkKN*TmVP!^P#tQsdvL$95ML
zKi5}RTV8W=`Jn`I+*L<^3FYOx3MA_y?LDqWCsx-EcI5&(<3UW3vRB3FujAgoj-1rq
zqcimRnB8UOH78v{Hs!18PBYS)d+w@Thxbaf*{{OD%{4x{KFsZluIgBlr^oZF;yZBa
z(C~oVjH>s`1NQloq!_(tC26{*4U0lt?@ENG5dGc-nngtwn~%JFRhwHV
zHo9TG*b>}uOv<0(v)Fe;xvTlCWRay}k`43(4N}(-!fGb4uxRRCJQxl7J)iK1+7kN6J
z@0*kebN#IoI%#X(44GUx?^rmMnx_0MIm9f9GFxnJFiYS!2n$C0TftP<3MOTEMuy|S
zOpy;Mco5d`a(nmKQ@!Gwl6|<3ixPXwj6YY)`s+gMLDxI+>(|JBg=glizcY-nKSl5(
z&$g@bV;y!*UVtr{u0t$E8;1tyssYF1Q~m=2Q%X3<^&^cY2-oK*GB&dd#52E3r9zP^
z#x(sxpekDSP!80GsfDyQrgHyEdsBD!f=Dz~;f3b=V296Sg%oRxqfk;bXYUWW7dLF}
zYqXO1d5ij6B8+Z~ndw}fp?8+gUbALgfO;dS)-6$QQuI1=AFT(7@qlrd880Y(J@#kh
zQa?}v4a3QvAYywPXRrJ(IP=$vO0}3iR}-L;LkG7u2rZH3+g26dT#2(K4(vZdln{^A
zw~m!3rxrWV6YH|Ity8uQ4cVqD?^Sa4mBhhjB=Xx1`*E8Ck_2xZ9{@ohmwNzA1MAIz
zhvFo@QX$cjzH?Kete3_YtdkeD1LmnSUwD*k&khXMG
zj2-gpTKa9ewH=sqN928d1n~iyWgZ1A!)d6AS}t~z@N=&z-^YsQOwsS@RD`P~9al8Rj7upMxB9+s(<1WL8
z2@Z8lw3}ZKO}FEg94vssvD)e~@sr}sChjIJe_itW+FZ=E{&jbM43_#Ev?Fk4<$b5R
z^qe-P^950EB(L*;Uhty5tCg|BUxQbHZQS=+p&}0e!jgp5-kn!cC&D?F+!UVOAiZVQ
z&W5w}@bOOW_hmCE9k*wbJm4JQpVm$fB!zc`6KufmxAyi7sK$Y6_2tl}gZ|?pirIz`
z2y_RU(5j77gZvmUM5_I}m%0yH@y&CgG1=2en~z>EseIf1o`k6@GJ>ko<5BOAUcbV;
zZm&XM^xJYbrN@Y?O79*tMW=ha<5S|O@QR$}*-qI|i<*dy>I`tXtk_Dc=+3;wV>H{x
zcV^+UO6(8=ykhvgsZ`XA*d`J1n;3dqm@xBN}sPJ9Lx;MD0l&espG48MTDMSxQROh~Pw%lvw(?sPWs>L@f
zoyqe^YZ4&`yQreElh*V8Eq)oOB|x~=ezAA};!cgjqAgn%HGTBX!cJyTdzqlTEgpwz
zz4<%jpUJ+0a+c9*bC8mXq6{+#dU)$TmL^t6m8Pc*1I@n^uH4s+Ny|K!D0j2Q6ybclKwQ?9A+ZUPrXy_kn
zKJ;{s3oR|w+Q|_Vxt84+`uR`TzkVJvX`K7Hf9O7Ca{R1iexp-{FDEX&xkjI5eK`oV-(|Tb@xS7S4I
z!BA>heT=z^9SeU4*LG;WRv#HXFP3N<{5={18h6adE4B|?P>X(DTBbNb8|LW7MiqXM
zvi|tO>f6>?9M8ykT`lP%Q)V
z7xtk}$X{8SBR2}9k8bZk(j~~nK5=JG{b>b0dGW{DCGz-Ig^F&eEeNt0zcZT2GH}yL
z;9ER4$GV2Z@SR0ouE*h$b|%P!ZS!4GuzWWk=^rNUIW3v%r?C*=isX@v-SPciXpDdS
zzH7ok%e+zz!ti$9ook_>R$0a*-52Yx4=}^_$co`NLkB`5!7=0YsZ;*WTe5IHT<^v8
zC&=zP~h>5
zdG1PV0-6dLWVzKDb7(L`JLJgwcO&qs8h^X1Pm#%bdO=rSC!Ydm^iTV*M;p(0-|NY+
zHq53FxOfee(Ik}aCPA)7P&QLSLhZys!1RXcxkA!sfus=@;gUZXE8Y|ys+oEDtyw{
zb|CQkxhCADfe0eF>z_h{j4;6U5G+)Rb*?;em+t{VekhaI9FiE#)hp<&zqDYDmehT>
zmmGJ#gk|NM>*Q0y+x>Hm45tf(D1dvwn~
zk;Pz0=tQ!S69Ea;535jkHtgN<86>Y6Q>@CSZXf3%)TeX$J#>}
zm8;*bhXto9Q}z1Z&FJ`7WZuU7A*$;3$oTRGkBy%G>ga`G6H&?0t5s#cMo@K^)W!LJ
zhugtzu3+5Iesp@YMOEd@zlab9?Ary%SAThyG3C`fyDnd7U|WkFkvbLgrQNJDv)BQJ_fg(N)Uz^rPGiWXuw_?vb93V7U&
zV6>)0+91u}v~Ku17k_tDsFwwhAqiz}X)a51d_Zcws`DoS^$zCg2{3v7G1b>o`0-er
z1%S;rL7zu9Gw}1nD)PrY-1&GCuhqp8?m~R{*i%<7ckT+Xb%>;9%9RWt-Yw(g<
z(6A{5?CoX+PVZ+%Tujx~Qm5qNk2r_$1+ltqWLLn$+wB56NZaj=Bq*bLYQn1LVDES{Gaw<9u)fqbwAC0Ufr&)iKZ*k%~E
z)}3NUXHJ)WW4!q;dF@EKJoE~DGa3AktXqi&;fIKAfcM{N(i6Ni*6YtL9l+;lE}#9p
z72*+MLWD4^bDG=0kzmrb>p3s_5cEe${c=$IUtm9)75XcJO0vpkIL6ZK&!gz6psIU;
zrFNm}P(S#``hWl+*;U%?N@cWTvM9Yk>U7vqYzrk73jSUj5RyVt;kF>t(GV&
z9Cn-tcbE)aWc>D2twnw4hBy8Pdiv7y*m8%&$)FZ;`^MX&mtg;!?BIb_2LfbfsdD
zZ3^%p2jG_kBf3_|`T~`h;WnZ@+7=dZKTZY+*-HNd{G8I-by1ou&CF=-`oxO!&PK!6
zslWw)$f+)8PpuUj+wG$f1AevlUWoUk>yNvf9;Ne|UKHuVF&CmC-6i;4lQcy0?e1|r
zL9Z0P1)m~#nC8-twl+_gRAwBUavbqZ*MFP%cfh1hnxc}tC-H1a9`?9i3dj&U0$=)CV5AaCC?BW5iLc0+j$>dZ=8bNerT%YpxeW9Y(
z4NU3&`@8LwUJ7?rlqvQB^uY3&TQgIY%G
zvmP%olegc~7T81!zY&d2mh6~bFPu`9&@`0PzAu0j
z=6Jh^jyu1}Un+O!OkeqV)%Iqsf038|$kP2#wYO}DD`&sqZ}#ZZoi*cHkh;8hPR?(;
z#~OIwbrECf(J4^ogq(}9_0iOXlF&11_R>koac;YBt_{+>z3eu=moQf3w<3-sX)gsE
z!HQ2sUeDQfa<1CsQ|e7PWqN5YtFgDQzR-F7#s$jJ@cxasy_yeqw%AGdQ$Bsp8+mH3
zg1{Zf5Z^jJ&EesEuiYpnaA1vh_;YPZl_M`JCFft_cK=mZbhw~ZHgqK6FIUYqn-|{$
zVDR(en=RN1s5h82o!W4e3&ya;VW>r|?xJC2T^k0p3yqR=?**Jl3g%R`YxnQ533$2f
zy
zdMlgSZ)B5B9Z1|ty@2^o-eb~+VQ}KVhfOs|`{{xc=*^n>Y`YXb~z8HTa~n
z4Wn_T8w}8vxHDQ!$_w}ZTIw2sr+_WqUwYX3j;eIcF6}wjdRJiPJwLalknWQh{lFX6
z%pw4a%#2f$z?=ta$v
zH?J2_PT|kH!39~PhDwQ%Yx7}DTNAk+=#W)9A0N|}VZ|fDw<0z5Py7AlL*!57_Rc1!
ziz(IeiK6h2-h}#ZQ{km^>8&bTeA2^p1?y`itUnyTwO$ej<|X92Mz9q
znbg!(FFi%&s?yD{O6ry*@8#Cs#cN?{C5J|MUf|db4Db9bAOGKFl^-U(ul->X`lM9s
zCq19zF2&JICngE3+HsfK4)}>sLkS5ArYT56isvvvse9K;M)IB}lyHs{0qkbOMW*sN
zNu$YUP6IP{vxPW(BE+qVyhcPDCe!<0`
z?ZAo9YK=;ek;q@7W)9$?7GFAVJT(kIY!Qlar_ozOZj_^nRfKOV+j{D>U^aEpQ
zL#Gs#HZ89ec0O(hTVKjj#C5}hpq({YC;%ZeD!xx)vV)D_=ro$T68Its<}szG&M)TgL8wiB?yM{tUM;QT9%we_ewoN*t2jR2qv}cdiFjy?29!stg^hsC
zS|epVwXKoQ!)cEURD5a6DwQn)HfRd59sm%wk*;TGHrH7fw8C@)^RD^fb?xQ@phisk
zvf9(d*Ix(2jxhmlW?92u5r1^@TpoR$C@`2E;@WZ>e%+pO9drMsQxQFO+nN0%)E61z
zTcA_X%eC@$Qo)dLMy!07emNF*5q48?D;5G?KQw$#Zf^(eTig>tFJP$HCXbAueb({C0Stsv{nJ16O`LM%`4I5KqjN2*=2517Fb
zTj}a@B%+%VxcN<-QlmGS@|icfsqcCRI?=9iZQdruR-g6Wv&aUARdU9j>++wcEpVo*
zu-lH=M~htcX@7)SvYGVEQ?$8EFdl?eC)LeB#{oIF$PuG#~oB-27n0RbQ%4{N)yvug#{1?S#N38Jd)F
z=0_899DW9R{m&jAHphNfyk++QD8voTx&vgUTrVIEdRKLA|M9=|MP_3W+ihTeFdbq$ogpiu(`
zEGUWJLzMu?=<8Ty_tsg|I^BZ1kR%z!;iH*_y<^b?si)&DjY&HM!6BfPg$DrKlWx#B
zC2a8odyC8Veq8P#21qXO3o|n^yy_#GHhP9lWAGDRk!w_nVM5H^&Y%9Dw(6oA9ki|Z
zB905O7n65Z;e&Eb$FYcf{b*KU#8pgHAZxmN#Px4XkzTBhw#W7#jyo@chV6Lt?y2gX
zZuqnn8xnhKBhJ7)!lS<6@T5dmGqgfh*m{?Xb}(}|FDXA5)Q<6a{_Y>xnj~g34Z6Bp
zZ2`e#G9dW}0FDdz2Zn(b4JJ%V=#wg$jtvt&&EtTT`6hZB>U=wcM|8JR@C>xxm;M=0
zvZaer_dUt3*xp#xxHg>WvQNR#QIM>iVo(!ChmN9(@arf}_Wa*G46pP=yVyQPs6OWK
zW2YHnz?MW}`~q%Olhc1sLe@&+K#x0^ZAeS*^O_CSbE{3`8)D&V4g+r(#2H
zjfYNS;5?r>UA+DazwcY9<>yJRU^!s`P&qthhFUD1P-(soMH0!3KkfZZsQvnD%Kq&2
z;(ZR92|Wg^Z;B7cLxvd6ogH7wy1xJg))r(w7s{i`J9$-zXDH|QsI?2pD}9^-_r0#Z
zFA@x=1ta|ZW_D*_9;YP9aESVO;mG?{FjL&PUj+P(=Og4%|3=eBAB#~BnqWav-bMI0
zO&}SllLKCL_*m~AqsG3K@%ffqE!Y}GVYGFYtbluG@&M@3M__laRL9z}eT41MtxQ0L
zDtXjJah1=@qx2P=8XUi`HAv=}rHEch!cgRc*Oc{^lUrTXdemOr2!*@{zy`m-h57X?
z(iYyeiGQNCBl1BB2R^G&>}>YgYguT1H*JWfYWq`+BT#fS=YF*V`3*iYA*It57fk&^
zWPHyRUJaCj4FXpuuixAkp{2r?q2!%UP&`PsZQY3H^O6@l85IY?c3SR%l)cm;JD)-;
zTU5kIWvXNum*Oz$%N9XVHeH7yMeYpH(^N>_?TFsLo_?c15=-lbR;x+Y14>N;A7ka<
zVe8^fT8t5Rgb$$cIxWq6fw)5M`*F5ha$i?%0_nwVQc7Woz|C|vd?oJip$#T~SbSFl
z+XoK?>~FeTTJ?99cKyPcpLG1W=j$r@kC)aA`3P8ga_&}N-8VJ-G4`MjOV=-
za^;o1<&!N>5=<)QQ`hAQe~n&?sK-z4q(skm$|FrTnMTc7m{bw4?K-$uRGG=&Wd^>;wRHLK#{@ekS+&srLi&39jcDHXjjf}yCO
z*8}(8fi4kDq0UGLIP6Zfcl{|sXyV(qL*0ti7g@$>Qnp_N)9_Y?W0?KeFC~v6;QDNl
zosyVW;0``ANA}-2Ja^jCC4Msntf^m}1~l)w(~i0yD`|4ca|yWCYJoz
z-yla%0`UY5@;A1ABvl&W+((isAIyPvvQB0Zy@P0idsrtYQo0+>ovS}{xk0^VW@5UO
z%Iu}aK-{1b8JV|uaJ7h{5xHgoXy6Zsi`qv1ah4wZ*K19Q^2P|GiJk}62M#;{Xix$0
zuNyJ!(+ak*-)?fxyXKd9q?*JeYUtFG8`*?Dr{*QyPMz4vSySy{$cLMWiz~4M`YZB%
z_}lzHT0C7g%StfYL77{W)(tBI)!5M&xK0e0GO*bXNb5}|YNtB5QVJn(NI66klne&XP0oCk;~^pC-tj@mD(^fZ+%iU?&NJ
zp%NmE;3X=Uiv)tM0nfLz|IN_@!N>*Th&Rs?B{f0@3M
z+tz3Q$WtB{Q*4RNpN6~Gk$Fy!Vn+>y703QcYU@s_Zl@r5Jrl14gjD+$g}iXcY$+u|
zG{yzAKFSUjL{w`Oz)C#2Gr8aG3#AWgd-ho)d9q@FHnM(BQ{nv9;23>sE6rLp|6=au
zcz2b?=ca4{FSnsjx&Z++y^r~e-;ABWlKg*8|HzMDdw0;Fj*rzwR8nT~<;4E8G(l{Y
zS+4K1S7E*;Xx{AaGrNGWO#tf1zAcYL%B!|e%g2#-k)xgWdUN3gBWrR|QS?od*l1nz
z5;gV%Af+MX0k8}bT$W|2_!F_}1~P}m1u$}vJOEf*gXyfbSiV3}fe!Y$c@1*T#r$zs
zn8EVOy7HUrxqgU~H2A3e-JUv8#M!5YLt+;ua_wjnHev<2Tl`4BWU7&;3$1>l&5CR6
z_*eKB)wKd|bM5_==K1n2+KYzF_1>g{arIwb!xZknyT55?JFc}Rtv?MgoK{H_xoqo@O`Bm!yQ(_mrdz!ef@!y+eUw06ISv>BQ=mBFMs|*q}!mW
zpHao$ejS%Bh2~1KUEaocdCto%sO_7s&)h$9`hki)OGt
z&Vv=3HE?@n58#^>KR#mEMI_~~Y>`pMjA5-#2DxzaiQV_!H
zfzKa2$N2t*YGgXZ$Ib1pjc-#;-|WxymP6b(t+Pp+_K1y~hSG9%p8~|eO07o&4`obJ
zewePE{I&ECXA^bz&1GSAGNxo-AoYpy{_{JPtB*HMEXk&Gytzi6by~8oiRaC6m~R(A
zeLC(MCUEzOlpo*^OKZan27c^Xn94@$8XKa7s7yeN-_R#?aAJd6i3zMakb`18?fTK)
z`QlyNR-Agb0;M8p$nBz}bes|ql7W@0W#(ir$)~2&g=~?5x5~}6i!}TR*t0tw1*}B?
z7T5!_+{f?4S~&qkH)rYjEp`^+DR8`jA>8zrv15m;vf~c?RnBfi79(S6o+~1~+^n!y9h&k@9zM<7ynYIhbI
zH8V`HR(&1od=+~buv6uDIGn|(ro!;(Ei~fkH(*f!g%m`1b*MrAJ29NsvN;D8<&f6XNue-BZ9<(i=yX)o8lwf$Ua8EjmP=A-hP$uxrKe?
zUC49wItmn5^WnIoI~$^?7BlscyOBh&^-r>5{5(;~EBNcwjBGPH^-BSMs#|;GGZdvj
zU+v$GrkhB{AOfAH&ic@OwAs=`LAVfxEV3|apZ9Juf1-@yVXfI^cBfU-rk-}I>l#nA6@^nqIRP^6lamL7I3s&DL
z#)MTg$zP(^7cJ>EGf65U#FsZ}j56BUGLS4fTEuS`n^zH!dhx$TOiu
zZJ*>bop28|j1h%&wj3EYBrxZdF7tXGjbWB^(kPm!i|gkI-O1xoZ+H}o8=72ZYibq*
zHFsvc657(7aZGyAt;24ptf=dV4BTmo#^f#aSw`XGN=oqJ!4HBcAU=vK^-xaghcIuB
z<`=VnCTY)TG+XWR!~epvx;87;aS<=An?Nb6FW~Eifh6Pe89lmg4qOQXx=PErxX*E7
zxY&YEA~xd!lmy@rC-KrKu1>}|zdLu0?yT$eu=}J>114HHzdXF~BK*7*%g>xjJ90k<
z%_v32q6rA^B%ZTpbn+p`A>h8oUJSz!WYR^Ak*|hT0f~hmHbe423H{tvfcS^Le^WOs
zU5x~@I2Kd^sXf(Lw)A$8-AmY1dYQ>F`??xbm7NBeib+a{kU)gk-Sr4ED{KoG+qze1
z7r4Dzk!wCKubl%fHZ{@7us0+V47tMlkp42qCj&Gpq+f}qLA`{RplI_?@3y1|^$8<-
zvN6S*qrcF)C;czjv#JLGhLg}8#Tp^-*9SN^F({Xl)-~GaGxTN_E0`vy=9sqqJNTJC=C;+){3b#Q9Z5hky!F(O|
z#%a2p&K8}tAmVYpK=8)j%h{@$V{=14Q|3+OCF=YY0>
z2^dSs?`phm+LmwojeYHi(Csf;^y#QatUT4?5Y}$yj>XlKEz^3U)|#VZbVjyJtOdtR
zmbz}U!9L(Bb`P!G-nY#YV?}v2UD@JV2mw#xL};a)d^1TS1qH^nqanU%7|{qE_XU*s%^zITEI4tY~`^N
z^c1wn_OmHEoE78gG?a*bqrC5{QP_o;U3asHxN1LhfqV5K-c$$XXD@5`)oL^*y#<^Y
zWMNScfUOxQ{FN-v&Zfd}JwWG(HgU@CTbAUFCyJ`AFBl(1)w{_u*<-!R+ZUijvgjW{
zWpTLT@NsNCD1W-3DRzv6!2b^3JmA}JE#Wb>Mrkn`!8xxa96kiCqdPu0Z>odkhkOK
zwaSZ*CWp=FCq*q`CTN2$)arYWM
zZe~3IO+i1NM%isAj%(i%C;Rp>T1ZQiP#aOlON+s2ya4npZWZ@+;S8bjNV+?27EN#B5sPi+gpI#W-cA7U!#xfI3n#Un`e80hBw^8k3XEBbf@(V;c?n
z2_QYhJIdd#$?iof;~`WlYkC7xldIEvhr=pdGl#u>y+%tdzGr1BTgJV3
z!@!HB4Esb?$5iZL!}G?O#51~x!ZdDzJf
zcaOJJKYRXDqao`=Y&eKPJ@bX&GR*KH=A?Y8AFtG(6(zHvSP{=Rg(
zgl$`&fZ&s8qS(!^M%PkERi{d4vyylY9q7r|;2Dv+ZkoL5-@sAo>M_kkXJ)co7|rgj
z6N;Q};L`+e$9=KiPi%XFP-(uMLq}@MUau&N);6H{DR7BPN95QAzP`8Z!l>0Qgp(#N
z%e|aPsdX9zdxnvF;*j#d`e%NXoOXY1--=JGc9C3(
zkXwZ+)WvEvFR;&XG(k?%Bt1n9GO~S`y2JKk5pOaLA>E*0J>#
zvdedEVNZ`bt&ea-d<~7jl=!-_lqi?v*7Wa`k>B5zd_M>nBD<)30MKF1SYD+p!}>tb
zy6{MNh+bDc9Oq=KSyH~kOfBs|Fk%`*g-9V9;}`~9Nfh$j5Y_lA;YS>eU0NfBxwodf
z?7m8Frn_~e98=R=*LO(%Q~QQYxYp_3)9b@jcds!;^p9PX5}2jV$IKGA`HoWzUgUSM
z-SO?Q57_bk!-a*zrkZ1h$-7mq*pJNHZMxEE0`EI|mvfOb1zT}i0E6Te3m8ow0vvBFZvKT^P61YZ$hJ+%=f(Kn+&Q+OZ>7xD`+@jj@fwmz^T{7)NCyKL^o
zz^IhSXOtG9la=c=%vXEw!ryjcAu!LKy&g_9X|4H*te|NZU2A>L%t(_-flXdO`yqWm
zmRCaHif?e`h&6V`8g8v?S*b*b#Me&xD6J&W`gy19{6s490g$R2ZjaPE+&(gbtjUhW
zdRkivTv4G3cgiL)CW#M+#b0%;JOK1-5Hm^Q>l47bMy&pocrlsU4b|EEFZkU_QbH_yNs>Xd0|&L}Z*mxoN
zlx=twff&H>fM>=tCw<%oyBvVj$n=3$?(4Q~p||J%7XJ1!H#La}cqJ_1#>@O`1^raV
zCLg?O_*^{h2olQp1p8=lYs?n+dq3X&^JYMm`EcZ>(@gm=wV3F};!K^3SlCWJwD^#jNqq&%loG}OXa8G)#}PDxOT)X%nL3ID*uO~>kfqS
z|KrLE8Ie7XtTMC7xFRbVA+nRbLpYo}5wb%l8QGaX|J?I@
z*8B5*zsCDnRkVIeDV&X|X2QI0bVl7BsvaUE7IYeiN%4O*mL=OtxI!osN_-kdNjEB*
zYTv%rtN0wPbkqAEYJ7Q-^Ww6qY$&6=Bf=>K21bib2FEJKKY~0|BX`a)_aT%T$jmy{ecEjeGhs@h98*xskQIvGk|MY+Re=O+`<{N|mY*WnOkBZYP1^
zFCMe~bx%D06~Olm@x(|IayCfvR!M2I%+@+}Q91#DcJ|}Vl)lw!n0L6Il>_#E_Zm5x
zL9L}*HyazxEx4FTOXbG@o@d3WF0rZaaN14djZAyF
z-4RPc#h-i0b)^;XOR;;yf39wl)22QXEpX#XL|AF)+3skY*vQRrL+1HIe%{(a-F0(i
zU+*eWW?StSabvz+1B1IEZc-yv9T@^>T|x>+*#57f=W~f)G+%d)%c=C3iTS}CvZA=<
zWM531&|bV{&`s)iS3cz?PID@iO=@*M^nv7pTCqgmxj=Pv2Cb(#!_oR-$?7M^%NG$h
zm_ZA<@7dR{bOAH@j$5O
z8p;Z^7-ZWVMo(k_vwC(VEbm%{8hzK1cii;T`PV60DCXAFb7Ice)AlJ_PG9C6fS55w
z*oAuH>E>SS@Ix}vP0rlBt23ktcs14$q^|BY3YSz_B?G-Tk6gn}3Es_)Zu
zy$Y{e_n+!|n?|I?V-x-F|3{LLJn8UK`K|1OJYeUYQ@}+o^P+k9fx0ywC^W>o?m|sk
z+R)iBu{j?({YO_%-+T2_C{`6@C|KC7%9Al&C3BeNh6GIY{$Zs56h~Cts4J+v*#CS6?pJjp3>gdO%l(E4hRzd
z20m5{Pa9ji4I+XKmR#>kXrCVZ4Ffd(WEp6(E(Jm~RIt*0x^eW6c}O;^0+Uowpw+?0
zKSQbNbqTrMbh7~JLoP8b46Z@;>mG3piC5>i`K3PSG0Mb@EU0$b^zG|I0+Z3r|IbfV
zuuM9DeZaiL4zhM!uxC>{C(bPvF(g|e|Dn)lTSaV=o(ahGjBWE*C$FpAAZDuaVFt<(%amygfysV)ANbWRF`Zd!yhrlm%hN
z0aPj3pIOKG|48ifqT7n<+dHyoDidC9e>o`yv<>^rXG3^J(q2}^e^+)5xs|#~|5TJ2
zK$RtJs#cicY500Zh*>g@gPwGB>lRe5m?qvUB<#6`%H)c^EY6beZluWA;io_0ip5n&
zO83V%f?O)#J$75b3fSiaq8ZXWs@?pyv$quPfeB@HiPG3!yxvaXk3M_Yividd3)l(|
z8!#2$tr16>RZzve-lndHTB-UW^RHNqmpS<`8%^?2cTeP(l~gDWOoZ_al$?U1A$OjMXO&oZ
zMf`P5QG2T=atH%VYS^pQ|43ef(tYz7fU>PQwGQ5x<)H@xeSWI$cNY@HU9XU
ze0%<3ylYWn4x{w<;}rAAW!c;tgaTOvR*a=o;iwVq(WARjC2ZKFCJ=Ytcl}LuB+aU>
zyLQ)fiPdX4tp`_-Z5$G*Z?KR5ZO85|J@Vp2myOr)ViQ@AO!3Xep_ju9ackJd&1|}C
z$Oz5va*0T|2h0Wo>}M@Vs^jIBIll_G*^Rtmj&o!z7-f!#N7m0LH6p^7K;cjYUzhbN
zgD{G*QzKT2;LUn}?_{B0%z6(Kn8GP^H*;2w9_8ZS1t|byLmJC}ThEJTof|PqG%?rW
z{f{I>R2_W5f#1eDZRufQeEa@k#p9WkmQv#l`yLmO>#T_?!2gk_rJ19AjukfIL#@C!
zhG;PlHzbu>N75J1T2*b|vP7_|IUR
zs(;oVo@E(RQ8#nqIjJI&$UPDV-_$vJ%a#NWOCa9t9LMNQcL1Clxjo9ZYU@9eWzrtZ
zqn*>pOS$O=*%~*`DK_Ec(X@NQzZr)+((tm|Rb_XlKU8DXNC_Y0aNow4`8PcMpXq=Z
z-o5|zA4%!Ym`lX!7MbJg~Ypf4Hcqa(FUBR~R5_
zWoy|x-6u5vsp8F3lL)%d`JPlD`*uKj{&V0Y3R@P+Q=Q?#E<<~FO7Ctr-kti^LA!>P
zxV)T8!`bdHTeLpq6Q|;!Rg=maA5Q0zIBSw8k*LF@=h6cSpom=eFCtV6h);o{Ib>rpXI
z8S;Az(QO!+3qm!=b;#uF1r;2&ndMff78Sa)s{8*kd+k5Nq`5TFIO_G1^R=YnpCOlcKE^&Ue9avWgd|_66drwzN3p%_tthcVP&ZiEOnkh5GtS&i
znVM$t*}2+nV1GZb{RS+Xp23rceqG!%V>b?P`F1tG8wLZnO5OjN5UDl3L+miYgik7(
zX#VxJ=!P}wLo@R%=v#Lp_jEyBTcSXFoYKwTj1~7ColPQt3YhOc1vi-=vsTztvVyrf
zUT-8lTorCV_>TmB-`vCKbMVS!F)%pEtm`ez8Ma7#1Bixk<(f}8(mvHb?FhL*-;27yv*Aod9vr!
z$R>^=>ps5IQ^b^Yv%RN~(jU;Zp=QG9pNsy%6M;qG#oF0oUsM2b+6voTU<0w2XsCKb
zL#3kaIcMj3&+Ti!^Xv8|{MRA<-mZ%_>;}qj@II%<_
zv~Haw^5Er?9_TW9w52_C_!QVf0=m9~7z1K($mL1ZW+)AxkY#Agr}dB)wqlmN_8CEP
z!SULC+lPdx|46)&S+Q5I&^&24X-Ka}D8c#fCKa-tx}tDogpGxtH-gUN*bXb`{SADi
zy#WUZl|I{GFuf3(dn{X45pU?1aU9Zg;Jj_8umZO`w-;2-g0u4fRoCx#!I-g^5D9rFm(Q~pPyHBX6qpc_yP
z2&X8fiayu0pi9zt){K1aQC`U5`d@HK@t<;~sx>~F7m`j_RnEHw>*LsqhOd8FqrFTf
zg)Kf>X^y@ezx($9UTkGaIYTyA(|b?6ZP8kStEOkSGwGB_ZGsbp8b!dPk+(a7LgAaV
zA*=ez8qGG0fBD3>f4+&L`5E`)c2c20xJ;TIH@qk0GP4njTMwlP=9Gd6Y4fYntd7WF
z|2do?$bdx;6ay~C<4n}pp!x`X!cnn(ZiLcPBu-GcC@x)gYc$N_@0=Ylco!D;KDV?KCLg~v6qR08QG`QZqUB`+)r{K;P8
z`q2w>=9`p$RiK`0OGkBT_h}7flXr&-?n$fsJn1#d?;e&H4ncvryM~0^3VrYA))mS!
zhd+?(KkRP8-G5XgUFf4$dC)2xdr7jmDtf)nnWNWwp~2O!Q~3mQ@gB%Fx9=>K$sRj^YPjEsH||(*W5VDmw(x4`RZ$X#$TDSL1y7bL;9%ChZdB
z47!>PH9tmImPR98DsT_;tzOP}dCt#A%cT8B@<|Fir;TS4?!EI0$}$7h7vmqFRnd^l
zI11_GdU1~c4&I}VDwfCPP^0^h@}V7BCNd%#>;il5S4;AqCyAOZ-Vh+6amZS+x}X2)
z86AbxwP}T$d-=fwHa|KyVIBuco;`_c$FY5A!K+*u0Cl#&OUDn{Q3*A+&T@>nr4l+u
zYECHMC_1Vn$+v=yL|4cgQ%Qzz5Q2e%z3iRj9e3o7OPnG3vdU6S8ciRUG`aK3mc*hS
z0s2dO>^MJv_Ga1enLZgz0Ls4(k8TV481UPJZP_eqw((Zg3F3Q$pm^UyaT2xMfDsTZ%YSt
z@k%&}<5q@h#f3&M54VRTo#ld^PMz$-a$R0wvT#aR*unT>RnBm~@GHb?o8_>33Y4%}HFdylF|58>Hv|F30`*!Q;N^PAN$Thlt>@TXJS+*YYOOgLDt|
z-R{~eR(!G^8!66yo$70iX*!Fg+q^|guMd@aO5d)yTQ4VPu_Dk-@_wmYm$5-<(ZPY;
z&o?)FBEQ!$LLH%4R{iFSWX3+#Z7pq>+YcQ7Le;M!{zeCZciN^d?nIq8#?t+S3C-6(
zql6xW3#ul*%XEGI1>E;jv9i5gv1(6p$NN7L*BREkh_XE9ubg>G0;mTlOPQYvxi?d_
zJFTRiju@cWxT3~iy!f}{$eZAwU}oHSDZBiJ^1UGZeziG!$uBw+pTf8LZhdd+dId?z
zxWe44{yb5%kB~_*spT5mq4_M)4mn&cNVW$_=TbVDIo)Y5vtQ<&=4+zibiB0Q=Frej
zw@Lp7n{1YQw#%>l;1}Dzs;F)i#pHIGqBs4I^7`(h?PKL9{IVBT^!`%cQ>y9ayZ^vE
zr);gBh=13eCPTp=O)&@rQ+Eb4TW(JLPCT8iVOG%C2`wCg3E_F?<+jsR9-^b68%1wO
zwt%|f(4`m_ij4+}haQ;pjEZWIUx}BDQP$#DE{1WYx}C5mqAACAReH4L3aJc^HQb^H
z#}hm&^n!;>XN3}{O0H<4U3?XKw6rmec2QJU);>T>7}|_Aw}<2
z$>to;3K0!HjAj1<#P2N~>!?B(%nS)xU93R-#kn>+Y?SsPSe|}~o41&$5`@CXU3mlW
z;J0v*fAN?@EE-+7!Fuenbs9rt22u$dNn?>)=0k=o5WP)g+$H^;C-jj6J5gQ8kgKxW@jpr((p%t9u2tTl!^r+MK6-eQ#P-f=OXFq&30io%k
zegdaBc>W##oA8a5zXjKMh~~$<$M&J^x{(}EoAuv5v9Nndw4(jjiP;Ym{nkYU%>8&t
zD7!bby=N+w43{b_=hOhGJK_=xyM(%+_WGMxriQO9c?#Wx(>di;S~r{y4)^5MqPx@x
z2~KF1_c&|3++4|&PpYU7kDH-vqWi`RwOO7`T{coTl`I^<|G(clcG)xqF;+sIs;u49
ztkhBjEB-3bDnG51Bt4eUB}up5swNLR4%-Nm!0ES4kL9tmvYb6N)`#iESp5|_QNA%{|)$adF)?8Xvdgg)4kt+1$bD}*`fq*l}8rCe_{Eb
zIq*VA^z0F<7sDretMJD~)xFPTEI|hFo|nWsP&4$^aqAYC5g?=ksy(s-Hr=9Wjya!Y
z?6Nvdi@ykmq-XL?zTf*HHwo|I=^#bht*;%wS{1@JqRuT*tBJx(TxSN#^NV>Xox~6uIEEgTo!YwM6;}Q@_@e9U&^{I~szIc#aQ}@X#Xrt{
zs2eN$E9BZeBb@kQQq9}r9b+Ek`oUUI@3C9k@-nkdBW7Nub0Z{K7*49Nrb1TTUx
z-?86tFQAXn$Uge_U4rxP#O($x(@)YX4=h9Vyn@M^B@P6<6$AKM_#^Gq^tMFD!N1(3
zJ0CtqtoSTA80q_CWU*F1cyv)}xryWNKJo!+qkDty!dLA@t78=dJ?_H=Ffzyd{jc;GZjxBXI2Be5^?i>;m$ZL6{fysY-%?xS0Ly
zXGiJBs)fr7T014+5Wby1va6#@fir%Z#6PKH=ZPN1!lPjNnp-sg6nbQj08zwv<8!mN
zNWw{l#&U-~@c1!8o7^ejcD!ai!d2)x1%!&t`Ku0e*l)TxYxVuSHJs8zaz52H3Y;6K
zg*A8&7~m*MpAhQ3y`VG-u>tWOIX6`e4g_#-G-!$jpg=AAuESzX4U1^O%gF+S>$GHM
ziu^Z#)~$b9dOhErhq9xIeK#t+;dLS2EOY2K1kV6`2+x2q{(<60wT7nF7{K22i!z%E
z5dnzkRR-*Gv7P%W@03KtsL0gngV>hVmZ*Z*8}~{6oD>RNAx33^*OGy))ic01NGAFD
z*)3FFc+!KkS)cm`rt|{OE=UgEZIc?(8ujQPAJ@qei0dYHm`v`#QY}Y
zf<4DS!1xL2L<-2N#~KS{>sgxq#76s6wfKf~6IcDOdnDyQ5{9~1!wtxx6+j8`7n%2r
zP>L57qXT%sn2EH=dDbiBmaQuA=%zX0c0wDGtMV)jX`ZKaN{sQ<7f4@JsfMBVAHneq
z2u!a>vfAMQj3%6E9Isyjg&EDytWhC7?IQC~hK5nspd&cCYs<6^Z5~~HG6*-Z~K
z-19n{L1XQA0>S>J-mQqnuMT(4lQdz%@SZnv3N^sAy~j?H;ouVu5vENK!S&)Ea-5wO
z&kWidyS9!&3E&2E06`G3>#H)sfU)epWe1OTD?j}uH9^xx6jvH8>2r;z+)X+%V1C>U
zQm{l^#QrwpvIE74vH?Efzq-@BY4r|W2NtESHCvJiq8oqKJyYD
z7f*#pcTo`9oo4bdW7r=VHNzWFc)8z4LwrYP7#x`WI}II%T+qizWMc*wHslw-+t!1Y
zZ!rS5lS1gypBZ(=JTjot=X~^k+_2ZooY$H-ZCRi3y2b8xf2RR>ueOTYhe(rXzoD;n
z*gFTh!T-fAQ`$mc8uAJ8h0lTKxZuy0vj5B_0=aZDw)$$Xr7ccY_l8UA#3b&ja0ov!v0Bf=t|2faREmi6Dgtv{g?1;uqCNhSQskR8A2lF9^gkVtND%Uhr
z5ZF!C69S;SVhB9Q4R!L*b__(nR(TvOc*ZcW^r|Lb^G7!+_8L95ng|Tf2(Wf>jde*i
zp@$M&zUhLz5+!`9@J+682`(@4iwc<6D79wv)y7)*>Am?5=0Y2w4dUoY
zv;QOQAUB$E8fls1w*rZ}WUn{79;Xp$_;HRa&-v}x_G?bMZ-VHo*25G??SfzvYYmw_
z;KVi34i}Widgg#QYk#g78XWEOu9IGC>eLKquPYptVjB1&T1=OhoGg_@?%&Y=Nq9k|
z&sS#)N34Pm;~uyRE%*_zd(TvF_NSIrtc~CbF)y%BP(zq&tY|TDBCj#C?^tGlDCp5%
zr8Y;zkTUkU+%6LE;1XowTrv~(HaPKcz2X>S18$pkq>$#`@g*jq$r!bIs6B
zdaJqywSeNmokZ@SRGG|uzgvf|;C182cTmb0IxKksYz*8ml>B`H>QVHqqRLJ?Vk&f8
zz}otk3b>;=apEL)vc>z-0~1atQKKcU!pWe9I5F4=OhMmjVD&+b&JkZ{O-n=btBsbp
z;GCU|hs;lX!Ov55*h;1hwyQlBn|->mQ~+jH3T}$JSaX3dvD%O{kGx^ff}-EBXIRE+emLGmuoLN6(o8PVi;E^1h-r9&m8}>hvEnNkkC+L@ZfP@J#
z1&;$%PT}D7D)4Q&wMEHw&9Sn4cYjTv3Y))OLbXqPxT`uxtUlio40LvZbd%BTshG!E
z!FxLF7#U(u!ET-85B;9n^5oS1`+5H}lTI5m|L;>*{1|C>$X=}6IQap`RcwIra>b~&
zLBxqwr+Q$jTZYc;?AL|VMPnu55>Y=7?4-eDTCQdQ=#Cou0P3*YY?Zxn)?ubvjfWXE
z))?e-=ZSRp=xtU`=|EAo#YdO79%Z3%c}y#IF;CTOrRB>I@{JHEBm064?
ze9MEI`@`~_1yea)PH5KL1lZMz26EHAjk+%5XDYkz`wOM>s}$Bn;h?t-
zwwI@NwMp>>kWh>pDkIarZ27mc8%>b(;9)1v#_D3x;dwo~;v*_}QyX&;Pww{tWdM$mYFMT!zY37hZ8rxcz3hTQ$
z98!5d)BZ^sZC=U8Vu@j28wnV__cWG8-e2I#@zcg%ZdG9S}S2+q9#Em?E&O
z2*ixE3gA0PZPGUjW)nnzwZ+z1-8cW8lJnk8X{b@)Z_53rO@AK$rIdTE4f;~|nuXi<
zz#wlMN=~XO^-2cxu9SoCsWjc1bS|VUqj$=)ffmp8VgJ5sPKiKjc^|Xi5?ht&m(NnZ
z5mE^z`uUAB*SmV@kluR0u-<6S`qXc6KV({Znn8Yx#OtcK^}#N)h&gE?UE_jTpHNz`
zv=~GnP>w`We*cU2UdtAUSB5q$uz@-W%rbQ|q6z4lH@?`{i5_uVovW>aASOS4j%Nz5-7vluGKDKfNUx)K(R7
za+&B9&v*0beR_v!m*!`AP`K@g!;p$2Mxypdlc2FPkeR>CtVD=#BK5
znvd%b$GP37m!r~L?LPZoPw6u;CAN};ycl7P5BciF9$Le0&b?GC=x<)l
z3lpT|f|>R=Tik5j|9$RopxgdA+uTGAaxeJ9-y(b1C*dbzUh&u
zrx|g-+PT&$FiwB6@}p@-xo_=!Gvz(RF9%M|Zq?BFy;A($iYGJg442eB;zNbcF1e}4
z4!HT1UqwL-H&ya>tTuMsERxer1J&}d|Ia|qGKtgo?WoEV0d-Ar&N`*
zQ(If}(@!c%!+}5f*S8`JLe0zIJ$Ek$=fL*dJX20^ndfMsGWVZbWTX$X;ro99Rp0{&
zfFhuxKaxT78$?s4KAn8ilB773I!gP8iG<`YJjf=nz3pN4j)8R!n5h?t(-Hf4wvTm^
zJzz%08@Q#Yn!R(meh+{V(X|3C)0pOAiYK}rbtO8X8C}qV)v2o>Cb=xz)|NZ0O!wb(
zIa86)iJoU2>YzO{s{~-&Gu*nk-%fm6C}vzFFcovNb|!=Pws8`s7o-(7R;p5P;~9CH5M~8FdvvDb{%3Rkvlh4
zFz(Bh-~5ctUeNgAqd1@lUvKBR-T3)=9!d4j#AXJlRt_cjyf5nl5IVX>Y=Rr=cYhU<_h_Qp0LZ3DNwSs&ES&1OK~l1H=J%+q5_Au=PJGrR`
zQ?vc}obC9oDjHa6D%kkE$=7N%S;#;vSn8FRHu0wifEB~**N&Ap5V0JTu>iWEjvZ5M
zSn(S>)7?hx3>S!*6h*~82q!W2xMDfn^6AkFb_|AzLoApb?Z-cgM=ODk(c03vePto~
zig}DsbwZIa@F0zWLgFx(7B9&f_bym=aNd5V<4NG8wdkKeW67*1=59uFI6x%niMHsX
z0scD57RUuP@cJG&PP`U2ti<)2GAZ8julbVZQORFE>TeBazjvNUdPfQxlFfne%v&3h
z{gO*ATcGF;JDfa3>vTw7K9|u-{28vd+jhW9dgYJKy`Y42vt?e-fF9;;aE;GVE6xzg
zM5t6@McZx8_t4*g`1d{HDL+57+^urjUbe9MBB+(2B}^R&7eG1DIUQl#f$yRe*nrrPEJTex&mMw
z<~tfR2$&`N%#&23Fyv9vP&{#uGb=7tfIW51=KmBH7MiQ>~qoLdg
z{GB`uU9-I?RAsJMtqzjAb$!Q94&EbAyaDwleC$%Ng?bXope(C&s1(%bSe}2CyLpZ5
zi&5Dcwoe~dzKKn)``URM0xwQSh^v}sBu*^xDG|wl6M4ZlD&M;LUigv0B-|G@K46Cx
zxjI%tMQ+-C>|%y^mL?(b~
zR7gwLAT9&80-bXz;2(OMG_J5|r+~NTeG)@&huHY>v}Jqy=Qd3)5K~hlHyryLL;4$GA=7I!o_%15O&A@|ACAUufp^QThR-@
z(Xgzqr$4%y7mQiXgt#R5Bn}?v0N#jC+RXp<0)T1pA=IDn(<=;WncKJgdnEXA`+W7L
z#|7gaAc)5?yZgc4yuzEdWO(Fn_fy6o9!va3a-Xc>F`W+kD3k$-dJ11QcB>Vf|G-E6
z7P=A~3z(*Bxj
?yon0ee*Iwj$!_r^O+wSfQ+2fQ1+tk7*aepHA`Z~WPzu|<#w1L?NUzMGAkV=q<@`Z;{G
ztX{!$H&;r-z+gXPhsweeO79EIm=FPR>U8Ez$j3j|<8qQzcOd?IVOIjLX%2_dpph68
zz??!K&vR?CLwD25Z)(@Z%4S~nheUOCuJW&S^)sEQBtS5stwPR>&Pm2INN%0~p{nju
zfvBl)?RZHn115Lo3;GD87a|IbG60hufNNv=k3m-Rji=-mo-=Z))T_*mdB+A``E@Y3Vml{C1uJ>Fv;j{Dxm3*rjD+};W4s^q!IC0xBg}dE)pUNOIqLFrp!ox?
zw5SxbSL!XdJMQEHTOiQ`XN!lpv3o_%Rdeiby@bzG8@Z
zjP-D+z#5$F+?#WnyZrGcOL4Q|D~)jG-47YgDJ0vsU9!#z{KQ-2z9qxWtac5nn;pmT
zy#%n*ezQpZt(L}9WtkIXI1k5^FqVEj(fIsXW5(uQ&yw|(IlXs@u2ZOD&bBkZ*vN>k
zaH^r)84}OR)Nc;EiRE06g@XMs#x{^H$c~4LRDHo>>r&trzW^8LSy>gl$5w?K_h>~I
zrv{3gLSqU*1@hBdspFd0c5~`Bgnw%t$IeP}(=Qk!*gI0|?s-`^H#F^X4uTcXGJo
z8zj@hA8ok!QKp-iJMxhh!~OT|TqiuAmv*lT7~(&is^0mxwE_CbLF}wrwpw*J`8oPF
z+&ev4Fvh^+n);p8^UYPQr?=HbYDq}ml0)xoAhCQwJ;+zXv*
z6g{LQ$@L|MnDd1KOS1}eKx>(5&O0ICli3`LRZweVY;l54JjaRg@C}m4aZ6Se!Rmo?&FL3i
zrAwlo#WzG0%(nsm$zlJr6>LFXo@MGY%PRTwMAW6*$=ZJ;&Q(g_^=5Db;ZK)lQ}Q~t
z-Covh46Gb!r;TSJWWa6#M^&A8cOTpcKYp@~Jci#rOtH37URe8M(Zk$XH8Y34H^h|S
z%l<}JAX5dyGHt~^Z98UrttJO7qyCCoE6x|f!x??f_&|>8Blzf`*%35CO-M-AC?DX9OEX(%VmTIBaY;C+p1Yb+xqL2`k?fwSWhyB*Z)%<=qLL
zADRH($QJCSYW(6R-a3iFit-ogG8|M%QEHHXU#Yz+0gM$b{aEU=j2mJ>Jqvt7mq{lx
z?fuOwZzk&$NbWwej8?i{#;0g--2g2K>W5V-5jx|XpZoB(Vb)WF(nNDU3e9-tha
zpe5t+<{Q6N^{nR_^;FMn^aMnnyuM|0^Z5TC$@8@ZQdx3t)LI16UUG4XJ`P-08rgs#
z!Nwb*RZG(3>o+QH0Vvs&PHBct{OcXPC+y&pIB2zWyOYvjv6^Np23Me>os#;<8A%3T
zFXR!*d_ia>iu~TAE!*<=Y3fa6TLAP0b!G$v{;oe}U{*pZ0H+P9Nu{SMucK4VWX7?s
zONPU#c2P+wKOkuvLD7gtw}uLf09CE?KFpM%zu!cm9)CNZv$Nlbu9Cp4o7e44!ndnN
zuqtwW?U1Pq>V+O_4L|f)|G6PX_tvo^HXL7om<99g+Or$yf1zE5khUVmPqE^52~L2i
zgA>9i`GEvt=H+77frAn`U%Wo=yM5wSUbQxm%*g2x
zzmT$I6S&bt@tSI9ysWZRj#i&fV`=zMFHg@M&W$%IhE3!O&7R_wT!Qoe=JL+u&v^>9
zIAE4_y&YF?uq~``8!9@PWbJhcK{?Q+QFDgeP`Ok?ZL?!&iRoY#Q;E%m~B=en|^!jXd0GqpFZ!b8YL6)MjI
zdAUY@@lU@fN8X0wQ?jlzxPiu+zM}hGjI#mvI5}lUrLBMh=+n&n2&gVO9O|HVu!-ruS5)SnJPU~Gp=0GuB-2ox5375b3}W2F|Hsc|L#R`2a`C&(
zgXs^26sz(1Lm$#DtNjww*G%%}I>2au!$w9!RGM{tRDEnCjal=-Q6K+V<%0kayW4*x
z{yI@IzodtUK%bb`4uGC4_^^UEdxq#Pgs18DKaT8j`f5{o`YlUyIC}?ZE3r*-pz48l
z!g|!a>~F|sXQSfAPiuiKMRIFz-fTs=1cztdlhZQ?{uSiG0t9;QwmE3nySjVspO({p
zF=)&XbcHg;)&`v^S^((sN1=n@^VjFV=l-yzsBcy%x<>BK6QBoF;!d6Qfn`q0JDD=FT`#%>wz2$A^V(Qvm%fdZzuuzY;Cf)zFW@sh*Z;A4lp!A{aXz@yj2i7j_sGe8TFKc<8heW#ojBRO5j!lUgH=B^1#
zOE&7oLW%*e=?~}HN681j-P0>NG&zV-dSe9*4SPF;7X^9Prd|z0xLEjz#|_CHQ73t7
zLUAYZ>TkI@-x5Sdq(GAI%y0CC`!xcPIZ@Gi@lg2rY-6jqv++$^tLHyc1NSI*e&Utn
zj%?WgKP!#sV==iF$$LY?j?`P=Q#!ci99u)GpH=qxuuG`A4&i}I2GiMh75sO)h}2+9
z-Lsz$`x4?lC4Z{1PMKaUkIvBe6!Pw7r^zp#jC^mlSjGQ;!a&7$prr+*AbxCM{66^8
z7JJW$du4y+Gbd=`b_VgK_}_bwU3#ILZwq&M8d4j)i(jhG1fE5wpx3xzpPxNLQvNv5
zfB0H>!jKW;ieQ)u3
z%2$5R*Z$7~65qy^h5$?HX<8aav*TvNjBokuG;>0k>>hoeKP8-tQQwYFz8?8xNcCU&
z?wC%q9y>@%+5gm3V3Bfxk2)v;H|zHJ&%FS?`DV|TzgSF-gShB!;YFw-bPi-5C})zH
zdJi5APxsMe-@DvCpZX@(@M5S=cHh@bqPH+v{NdxC@nX@chA&S8Z)?LYEa>jP7Z2ip
z)O2g`#;I*P4(!?n+{01c28D%-qtdpY1T$8+Yir+1YiTQOb`*6l*hi*&Q#_hiMu%b!
z>Itz8$~0Aa8%5Vgj|lyf13T^!3y&wt(^4IvCyc(+Cs|VopWVp{AqTBnNR$;I8fJt7
z%;EchQ}ROv%a={1-&gv?ES>tSVg(}J34YOfEZ!8rt{Q%U_=0=`O3IaOBh+@WEgj}c
z%v2vsKz;3fe0_p`=n8nJ4)gO?NRdJN&JfiSPAGFc%U}8F6XF_*Xcz-h^l;;!(Lb|8
z!OjM1`09CDS1j%5!Ryhv)L5&n9L~+a_qZpAnU6AK8WrVK)t0uNM!m^^2rw2Xng6+4
z7w$VoA97)}QzuiZ_~UGJ0XN7kF{{P0BC~(je0v_xVrVo6<;1;N?JVxE(S&b|%Kw$K
zxMjv;)oaDG%ajM5uK>WB5XmZ=mtYqfxj)jRkYX$kV9@xFWUldo{=FR_o?frJHX%;s
zKAH*ep|stf0xB3;*-q+R(J!lt+Dm56!G)9ps}H+Lpg9MUK_(*ZZ-dkTg>M8HuQ}rb
z_D0aTUS)l#%v{o{qT;dN$r!fKCdMNc-X`%4eIbB5JT|-}iBzS4-x=8J6!}0&Dv2)r
z7%=M7ou#^)?@{bQal^|mni7!sh?CN<=nCCZ;R3$%*!>6I6QCxr+-{W|vC3)2b#wlT
zmU!2PGhKMRsdNm4dhsBy7v5yvA4+2^uMj7kO~L*jiA@ur8M~b7TqEAn*!-QSz2%cE
z8<<8T9GRb8j-t~0IS>BF*y+%dR}<4MiM`((Sqb)p$C|J5O$X$0Yh77J=jFTKDXwIE
z!oL_Ep_5Z%w)krMQYdE&xmef@N9mr%WFd}~aEjH%WrwViJ_Tz_*m9Jirf{xLx#UeJnNc)*=
zAqB(SZ%|f{D8Sy|s+oh~wM!4uc`GyBsc=&9bwCje*#VaG1B{nruRD)xVPw4%5nr{f
zc#%3mzsDT*5@+iAABhj)3ygnJ{oxse)RN}?+7XlaPjJI_6C9NWCR|i7r>g)Xd
zkEAB^!-p}8@13*vd|v!V!bAmBq|QImh*S@76f&_uMdqj6g$y0qxsS+(dx4iEH^F$O
z_)XhyI5pz$r5t1o^d3~1u8QKT_~)Qv3E|ugr+a#zP9y9o*#~tmOLA5tvkPBXc!&L;
zBlm0qeiAZBRHH3kO%us8#D2AsfTRE7-vAeL!L{ylueU?OQKMm7I6k0x0SnR_l$-@V
zn#JiHX5Tw=XW=@9vmn89v_2ly_MNH6UH9(
z3w*3C16QI|vZZ?UMa}TzG15u-{SV^rHQMTLc`_TsxFG;bU=c23|WH*8D~RzZ`;;$9
zzGbOGo+3b-@bw$E0Bc4zfoy020Aic~D#?aL^eR{DX%^6%(lEnB+z&nne
zjo}t=#QmVin$Ky8SuZ++d^wVqvu~RZ1Ej+%FLj^Wu>Q8_{1TlBJ)nwK`^pA+RYq;AuBGje
z6y|aEtZyi*saU2@^&;Wnkl6MlOHpO7k)QRULl>b;jTLW+);`umGg+bL`wZ{6vt)KX
znssh$ubq?-N^)H~Gp?@usPVw~7Vj1_28q98*avu&y}+B?;{#P9dwn2p;x8-GO*YHl
z$I5C8j*EL&p!XeUk{vebpytUR@0Z6;>k^TeRSytb4ey757nTey!%b*>06;@Qk~sjBWRs|3Euw*9^QC(%3}SOWyvs3
za^OQu|H|q=hr<3f!=}3v3?$#zWw=AW)a-9mRoAaHBBKRWl&;0#JeoLtDLS
z*m4(n^g%pXrV%W7a7FrP@0?k0)v;yKpUo1CXROCuXB>EgH#GIwgwrIN9b53bhzZHX
zNK6Wr3daf?Uu7p`BVF@8VqiSzyqKy|;-5*_xaw~!l`9h)Udh|uiC=cLHj-Bn6#~ib
zj7qqxjG1d{
zvjAYv4_b02*$N-2h%d5^!Rt{Qm-I^-{j7dX(7c=0+f35|$DfGNNaNj)!36@udnEY3
zOZq}C$AbDT{M&1{;62aaPRWaTJsgrpgsnsqt4wR46Q{@bL)}u>!B=J{V|Sas@Yml5
zK4Rv1XAX!|OOke|Q#_04MlcqPlQ}$dJ>KvNoHw77qi0@dc=%;W^rOSmSC-n&g4t&P
z`gC|6gegTUyMr4}`;~v5JYfV{Mfr3ttm#Mod((vlmgVLW&8X<6a}C{bRiS43Uf#tn
zc;p4evP3cXO>0VzD-!iMIfo+o#?UM9UGyv{8bNWe9GdIo^mT2^&OgAI5p8vD|QR$>zE;V
z+ueIa*DHfDEtkL&-TF#Pq}=NQY}_S)rRf8*Q{d8a%esngBxr9Czpbm9?NYF2)UN4;
z%8eM+cAINHu3)@UT06E5%fmCI0cH)y2C2WQg?u
zb+EySruZs|Zxfh1ps~|#Df+H0H2a6jM|_PWXh0-Rr7bD(SzO(R(7a6P6-!%*{Vl1a
zo<#k1pe$S!KwD{6>dEyqPMuUAUwl(ymXI@#Q{9|m{6h)1W{UE>z1cc`S6e`87$xd$
zoqhv1pX#8%@8P6b=+nZeva`8s5_7H;6=XMmNbA6&dcLIeK@FwZ5jBUo3d-HtCeaBFER5A?9T_aD!yxsTcqMIJyeArrN)c
z0wN+H-3+BvN~Lq4QX<{0(%mp>6O=9i0RaIeq)SS2ATbc6bJRvkZwO<^w&y*+_Yc^0
z?cC=+=X~!^w&2q+3z}GlrLliqir%+AP)YtA<@Z}n;?y++JDspEwq|b7$66id`0z7g
z_rm`D$`w`;8xRGKP%W3{Vbv$A(%y*NL*m?TD(FS~--Sab;b^I-nge?VC5|e0){u-r
z3fX_U|1`yNb;t0j>LTLLOl~t6IMJ)IH~6*zLlNy)Waj
zGU7SVXh=L0@8VVWg7}~(4!@H5+=TdSNF`DY1WzM=
zZZ>-7#p7E+rdk2*gpPupDA67zVphwjt;W++yrg||`}DTA<-Kl3gcSJ{jXL+Ib6PtK
zS(#v_6I-9e9&tQXKa>_0{T~^fr_Zu>cwcA@B-60@u};Kfq&3IyI>*R@zR&c1Vzuw*P
zw?CS+LN9Q{cP$Gwk1JsLw*Jt>=2Szqvc)#w*|eC2t=%9SSgdeg~dUeUs9J&v68vI@7l6ik_mp*3TNa%*~eetV`o<(|y~
z4~!R>n|=*KUC8}*rRFG}8@&6B@!0cD$5P)q9DKp3?6xD|%6P9Y*8edwpYs{#ai-ry
z#3U1x^^wN>@J`vWbTFlf_S;9-&!y%^Bfhwhp=Qgs6gi*{`ww`cW#&1Q6jfR%?U-XQsC
zE7g6e@Y&;M9LVxy6Iscjs^L4gd4E)RN&gU-dj9k2ZMw&U#y|Y4x2tYn`+W7+c-@`x
zdF*`yrddA|_DQA}oA`S%PE64`bKl#uf%0TC3RROWsHMhEC1B}1d^A11=
zqd=??-s@aS5|wzJmvaPZ@7A0`p{8(laq)W{6dGxdOkI}vTP1<;$KoXHC@QOK_%Fd8
zQ*mwR5Ffj+KeidxL-h~4glhn~_9>WKH@#ufHXHO>
z^EPC~2zLp~MxFvVfWSoJaSrYbqhu~C%p_h;H3k{o%Ta~?_*T;*DB)JA21jMV+FG{i
zMPcLuHfMjOI|V05>VWJu4&FL(IEe8;@5Vuy2$uMEr&35!zY5RHS^XV1Bg+*nTw5z{
zYEbGqf=lGipZhwVxQp1%N-G;9c3IwRTwI^BU^lIQh4Ah{N`M8epIN-u7Lv;LO7@WfeS7h-_^@tA7)VJMnB?q;aRXzo
z@ws8WjSrdnL%Jmqgztz0ds*nBeN#vx;C6tjG~kI>4-Nn
z<#!(fE^3azRe7x$ddqRb_jWYXpkwO|R6`SsR2zeFMjWXDIZ3+Z{|R6=*iOg*#PwSav!e@
zz7|W>D+{9TlBj^>ABG8@nV0wD(n7Z+^*K~YT7a8lz@<*)C+wAd6r87v5)fLFBMQNI
z@P-%Hvl%6A%Au0#{XEf0YpVfra)Atmw~3M?a?CgK
z)&|baoD;7z___GMX^4Js{YANlS2_nBy{XE_`#<)l9_m#$!-p^Qual;5MHutSbI_9c
zhVVPh7%7Vy4Y&|cfe{;?6Yf@=@`l&*wF#47hA;H^u`TcaU^Ee8w&yo96*+9p`vUkW
zLmS5g=TC7M9jin)g%PR7qG
zfj4)Hj$j@-j#*BKrHJ31FQ+hm7lY!Ca~-LM2F=A`JnIC;1Kd+Q`Byu_8w@yBx@BHT
z&_Si8Y&3L(+DW~=Ii>ENXerq*UTGAr{$m<}H4WF^^RQtJZ+CrDK=X?W=wdduXBz+R
zDi{h(QtVw!brC#x2du5MJ_Ej9y>qIWi(cDzOd7C)nnfY4g<0lq#XEbCsXOd?+R1E-);_5d>ynZW2g%ugn@h
z)}(3GtulmXU_M>b#YZu|!~zw>V>u!$kX>!x3_m=;fN)#@{G)xP104t5K2(z31ko<5
zN_ja6<)Lel8#`Q6=oqXyE#A2q8~ka3ZDAg%
z{J!l&{Be0`@jX)m>ihj^?k?aw_FKA#}(#w2?Ef2u^dOoL4c>#TC2F_
zN8*Nt3s=5e8X2X$aEsghb&EuCce9W-w8Q2fFsjPF^;`Y@w*>zmDHi{cjViPD!Lquc
zF%k5{EZ|#ok^-WN5X2U3()XRvLvjY8!kF#s%DMyOP+q2fNPyChNwp|36?%I2
z5G$vG#@4)@U(&5vB@zmaPq7))27alOBGoJcGu`0c31GQ%`q8k(G?Yx%CU_~lU%}^X
zORe?bqrHo~oR}JirowRjCYxVVY^v0@*rCO9LwmYC`?j1TrwzrTrqQcs_<^R|;!4ka
zA3>wu9@x7_Si>>@kr7yZdZ!3a@Y)q?5CsX!{mK2j?5`VK0yz_ZiwoH|pGQC2dmzw&
zU>SgTkZywOA!g^&Df+mNP1%Yej3dk{?7o)f8L}Ms38jtC*6$kZxbu!9FJ5CnC|u61
zXP}?3+n4DT^pbyMj`$n)$F^xCnMMRM^o4rv0N1jDzc9mtLNU8)c;hM^blI#|i*?FV
z3HleF>b)8*T7pk
zlyFmSUYhR~0PDusj}AF}P^N!nvj<{TQmb1b$h|+n{S!M+W_T7Z1|Lsv`ev#2N}glT
zcO82NJkaiVXAwG4$eWx#Z)*pPrWv*RvJYpyKUa13oA?)DY{5L*URO45P*61Hr~#v#
zy(K~L*Z1GrT%Zg+yWZnO%nOnann=7kKD;shD|A>6^-2gPPYF%?18d0Xo4`MGd4C38
z*2hKlU~gpBL2`5-(Q@@GB>N(*myr`<&95UCOM^xE#kIkR)VU;V5{cUK>JfDKKeF2`
zK1oG1K1zl{4UPRScjj6?=80vjsKd7`K4!+8Z?cRe;|hmtx^q|G6$o2>cIckIT)8YJ
zO|OxR`B^!JiEh)Di-R|BfAKP?E<7($`G5{7f2gePbRZ(Y;7KmPjoq`SlviAmJnIZ^
zbKPwT&q`rsO5QvzojwaHfX;}jrYB!lFk~#-UP&MIUb3VT@TK$_ll^a_}Pq10iAZuwX=&VWJ+1fVCNto;wx2Nh3QA3{5iTZIX&^^k0gaveEz^XV^BDp
zcX36nDzbfW#m94WtAp^B+2NYWeN*z3uYN-!g_yM>rT8X&uci!^Pm$<@L|ncE4_s@TN1n-=-z%RWsJpjO1=J^o7EDcwtBMi^Ff5jK@FxGN0MtDX~n?XX4_eR(X{@
zG({u2?NtkW_(Y1jUWVQWuiw%PJ9D2aFx#dz+%+uM`)Kmebzz0bu`9E&G;eMBnl`f!
zVO1vFZ>{VUF`8sz5XPdYzh`?p&$EH`#yxgfejfTVHK~j2!|EO1(p{=DKY8sJ2LY{E
zzXgYFpQMgk`0$O^lF}Pu+r_7dLRIj5_d&l3Q`e74zFEhGdg^TESJm=lj)Ay(Hax-3
zKDp!lw2USCsWoY-ERqY8sWl78h`d0$GtEi)?K=%gzoO?EPqkj8CVt7pn^#*IL*yh8
z{l;b7KO>V@HPHnz0}sM+%R_vG{e_ny&@R*8Tzz>e=~DjvuiE(v#;L)%^}6=&^NM2b
zCKHUYdg*9K$J?zPS$)~(A1_!ZsXoGbe@420ZiABtLQ0#h>zVNhQ%^Fjk!t1YsR|so
zCoaDL?e&Z$@fi#1J7IS7(a@7FLJL99Z2ZB?-iNnc8c=4RkaBW+$Y#w!a9p_R7!Y_QA(y==#6A(-OQRhi5iOB;Uk&an`I)k#w}9Kp_+JD
z?mvx&nx*xTXCHJGxL;U~q8{|IpXhbA-Q2p%C(FF-Dw8vcp@;7De8#JPSxDvhR2lh$
z12vr#YT#pcJ+SXL|Gkoi`zkp4x(5OptsgRlOyd`+Uhlm?S+p+u_jU#z>+1M1Rz%i4
z2{q6)vp7w=SCqr;H(kB5YE!CJ^JN=Tu6*qqYp_%sLgCUg6e*b)XP|k@R&_{Ds0!ix
z4OR6bgk@WD(kSw~_KlG=ee}eO8<|dSg6V2G!<12|u*8wMFCH0A9J_Dd&S#xfa(nR!
znzUY{s&8?bXq+8r=it#d9&}Q)VQMV$!@b`zB|Gaq`)v|2{{B@R>31JXB$|KJkb^_7
zdD%G`p;qiUU5m}cc0p_Hn^ksczzi8W#ReMzKp-d
zkn0RL`ES$o?B@(E4Wqte>bi?74gpVwrrmD)YY~~>Qng*r85xLS&U)p~PS-ORVg2h?
zEUbf0mFEF;lnr^?@UhbCfCag4ThbKf2`|286`e$-@0n+4MBib#wn?Rud+?%F3>TwK
zMn3wRhT?_g=C0UUX48th;G5$&wD30-n}scOWm2)^B9CvelWESEwFmu3H~4x>Pwtsa%P@k~fo`(b*tbT7=OxVTX36(Wcx9U%r$BMOPBoL0c
z!qQfiCEz7CnYe23eESB194Rj3NOdkVxT~AP9
zm^$i`M-ffjd+uf|3_P;Y;YoHSg^RlTZ#}m;;{NPs?*YP#&>ueV{0iFmy~LkPPYX!O
zv3ciW65su`D+r*d)AA$5D;Q}K-WWHOVvcjho5IShATE6#F6DpjtnKOeDOeIysrk*b
zQtfY{{WVoZz7Ug1itZqcNfGxxFeHOLX(jZqbZKZJ`$3*^Z9!w><5lX-Hc`79X0N~d
zKhhJ)BUZxSqyaRUA$@XK+mSxNDRK&)ypPamom#ftZ>x8EP>4i|Xsad6_+I;>9JbR6
zcSu_@-Y6n4{zY&ij>N8X_7$`-Uols1vMK0;xXvG`)=BveOrhayB0bV}UEVBaiNV$h
z(jy@rB;lD39|Ge$9Bpf7tTim^PHE+n<^WoM{C641>)gf7
zTb@pYd7@DCu~x2TT+Nd*n`jkRYNb@d0@4S#vX8_huf@=?oJ8A?gi=UgGRgmTV0|{h
z3z$D6fbiPUL4~*>2?~M+Ch}X-M(0vcEic!S_xmZNZTpY+3Z;Uk(OO^L>MQ?A+*-Xw
z`^&s&BOG)n=z%)H-@oYZcG+M%3PBMihW@R`wZIR%RsNA?RbMr`8aX8kwx?+QU5{2)Y_RbDU*(xX+Vu*oW3kbx(db50xp^||=aLVN~g+DHmByO^UGpbf|K!!zYGFAuI^BlIUeH#IpUUj-AXm19O@
z*bT&Bn?1&OV)@0Iv5)W(dA08%akT(=#okwkMw1XQY84vN3;+UZzUxtraK{7hCuT_f
z0B6MXffcT$uWJeU&{;L9Y*YC;aENLtYYXh=;gd)-cQ?`)NfVqWZyoTD
z>(?MJCd_79gyAii?wP$Q8y)6swC()L7w22}kqLHOT_%9KVl2gkQH7n67?=s3N2M$T
z&DAbt4QvU#R`#`ET(TF^5VW-HyvgC!nJSr_S3*od)#Jqm<8bqhLS-mQyx)UXtwX0Y
z16jC_!B)lryDr>;oD9?zTE<>_{K7c3-eE_7F$Cy1uiOMoWoURzmm5}gfU~Zk64Eih
zhC>+7AB*4pq4}oMR=~f8nm?KrI;e)Gr?aj&?E=5~3>#Ivk2tl$yF3S$N1dQZ%sg4Z
zXBz{S{o47ViGfsk4p!em#4=i4=QN
zj@KpL*=PICw|c`{ZuYB60i;_^oAr$NXE+qLJnqak>~tD%5w<2yBeypbWcRY`mJPMY
z#m5lj83Pw}yQG^*n>-E!;1>FLv2z~1&>Yeok(}$6xjnDL9j|Sj_cIg)*dfq9i|4|kzqTMGQ@C;)AQZbH
zH1<8yB{(c#Z5P?iwOM~?2|W35ya7@cFeoYxs0cRCMFesg-?85R+Ys_mwCNXr^gV|J
zI~&J;o1eL#CM9+V-5Q^M2!
z#-AGb$eNuy??i=(;7cr3Q2?TeRAeEUxCBhV-rq&Mj#=9CS%GQYBs&C-8fki}1=?Ta
zTO%7M_?zSn;B&nIdyh&`5gCCKxbfkCvfD~FwrAUT_1YEPY5b&~uD;56z%lcNy-t=@
zAf<{{8%w?cNZ`<=1Rsn(&~j`l@xB~WKd6H)vDZN#Jo>pmZm#A1m9cT1>wRu0LX!gY
zVtXg0_D6On=#xy({Z5Y}i7}MlTM-`!U`dW2I8;feDJzw4e~(u$PQdEup4@e
zUXvGmqx21-45^<4EedZ=qykjY+
z3DLrz)LL6w>|e-a2TuvGC*I$2R3Aysj4^nkt#XbNCNL=jM6>5f48X6X0R00ZT>2n)
z1#rC4gk~33J=xrOoV87xho7#~fR*p_?a#N*iVd1N9Lo+@w;M6_hu0#7ngCClWJ{iB
z8eT7Oh_CI!+YmsqH>#ee$845C?2lM5<|uey(b75s#;C$g5W)<_)ViwB&-w(WSZhy;
z9UXffJG2EST9iJ&om_K`g0~ocUkv80k9=qRamfZcX(#C?LOT|r8_!{bT`zU^j
z*%8Bh3!ueNbqiAjA>s#(P89pSDG5O=w8+!D8InD3%q4I*(!*Eow%E>rU=HwCWG?;o
zF|5e80~j&PN~2cHQHUdZuZ`b6xH5cFh1)+K0#FQ5fPo
zegVs7m5%NLToeyZC!XwQ%K8M?uK}u*z3e=R_h+?|dhgYFT629E1sq%r8Xsc7w}N#{
zdUIVgdOZ1lJg({?
zNRc)Om$Vm#{$vAM&5)z$mXD1hKG^Y(S`X!~AItEszje>)nH#(>$P3-2Ed@BW7WJ`8
z!M4M{o1x5Pvcj?AX{4F@SvX#Bz7{>P^cQDje~aLUWi3FhdY=qjwAGeE$_}_3Mf2ANFmin{Hi3Qviu_r7VXajQZv7Jj}CO$W7bN1&Ec&EPT8C
z37|aF{qPG12U4^gCi}|jj430g1rmOsCjf!koOXD0B}N+OrNuL05gR*kXgDh*K$8`D
z924pP5@&uxGpi&>wV!{Chu+WT;wbaV5;_jV*g$XUqxIj}v(`v2pDl=PbtMjQz=Liw
zac2jbZhv&hoQ6xmP{q&(U2tNX%ER9*{YSj=J@ElZS%5&qy~x2S&3((!!RqJfysl9R
zAb_#wh|QwuAJ{o&-x-g_J*Q8u2o60vx5DLsO2JPaL47Nw4|*zbGEyGjeaa;L3J|MV5|CV&5!UeKGpE@%3roIB`VS(0979$5O$
z9XdJbvKmd6!NIP2_Ma-#kpbqfWqg0~y(a@F@$7$`j>>7Yxp>T^o?A?}yWf&uxwJ~H
zRQ)<-z(2GuTm6P7KX;h(M>UFym+9Vey~7ozg1>{^_cRbuAFsk
z==_-qypMI)Or6@s#?L)8>}v0(v=ZwiqiJ*<-6OXQ3nl+dy2qcdYi4gZd>0eR6<92S|rI@-FgGc622GU(`r5vrg>0=LG|`7|p&XjW&;UW%iY>
zHWahSh$4A&fy!nZ^m5TvVUPH4G}U$^}xvi-`)Zw<_E{JvaH~BdQ@AK)s
zoWNDhc>?VK3s$xDv1dd9UC*~xZk6{2zT8VDVOJC6wsv|l@J?8VjnpEHPCtozM#(6?0~+MTwU>sH4Gi6!9!Rj1rRq6p=K9Uo-v(+CTL+J39Mpo`)92)vTf0a_z|tPj3n
z&j!LU*cEuH+gJY73Ha4N`K^gsy}Ede@K@93151n0_bDh8PUw@E5%QX-PL41YZ&ryH
zBluV-jgo%!ta7&{5fUum{z#6`;M?Sid@I;cAluE6*L^oV)g8i@q5VXay0HB8+ob@%
zcIGTMX9n|A+3Wpo922#XZkKDvn@;f~50Yi)^O^6@Krc@#B+o1PUfGNf%b)CN%Vv9D
z$1tbO=REvD9jat0!bH}-MHgFP`^Xhh&=NW<4c!p9``lwjzG}gYHaK5(%b!fj;^w^|
zI+Wb?^F7tRa(AoBOqm8US|o(@cG|Vevrdz%@q37LWsWGd9|5_9T)7oy7Dyj
z{Y{H31Fe59^F>=+Zk!tFxn*)>5-(j67g{E_64iW@uSX4+#!^_6TB2|Hs}5gO!X31#
z%L3?!vv(etxdk&yz8g9Ck8Hofj@?kH>F*u7b%!)ki@VKU+v%FrWLG##13E__T7eEi
zbjw&8Vp#L=`w=h;iE$;_lJc>jolg3#|uUS_uKs<5-Obk{QbWz4VaiSEZ7*Ru_yWeB0*Q^@YNp4Fo<1rEu50I4lfxxb>1jW>RkXUt+z3RAXsc!2O`C
zCT^Fu7EPbqI9x`JQE^*Ahco}SlH=Jo8+QiF^+C$t)!_UKRpVxqzvOc5-Fm)4c|oV}
z0B${1AsLQ;|D2m^o^u8rwE}roWdnaqjazTZ^Uoyy7U%1hS#n+a(5&M4E |