Skip to content

Commit f25f060

Browse files
committed
added support for bgeo
1 parent 9de99b2 commit f25f060

File tree

3 files changed

+117
-4
lines changed

3 files changed

+117
-4
lines changed

mzd/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
from .mzd import readMZD_to_bpymesh, readMZD_to_meshio
from .bgeo import readbgeo_to_meshio

# __all__ entries must be strings (PEP 8): listing the function objects
# themselves makes `from mzd import *` raise
# "TypeError: Item in __all__ must be str".
__all__ = ["readMZD_to_bpymesh", "readMZD_to_meshio", "readbgeo_to_meshio"]

mzd/bgeo.py

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
import gzip
2+
import numpy as np
3+
import meshio
4+
5+
6+
def readbgeo_to_meshio(filepath):
    """Read a Houdini .bgeo (version 5) point cloud into a ``meshio.Mesh``.

    Parses the gzip-compressed, big-endian binary header, the per-point
    attribute definition table, and the bulk point data.  Only point
    attributes of Houdini type FLOAT (0), INT (1) and VECTOR (5) are
    supported; primitives, groups and vertex/prim/detail attributes are
    not decoded.

    Parameters
    ----------
    filepath : str or os.PathLike
        Path to the gzip-compressed .bgeo file.

    Returns
    -------
    meshio.Mesh
        Mesh whose points are the bgeo positions; every other point
        attribute is exposed through ``point_data``.

    Raises
    ------
    Exception
        If the magic/version does not match bgeo v5, an attribute has an
        unsupported Houdini type, or trailing bytes remain after the
        expected ``0x00 0xFF`` terminator.
    """
    with gzip.open(filepath, 'r') as file:
        # Magic: 5-byte signature followed by a 4-byte big-endian version.
        if file.read(5) != b"BgeoV":
            raise Exception('not bgeo file format')
        version = int.from_bytes(file.read(4), byteorder="big")
        if version != 5:
            raise Exception('bgeo file not version 5')

        # The header is eight consecutive 4-byte big-endian counters.
        header = {}
        for field in ('nPoints', 'nPrims', 'nPointGroups', 'nPrimGroups',
                      'nPointAttrib', 'nVertexAttrib', 'nPrimAttrib',
                      'nAttrib'):
            header[field] = int.from_bytes(file.read(4), byteorder="big")

        point_attributes = {}
        point_attributes_names = []
        point_attributes_sizes = []
        point_attributes_types = []

        # Every point record starts with x, y, z, w (homogeneous) floats.
        particle_size = 4

        # Attribute definition table: name length, name, component count,
        # Houdini type id, then a default value we deliberately skip.
        for _ in range(header['nPointAttrib']):
            namelength = int.from_bytes(file.read(2), byteorder="big")
            name = file.read(namelength).decode('utf-8')
            point_attributes_names.append(name)

            size = int.from_bytes(file.read(2), byteorder="big")
            point_attributes_sizes.append(size)
            particle_size += size

            houdini_type = int.from_bytes(file.read(4), byteorder="big")
            if houdini_type == 0:
                point_attributes_types.append('FLOAT')
            elif houdini_type == 1:
                point_attributes_types.append('INT')
            elif houdini_type == 5:
                point_attributes_types.append('VECTOR')
            else:
                raise Exception('houdini_type unknown/ unsupported')
            # Skip the default value (`size` 32-bit words); not used here.
            file.read(size * 4)

        # Bulk point block: nPoints rows of particle_size 32-bit values.
        byte = file.read(particle_size * header['nPoints'] * 4)
        # '>f' means big-endian float32.
        attribute_data = np.frombuffer(byte, dtype='>f')
        attribute_data = np.reshape(
            attribute_data, (header['nPoints'], particle_size))
        # The first 3 columns are the position data.
        position = attribute_data[:, :3]
        # The 4th column is the homogeneous coordinate (all 1); ignored.

        # Custom attributes start right after x, y, z, w.
        current_column = 4
        for i in range(header['nPointAttrib']):
            attr_name = point_attributes_names[i]
            attr_type = point_attributes_types[i]
            if attr_type == 'FLOAT':
                point_attributes[attr_name] = attribute_data[:, current_column]
                current_column += 1
            elif attr_type == 'VECTOR':
                point_attributes[attr_name] = \
                    attribute_data[:, current_column:current_column + 3]
                current_column += 3
            elif attr_type == 'INT':
                # The whole block was decoded as float32, but INT columns
                # really hold big-endian int32 bit patterns: round-trip
                # through bytes to reinterpret them without conversion.
                data = (attribute_data[:, current_column]).tobytes()
                point_attributes[attr_name] = np.frombuffer(data, dtype='>i')
                current_column += 1

        # A well-formed v5 file ends with the 0x00 0xFF terminator.
        remaining = file.read()
        if not remaining == b'\x00\xff':
            raise Exception("file didn't end")
        return meshio.Mesh(position, [('vertex', [])],
                           point_data=point_attributes)
107+

simloader/importer.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,10 @@ def create_obj(fileseq, use_relaitve, transform_matrix=Matrix([[1, 0, 0, 0], [0,
104104
enabled = True
105105

106106
try:
107-
meshio_mesh = meshio.read(filepath)
107+
if filepath.endswith('.bgeo'):
108+
meshio_mesh = mzd.readbgeo_to_meshio(filepath)
109+
else:
110+
meshio_mesh = meshio.read(filepath)
108111
except Exception as e:
109112
show_message_box("Error when reading: " + filepath + ",\n" + traceback.format_exc(),
110113
"Meshio Loading Error" + str(e),
@@ -174,7 +177,10 @@ def update_obj(scene, depsgraph=None):
174177
else:
175178
filepath = fs[current_frame % len(fs)]
176179
try:
177-
meshio_mesh = meshio.read(filepath)
180+
if filepath.endswith('.bgeo'):
181+
meshio_mesh = mzd.readbgeo_to_meshio(filepath)
182+
else:
183+
meshio_mesh = meshio.read(filepath)
178184
except Exception as e:
179185
show_message_box("Error when reading: " + filepath + ",\n" + traceback.format_exc(),
180186
"Meshio Loading Error" + str(e),

0 commit comments

Comments
 (0)