from typing import Any, Dict, Union

import blobfile as bf
import torch
import torch.nn as nn
import yaml

from shap_e.models.generation.latent_diffusion import SplitVectorDiffusion
from shap_e.models.generation.perceiver import PointDiffusionPerceiver
from shap_e.models.generation.pooled_mlp import PooledMLP
from shap_e.models.generation.transformer import (
CLIPImageGridPointDiffusionTransformer,
CLIPImageGridUpsamplePointDiffusionTransformer,
CLIPImagePointDiffusionTransformer,
PointDiffusionTransformer,
UpsamplePointDiffusionTransformer,
)
from shap_e.models.nerf.model import MLPNeRFModel, VoidNeRFModel
from shap_e.models.nerf.renderer import OneStepNeRFRenderer, TwoStepNeRFRenderer
from shap_e.models.nerstf.mlp import MLPDensitySDFModel, MLPNeRSTFModel
from shap_e.models.nerstf.renderer import NeRSTFRenderer
from shap_e.models.nn.meta import batch_meta_state_dict
from shap_e.models.stf.mlp import MLPSDFModel, MLPTextureFieldModel
from shap_e.models.stf.renderer import STFRenderer
from shap_e.models.transmitter.base import ChannelsDecoder, Transmitter, VectorDecoder
from shap_e.models.transmitter.channels_encoder import (
PointCloudPerceiverChannelsEncoder,
PointCloudTransformerChannelsEncoder,
)
from shap_e.models.transmitter.multiview_encoder import MultiviewTransformerEncoder
from shap_e.models.transmitter.pc_encoder import (
PointCloudPerceiverEncoder,
PointCloudTransformerEncoder,
)
from shap_e.models.volume import BoundingBoxVolume, SphericalVolume, UnboundedVolume


def model_from_config(config: Union[str, Dict[str, Any]], device: torch.device) -> nn.Module:
    """
    Construct a model on ``device`` from a config dict, or from a path to a
    YAML file containing such a dict. The ``name`` key selects the model
    class; all remaining keys are forwarded as constructor keyword arguments.
    """
    if isinstance(config, str):
        # A string config is treated as a path to a YAML file.
        with bf.BlobFile(config, "rb") as f:
            obj = yaml.load(f, Loader=yaml.SafeLoader)
        return model_from_config(obj, device=device)

    # Pop "name" from a copy so the caller's dict is left untouched.
    config = config.copy()
    name = config.pop("name")

    if name == "PointCloudTransformerEncoder":
        return PointCloudTransformerEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudPerceiverEncoder":
        return PointCloudPerceiverEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudTransformerChannelsEncoder":
        return PointCloudTransformerChannelsEncoder(device=device, dtype=torch.float32, **config)
    elif name == "PointCloudPerceiverChannelsEncoder":
        return PointCloudPerceiverChannelsEncoder(device=device, dtype=torch.float32, **config)
    elif name == "MultiviewTransformerEncoder":
        return MultiviewTransformerEncoder(device=device, dtype=torch.float32, **config)
elif name == "Transmitter":
renderer = model_from_config(config.pop("renderer"), device=device)
param_shapes = {
k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
}
encoder_config = config.pop("encoder").copy()
encoder_config["param_shapes"] = param_shapes
encoder = model_from_config(encoder_config, device=device)
return Transmitter(encoder=encoder, renderer=renderer, **config)
elif name == "VectorDecoder":
renderer = model_from_config(config.pop("renderer"), device=device)
param_shapes = {
k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
}
return VectorDecoder(param_shapes=param_shapes, renderer=renderer, device=device, **config)
elif name == "ChannelsDecoder":
renderer = model_from_config(config.pop("renderer"), device=device)
param_shapes = {
k: v.shape[1:] for k, v in batch_meta_state_dict(renderer, batch_size=1).items()
}
return ChannelsDecoder(
param_shapes=param_shapes, renderer=renderer, device=device, **config
)
elif name == "OneStepNeRFRenderer":
config = config.copy()
for field in [
# Required
"void_model",
"foreground_model",
"volume",
# Optional to use NeRF++
"background_model",
"outer_volume",
]:
if field in config:
config[field] = model_from_config(config.pop(field).copy(), device)
return OneStepNeRFRenderer(device=device, **config)
elif name == "TwoStepNeRFRenderer":
config = config.copy()
for field in [
# Required
"void_model",
"coarse_model",
"fine_model",
"volume",
# Optional to use NeRF++
"coarse_background_model",
"fine_background_model",
"outer_volume",
]:
if field in config:
config[field] = model_from_config(config.pop(field).copy(), device)
return TwoStepNeRFRenderer(device=device, **config)
elif name == "PooledMLP":
return PooledMLP(device, **config)
elif name == "PointDiffusionTransformer":
return PointDiffusionTransformer(device=device, dtype=torch.float32, **config)
elif name == "PointDiffusionPerceiver":
return PointDiffusionPerceiver(device=device, dtype=torch.float32, **config)
elif name == "CLIPImagePointDiffusionTransformer":
return CLIPImagePointDiffusionTransformer(device=device, dtype=torch.float32, **config)
elif name == "CLIPImageGridPointDiffusionTransformer":
return CLIPImageGridPointDiffusionTransformer(device=device, dtype=torch.float32, **config)
elif name == "UpsamplePointDiffusionTransformer":
return UpsamplePointDiffusionTransformer(device=device, dtype=torch.float32, **config)
elif name == "CLIPImageGridUpsamplePointDiffusionTransformer":
return CLIPImageGridUpsamplePointDiffusionTransformer(
device=device, dtype=torch.float32, **config
)
elif name == "SplitVectorDiffusion":
inner_config = config.pop("inner")
d_latent = config.pop("d_latent")
latent_ctx = config.pop("latent_ctx", 1)
inner_config["input_channels"] = d_latent // latent_ctx
inner_config["n_ctx"] = latent_ctx
inner_config["output_channels"] = d_latent // latent_ctx * 2
inner_model = model_from_config(inner_config, device)
return SplitVectorDiffusion(
device=device, wrapped=inner_model, n_ctx=latent_ctx, d_latent=d_latent
)
elif name == "STFRenderer":
config = config.copy()
for field in ["sdf", "tf", "volume"]:
config[field] = model_from_config(config.pop(field), device)
return STFRenderer(device=device, **config)
elif name == "NeRSTFRenderer":
config = config.copy()
for field in ["sdf", "tf", "nerstf", "void", "volume"]:
if field not in config:
continue
config[field] = model_from_config(config.pop(field), device)
config.setdefault("sdf", None)
config.setdefault("tf", None)
config.setdefault("nerstf", None)
return NeRSTFRenderer(device=device, **config)
    # Fall back to a simple registry for classes whose constructors need no
    # special handling beyond the device argument.
    model_cls = {
        "MLPSDFModel": MLPSDFModel,
        "MLPTextureFieldModel": MLPTextureFieldModel,
        "MLPNeRFModel": MLPNeRFModel,
        "MLPDensitySDFModel": MLPDensitySDFModel,
        "MLPNeRSTFModel": MLPNeRSTFModel,
        "VoidNeRFModel": VoidNeRFModel,
        "BoundingBoxVolume": BoundingBoxVolume,
        "SphericalVolume": SphericalVolume,
        "UnboundedVolume": UnboundedVolume,
    }[name]
    return model_cls(device=device, **config)
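

# Example usage: a minimal sketch, not part of the original module. The
# BoundingBoxVolume kwargs below (bbox_min/bbox_max) are assumptions based on
# how this factory forwards config keys; check shap_e.models.volume for the
# actual constructor signature before relying on them.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build a model directly from a config dict: "name" picks the class and
    # the remaining keys become constructor keyword arguments.
    volume = model_from_config(
        {
            "name": "BoundingBoxVolume",
            "bbox_min": [-1.0, -1.0, -1.0],
            "bbox_max": [1.0, 1.0, 1.0],
        },
        device=device,
    )
    print(type(volume).__name__)

    # Equivalently, pass a path to a YAML file holding the same dict
    # (hypothetical path shown):
    # model = model_from_config("configs/volume.yaml", device=device)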