Skip to content

Commit

Permalink
misc
Browse files — browse the repository at this point in the history
  • Branch information:
ashawkey committed Jun 23, 2022
1 parent 79da0e7 commit 7b8ee21
Show file tree
Hide file tree
Showing 12 changed files with 31 additions and 39 deletions.
2 changes: 1 addition & 1 deletion encoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def forward(self, input, **kwargs):
def get_encoder(encoding, input_dim=3,
multires=6,
degree=4,
num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=True,
num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False,
**kwargs):

if encoding == 'None':
Expand Down
2 changes: 1 addition & 1 deletion gridencoder/grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def forward(ctx, inputs, embeddings, offsets, per_level_scale, base_resolution,
if calc_grad_inputs:
dy_dx = torch.empty(B, L * D * C, device=inputs.device, dtype=embeddings.dtype)
else:
dy_dx = torch.empty(1, device=inputs.device, dtype=embeddings.dtype)
dy_dx = torch.empty(1, device=inputs.device, dtype=embeddings.dtype) # placeholder... TODO: a better way?

_backend.grid_encode_forward(inputs, embeddings, offsets, outputs, B, D, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners)

Expand Down
5 changes: 2 additions & 3 deletions main_CCNeRF.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=512, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
parser.add_argument('--l1_reg_weight', type=float, default=1e-5)
Expand Down Expand Up @@ -184,9 +185,7 @@ def load_model(path):
trainer.upsample_resolutions = upsample_resolutions

if opt.gui:
trainer.train_loader = train_loader # attach dataloader to trainer

gui = NeRFGUI(opt, trainer)
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()

else:
Expand Down
5 changes: 2 additions & 3 deletions main_nerf.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=512, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")

### network backbone options
Expand Down Expand Up @@ -130,9 +131,7 @@
trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, optimizer=optimizer, criterion=criterion, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, scheduler_update_every_step=True, metrics=[PSNRMeter()], use_checkpoint=opt.ckpt, eval_interval=50)

if opt.gui:
trainer.train_loader = train_loader # attach dataloader to trainer

gui = NeRFGUI(opt, trainer)
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()

else:
Expand Down
5 changes: 2 additions & 3 deletions main_tensoRF.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=512, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
parser.add_argument('--l1_reg_weight', type=float, default=1e-4)
Expand Down Expand Up @@ -127,9 +128,7 @@
trainer.upsample_resolutions = upsample_resolutions

if opt.gui:
trainer.train_loader = train_loader # attach dataloader to trainer

gui = NeRFGUI(opt, trainer)
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()

else:
Expand Down
10 changes: 7 additions & 3 deletions nerf/gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,17 +60,21 @@ def pan(self, dx, dy, dz=0):


class NeRFGUI:
def __init__(self, opt, trainer, debug=True):
def __init__(self, opt, trainer, train_loader=None, debug=True):
self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.
self.W = opt.W
self.H = opt.H
self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)
self.trainer = trainer
self.debug = debug
self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg
self.training = False
self.step = 0 # training step

self.trainer = trainer
self.train_loader = train_loader
if train_loader is not None:
self.trainer.error_map = train_loader._data.error_map

self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)
self.need_update = True # camera moved, should reset accumulation
self.spp = 1 # sample per pixel
Expand All @@ -93,7 +97,7 @@ def train_step(self):
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()

outputs = self.trainer.train_gui(self.trainer.train_loader, step=self.train_steps)
outputs = self.trainer.train_gui(self.train_loader, step=self.train_steps)

ender.record()
torch.cuda.synchronize()
Expand Down
2 changes: 1 addition & 1 deletion nerf/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -262,7 +262,7 @@ def __init__(self, opt, device, type='train', downscale=1, n_test=10):

def collate(self, index):

B = len(index) # always 1
B = len(index) # a list of length 1

# random pose without gt images.
if self.rand_pose == 0 or index[0] >= len(self.poses):
Expand Down
13 changes: 6 additions & 7 deletions nerf/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -596,6 +596,10 @@ def train_gui(self, train_loader, step=16):

loader = iter(train_loader)

# mark untrained grid
if self.global_step == 0:
self.model.mark_untrained_grid(train_loader._data.poses, train_loader._data.intrinsics)

for _ in range(step):

# mimic an infinite loop dataloader (in case the total dataset is smaller than step)
Expand All @@ -605,13 +609,8 @@ def train_gui(self, train_loader, step=16):
loader = iter(train_loader)
data = next(loader)

# mark untrained grid
if self.global_step == 0:
self.model.mark_untrained_grid(train_loader._data.poses, train_loader._data.intrinsics)
self.error_map = train_loader._data.error_map

# update grid every 16 steps
if self.model.cuda_ray and self.global_step % 16 == 0:
if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:
with torch.cuda.amp.autocast(enabled=self.fp16):
self.model.update_extra_state()

Expand Down Expand Up @@ -725,7 +724,7 @@ def train_one_epoch(self, loader):
for data in loader:

# update grid every 16 steps
if self.model.cuda_ray and self.global_step % 16 == 0:
if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:
with torch.cuda.amp.autocast(enabled=self.fp16):
self.model.update_extra_state()

Expand Down
4 changes: 2 additions & 2 deletions scripts/llff2nerf.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@ def rotmat(a, b):

parser = argparse.ArgumentParser()
parser.add_argument('path', type=str, help="root directory to the LLFF dataset (contains images/ and pose_bounds.npy)")
parser.add_argument('--images', type=str, default='images', help="images folder")
parser.add_argument('--downscale', type=float, default=1, help="image size down scale")
parser.add_argument('--images', type=str, default='images', help="images folder (do not include full path, e.g., just use `images_4`)")
parser.add_argument('--downscale', type=float, default=1, help="image size down scale, e.g., 4")

opt = parser.parse_args()
print(f'[INFO] process {opt.path}')
Expand Down
2 changes: 1 addition & 1 deletion scripts/run_nerf.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=1 python main_nerf.py data/nerf_synthetic
#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/TanksAndTemple/Barn --workspace trial_nerf_barn -O --mode blender --bound 1.0 --scale 0.33 --dt_gamma 0

#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/firekeeper --workspace trial_nerf_firekeeper_bg_32 -O --bound 1.0 --scale 0.33 --bg_radius 32 #--gui #--test
#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/garden --workspace trial_nerf_garden_bg_32 --cuda_ray --fp16 --bound 4.0 --scale 0.33 --bg_radius 32 #--gui --test
#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/garden --workspace trial_nerf_garden_bound_16 --cuda_ray --fp16 --bound 16.0 --scale 0.33 #--gui --test

#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/vasedeck --workspace trial_nerf_vasedeck -O --bound 4.0 --scale 0.33 #--gui #--test
#OMP_NUM_THREADS=8 CUDA_VISIBLE_DEVICES=2 python main_nerf.py data/vasedeck --workspace trial_nerf_vasedeck_bg_32 -O --bound 4.0 --scale 0.33 --bg_radius 32 #--gui #--test
16 changes: 4 additions & 12 deletions tensoRF/network_cc.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,6 @@ def __init__(self,
self.group_mat_density = [np.diff(rank_mat_density, prepend=0)]
self.group_vec = [np.diff(rank_vec, prepend=0)]
self.group_mat = [np.diff(rank_mat, prepend=0)]

# for composition
# self.offset_vec_density = [0]
# self.offset_mat_density = [0]
# self.offset_vec = [0]
# self.offset_mat = [0]

self.mat_ids = [[0, 1], [0, 2], [1, 2]]
self.vec_ids = [2, 1, 0]
Expand Down Expand Up @@ -153,8 +147,8 @@ def compute_features_density(self, x, K=-1, residual=False, oid=0):

last_y = None

offset_vec = oid # self.offset_vec_density[oid]
offset_mat = oid # self.offset_mat_density[oid]
offset_vec = oid
offset_mat = oid

for k in range(K):

Expand All @@ -166,7 +160,6 @@ def compute_features_density(self, x, K=-1, residual=False, oid=0):
F.grid_sample(self.U_vec_density[3 * offset_vec + 2], vec_coord[[2]], align_corners=False).view(-1, N) # [r, N]

y = y + (self.S_vec_density[offset_vec] @ vec_feat)
#y = y + vec_feat.sum(0, keepdim=True)

offset_vec += 1

Expand All @@ -176,7 +169,6 @@ def compute_features_density(self, x, K=-1, residual=False, oid=0):
F.grid_sample(self.U_mat_density[3 * offset_mat + 2], mat_coord[[2]], align_corners=False).view(-1, N) # [r, N]

y = y + (self.S_mat_density[offset_mat] @ mat_feat) # [out_dim, N]
#y = y + mat_feat.sum(0, keepdim=True)

offset_mat += 1

Expand Down Expand Up @@ -217,8 +209,8 @@ def compute_features(self, x, K=-1, residual=False, oid=0):

last_y = None

offset_vec = oid # self.offset_vec[oid]
offset_mat = oid # self.offset_mat[oid]
offset_vec = oid
offset_mat = oid

for k in range(K):

Expand Down
4 changes: 2 additions & 2 deletions tensoRF/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def train_one_epoch(self, loader):
for data in loader:

# update grid every 16 steps
if self.model.cuda_ray and self.global_step % 16 == 0:
if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:
with torch.cuda.amp.autocast(enabled=self.fp16):
self.model.update_extra_state()

Expand Down Expand Up @@ -176,7 +176,7 @@ def train_gui(self, train_loader, step=16):
self.error_map = train_loader._data.error_map

# update grid every 16 steps
if self.model.cuda_ray and self.global_step % 16 == 0:
if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:
with torch.cuda.amp.autocast(enabled=self.fp16):
self.model.update_extra_state()

Expand Down

0 comments on commit 7b8ee21

Please sign in to comment.