Skip to content

Commit

Permalink
update render path
Browse files Browse the repository at this point in the history
  • Loading branch information
samcao416 committed May 20, 2022
1 parent b9b63e3 commit 1978393
Show file tree
Hide file tree
Showing 12 changed files with 78 additions and 7 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
log
log
run_commands
2 changes: 2 additions & 0 deletions configs/big_scene/fangcang/all.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/fangcang/group1.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/fangcang/group2.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/fangcang/group3.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
3 changes: 2 additions & 1 deletion configs/big_scene/fangcang/group4.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,9 @@ update_AlphaMask_list = [2000,4000]

N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/outdoor_r2_agi.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/skdfs/1.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
2 changes: 2 additions & 0 deletions configs/big_scene/skdfs/3.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ update_AlphaMask_list = [2000,4000]
N_vis = 5
vis_every = 10000

render_train = 1
render_test = 1
render_path = 1

n_lamb_sigma = [16,16,16]
n_lamb_sh = [48,48,48]
Expand Down
Binary file modified dataLoader/__pycache__/big_scene.cpython-38.pyc
Binary file not shown.
55 changes: 53 additions & 2 deletions dataLoader/big_scene.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,10 @@ def __init__(self, datadir, split='train', downsample=1.0, is_stack=False, N_vis

self.scene_bbox = torch.Tensor([[-4.0, -4.0, -4.0,], [4.0, 4.0, 4.0]])
self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
self.read_meta()
if self.split == 'render':
self.read_meta_render()
else:
self.read_meta()
self.define_proj_mat()

self.white_bg = False
Expand Down Expand Up @@ -96,7 +99,48 @@ def read_meta(self):
self.all_rays = torch.stack(self.all_rays, 0)
self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(-1, *self.img_wh[::-1], 3) # (len(self.meta['frames]),h,w,3)


def read_meta_render(self):
    """Load camera-trajectory metadata for the 'render' split.

    Reads ``cam_tra/camera_render.json`` under the dataset root and builds
    per-frame poses and rays only — no RGB images, masks, or depths are
    loaded, since render-path frames carry no ground truth.

    Populates: ``self.meta``, ``self.img_wh``, ``self.focal``,
    ``self.directions``, ``self.intrinsics``, ``self.poses`` (N, 4, 4),
    and ``self.all_rays`` ((N*h*w, 6) when not stacked, (N, h*w, 6) when
    ``self.is_stack``).
    """
    with open(os.path.join(self.root_dir, "cam_tra", "camera_render.json"), 'r') as f:
        self.meta = json.load(f)

    w = int(self.meta['w'] / self.downsample)
    h = int(self.meta['h'] / self.downsample)
    self.img_wh = [w, h]
    # Rescale the focal length to the downsampled resolution.
    self.focal = self.meta['fl_x'] * (w / self.meta['w'])

    # Ray directions for all pixels, same for every frame (same H, W, focal).
    self.directions = get_ray_directions(h, w, [self.focal, self.focal])  # (h, w, 3)
    # BUG FIX: the original assigned the normalized result to a misspelled
    # attribute ("self.directoins"), leaving self.directions un-normalized.
    self.directions = self.directions / torch.norm(self.directions, dim=-1, keepdim=True)
    self.intrinsics = torch.tensor([[self.focal, 0, w / 2],
                                    [0, self.focal, h / 2],
                                    [0, 0, 1]]).float()

    self.image_paths = []
    self.poses = []
    self.all_rays = []
    self.all_rgbs = []    # stays empty: render frames have no images
    self.all_masks = []
    self.all_depths = []

    # Subsample frames when N_vis caps the number of views (N_vis < 0 = all).
    img_eval_interval = 1 if self.N_vis < 0 else len(self.meta['frames']) // self.N_vis
    idxs = list(range(0, len(self.meta['frames']), img_eval_interval))
    for i in tqdm(idxs, desc=f'Loading data {self.split} ({len(idxs)})'):
        frame = self.meta['frames'][i]
        # Convert from the Blender camera convention to OpenCV before use.
        pose = np.array(frame['transform_matrix']) @ self.blender2opencv
        c2w = torch.FloatTensor(pose)
        self.poses += [c2w]

        rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
        self.all_rays += [torch.cat([rays_o, rays_d], 1)]  # (h*w, 6)

    self.poses = torch.stack(self.poses)
    if not self.is_stack:
        self.all_rays = torch.cat(self.all_rays, 0)    # (n_frames*h*w, 6)
    else:
        self.all_rays = torch.stack(self.all_rays, 0)  # (n_frames, h*w, 6)

def define_proj_mat(self):
    """Precompute per-view projection matrices into ``self.proj_mat``.

    Combines the shared pinhole intrinsics (3, 3) with the top three rows
    of each inverted camera-to-world pose — i.e. the world-to-camera
    transform — yielding an (N, 3, 4) batch of projection matrices.
    """
    world_to_cam = torch.inverse(self.poses)[:, :3]  # (N, 3, 4)
    self.proj_mat = self.intrinsics.unsqueeze(0) @ world_to_cam

Expand All @@ -113,6 +157,13 @@ def __getitem__(self, idx):
sample = {'rays': self.all_rays[idx],
'rgbs': self.all_rgbs[idx]}

#elif self.split == 'test':
# rays = self.all_rays[idx]
#
# sample = {'rays': self.all_rays[idx],
# }


else: # create data for each image separately

img = self.all_rgbs[idx]
Expand Down
10 changes: 7 additions & 3 deletions train.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,15 +287,19 @@ def reconstruction(args):
os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
PSNRs_test = evaluation(test_dataset,tensorf, args, renderer, f'{logfolder}/imgs_test_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
#render_dataset = dataset(args.datadir, split= 'render', downsample = 1.0, is_stack = True)
# PSNRs_test = evaluation(render_dataset,tensorf, args, renderer, f'{logfolder}/imgs_test_all/',
# N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
summary_writer.add_scalar('test/psnr_all', np.mean(PSNRs_test), global_step=iteration)
print(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')

if args.render_path:
c2ws = test_dataset.render_path
# c2ws = test_dataset.poses
render_dataset = dataset(args.datadir, split= 'render', downsample = 1.0, is_stack = True)
#c2ws = render_dataset.render_path
c2ws = render_dataset.poses
print('========>',c2ws.shape)
os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
evaluation_path(test_dataset,tensorf, c2ws, renderer, f'{logfolder}/imgs_path_all/',
evaluation_path(render_dataset,tensorf, c2ws, renderer, f'{logfolder}/imgs_path_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)


Expand Down

0 comments on commit 1978393

Please sign in to comment.