Commit

clean
陈安沛 committed Mar 18, 2022
1 parent 140322f commit c64270d
Showing 6 changed files with 4 additions and 66 deletions.
9 changes: 2 additions & 7 deletions dataLoader/nsvf.py
@@ -44,7 +44,7 @@ def __init__(self, datadir, split='train', downsample=1.0, wh=[800,800], is_stac
self.define_transforms()

self.white_bg = True
self.near_far = [0.5,5.0]
self.near_far = [0.5,6.0]
self.scene_bbox = torch.from_numpy(np.loadtxt(f'{self.root_dir}/bbox.txt')).float()[:6].view(2,3)
self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
self.read_meta()
@@ -111,15 +111,10 @@ def read_meta(self):
c2w = torch.FloatTensor(c2w)
self.poses.append(c2w) # C2W
rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
# vdirs = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
# t_minmax = aabb(rays_o, vdirs, self.scene_bbox)
self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 8)

# w2c = torch.inverse(c2w)
# depth = (w2c[:3,:3] @corners.t() + w2c[:3,3:])[2]
# near_far = [max(0.0, torch.min(depth)), torch.max(depth)]
# print(near_far)

#

self.poses = torch.stack(self.poses)
if 'train' == self.split:
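
For context, the near_far pair above brackets the depth range that samples along each ray may cover, and get_rays returns per-pixel ray origins and directions. A minimal sketch of how such bounds are typically consumed when placing samples along rays (hypothetical helper, not this repository's exact sampling code):

import torch

def sample_along_rays(rays_o, rays_d, near, far, n_samples=64):
    # Evenly spaced depths between the near and far planes, e.g. a [0.5, 5.0] pair.
    t_vals = torch.linspace(near, far, n_samples)                              # (n_samples,)
    # Points along each ray: o + t * d, broadcast over the sample dimension.
    pts = rays_o[..., None, :] + rays_d[..., None, :] * t_vals[..., None]      # (..., n_samples, 3)
    return pts, t_vals
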
3 changes: 0 additions & 3 deletions dataLoader/tankstemple.py
@@ -160,16 +160,13 @@ def read_meta(self):
c2w = torch.FloatTensor(c2w)
self.poses.append(c2w) # C2W
rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
# vdirs = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
# t_minmax = aabb(rays_o, vdirs, self.scene_bbox)
self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 8)

self.poses = torch.stack(self.poses)

center = torch.mean(self.scene_bbox, dim=0)
radius = torch.norm(self.scene_bbox[1]-center)*1.2
up = torch.mean(self.poses[:, :3, 1], dim=0).tolist()
# print(center,radius,up,-0.2*up[1])
pos_gen = circle(radius=radius, h=-0.2*up[1], axis='y')
self.render_path = gen_path(pos_gen, up=up,frames=200)
self.render_path[:, :3, 3] += center
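
The circle/gen_path calls above build a circular camera orbit around the scene-bbox center for rendering a fly-around video. A rough sketch of the idea behind such a path generator (simplified, hypothetical helper, not the repository's implementation):

import numpy as np

def circle_positions(radius, h, frames=200):
    # Camera positions on a circle of the given radius at height h (y-up convention).
    angles = np.linspace(0, 2 * np.pi, frames, endpoint=False)
    return np.stack([radius * np.cos(angles),
                     np.full_like(angles, h),
                     radius * np.sin(angles)], axis=-1)                        # (frames, 3)

# Shifting the positions by the bbox center, as done above with
# render_path[:, :3, 3] += center, keeps the orbit centered on the scene.
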
13 changes: 0 additions & 13 deletions models/tensoRF.py
@@ -217,13 +217,6 @@ def compute_densityfeature(self, xyz_sampled):
align_corners=True).view(-1, *xyz_sampled.shape[:1])
sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)

# if 0==idx_plane:
# sigma_feature = sigma_feature + torch.sum((plane_coef_point * line_coef_point)[[10, 0, 6, 4]], dim=0)
# elif 1==idx_plane:
# sigma_feature = sigma_feature + torch.sum((plane_coef_point * line_coef_point)[[7, 6, 5, 8]], dim=0)
# else:
# sigma_feature = sigma_feature + torch.sum((plane_coef_point * line_coef_point)[[15, 12, 3, 9]], dim=0)

return sigma_feature


@@ -312,8 +305,6 @@ def shrink(self, new_aabb):





class TensorCP(TensorBase):
def __init__(self, aabb, gridSize, device, **kargs):
super(TensorCP, self).__init__(aabb, gridSize, device, **kargs)
@@ -354,10 +345,6 @@ def compute_densityfeature(self, xyz_sampled):
align_corners=True).view(-1, *xyz_sampled.shape[:1])
line_coef_point = line_coef_point * F.grid_sample(self.density_line[2], coordinate_line[[2]],
align_corners=True).view(-1, *xyz_sampled.shape[:1])

# line_feats = F.grid_sample(self.line_coef[:, -self.density_n_comp:], coordinate_line, align_corners=True).view(
# 3, self.density_n_comp, *xyz_sampled.shape[:1])

sigma_feature = torch.sum(line_coef_point, dim=0)


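
The compute_densityfeature hunks above sample per-axis 1D factors with F.grid_sample and combine them by elementwise product before summing over components (a CP-style decomposition). A self-contained sketch of that pattern with made-up shapes (illustrative only, not the class's actual buffers):

import torch
import torch.nn.functional as F

n_comp, grid_size, n_pts = 16, 128, 1024
# One (1, n_comp, grid_size, 1) factor per axis.
density_line = [torch.randn(1, n_comp, grid_size, 1) for _ in range(3)]
xyz = torch.rand(n_pts, 3) * 2 - 1                                             # points in [-1, 1]^3

# Per-axis sampling grids shaped (3, n_pts, 1, 2) as expected by grid_sample.
coord = torch.stack([xyz[..., i] for i in range(3)])                           # (3, n_pts)
coord = torch.stack((torch.zeros_like(coord), coord), dim=-1).view(3, -1, 1, 2)

feat = F.grid_sample(density_line[0], coord[[0]], align_corners=True).view(n_comp, n_pts)
for i in (1, 2):
    feat = feat * F.grid_sample(density_line[i], coord[[i]], align_corners=True).view(n_comp, n_pts)
sigma_feature = feat.sum(dim=0)                                                # (n_pts,)
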
9 changes: 2 additions & 7 deletions models/tensorBase.py
@@ -272,7 +272,6 @@ def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):

rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
# print(rays_pts.shape, interpx.shape, mask_outbbox.shape, N_samples)
return rays_pts, interpx, ~mask_outbbox

def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
@@ -305,16 +304,13 @@ def updateAlphaMask(self, gridSize=(200,200,200)):

total_voxels = gridSize[0] * gridSize[1] * gridSize[2]

# voxel_size = self.aabbSize / (torch.tensor(gridSize).to(self.device)-1)
# voxel_norm = torch.norm(voxel_size)

samples = torch.stack(torch.meshgrid(
torch.linspace(0, 1, gridSize[0]),
torch.linspace(0, 1, gridSize[1]),
torch.linspace(0, 1, gridSize[2]),
), -1).to(self.device)
dense_xyz = self.aabb[0] * (1-samples) + self.aabb[1] * samples
# dense_xyz = dense_xyz.view(-1,3)

dense_xyz = dense_xyz.transpose(0,2).contiguous()
alpha = torch.zeros_like(dense_xyz[...,0])
for i in range(gridSize[2]):
@@ -397,8 +393,7 @@ def compute_alpha(self, xyz_locs, length=1):
validsigma = self.feature2density(sigma_feature)
sigma[alpha_mask] = validsigma

# density = F.grid_sample(grid_rgba.density_volume, dense_xyz.view(1,-1,1,1,3)).view(-1) *4/350*25
# print(density.shape)

alpha = 1 - torch.exp(-sigma*length).view(xyz_locs.shape[:-1])

return alpha
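
The value returned by compute_alpha follows the usual volume-rendering relation alpha = 1 - exp(-sigma * length). A tiny numeric illustration of that conversion (values chosen arbitrarily):

import torch

sigma = torch.tensor([0.0, 0.5, 2.0, 10.0])    # densities at sampled locations
length = 0.01                                   # distance each sample accounts for
alpha = 1 - torch.exp(-sigma * length)          # per-sample opacity in [0, 1)
# sigma = 0 gives alpha = 0 (empty space); larger sigma pushes alpha toward 1.
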
33 changes: 0 additions & 33 deletions renderer.py
@@ -143,36 +143,3 @@ def evaluation_path(test_dataset,tensorf, c2ws, renderer, savePath=None, N_vis=5

return PSNRs

def rgba_evaluation(test_dataset, grid_rgba, world2ndc, args, savePath=None, N_vis=5, prtx='', white_bg=False, N_samples=256):
PSNRs, rgb_maps = [], []
with torch.no_grad():
try:
tqdm._instances.clear()
except Exception:
pass

img_eval_interval = 1 if N_vis < 0 else test_dataset.all_rays.shape[0] // N_vis
idxs = list(range(0, test_dataset.all_rays.shape[0], img_eval_interval))
for idx, samples in tqdm(enumerate(test_dataset.all_rays[0::img_eval_interval])):

W, H = test_dataset.img_wh
rays = samples.view(-1, 8)

rgb_map, _, depth_map, _, _ = grid_rgba.render(rays, world2ndc, chunk=8192, lindisp=args.lindisp, perturb=0.0, white_bg = white_bg, N_samples=N_samples)
rgb_map = rgb_map.clamp(0.0, 1.0)

rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape(H, W).cpu()

depth_map, _ = visualize_depth_numpy(depth_map.numpy())
if len(test_dataset.all_rgbs):
loss = torch.mean((rgb_map - test_dataset.all_rgbs[idxs[idx]].view(H, W, 3)) ** 2)
PSNRs.append(-10.0 * np.log(loss.item()) / np.log(10.0))

rgb_map = (rgb_map.numpy() * 255).astype('uint8')
rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
rgb_maps.append(rgb_map)
if savePath is not None:
imageio.imwrite(f'{savePath}/{prtx}{idx:03d}.png', rgb_map)

imageio.mimwrite(f'{savePath}/{prtx}_video.mp4', np.stack(rgb_maps), fps=30, quality=10)
return PSNRs
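
The rgba_evaluation function removed above reports PSNR computed from each image's mean squared error. The same conversion in isolation (the MSE value is illustrative):

import numpy as np

mse = 1e-3                                      # mean squared error against the ground-truth image
psnr = -10.0 * np.log(mse) / np.log(10.0)       # equivalently -10 * log10(mse)
print(psnr)                                     # approximately 30 dB
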
3 changes: 0 additions & 3 deletions train.py
@@ -291,9 +291,6 @@ def reconstruction(args):
torch.set_default_dtype(torch.float32)
torch.manual_seed(20121202)
np.random.seed(20121202)
# torch.cuda.manual_seed(20121202)
# random.seed(20121202)
# torch.backends.cudnn.deterministic = True

args = config_parser()
print(args)
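
The seeding above fixes the CPU torch and numpy RNGs; the commented-out lines being removed had covered CUDA and cuDNN determinism as well. A sketch of fully deterministic seeding using standard PyTorch/NumPy calls (an illustration, not necessarily what the project intends):

import random

import numpy as np
import torch

seed = 20121202
torch.manual_seed(seed)                         # CPU RNG
torch.cuda.manual_seed_all(seed)                # all GPU RNGs
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True       # reproducible (but slower) cuDNN kernels
torch.backends.cudnn.benchmark = False
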
