|
- import numpy as np
- import torch
-
def trans_t(t):
    """Return a 4x4 homogeneous matrix translating by ``t`` along the +z axis.

    Converted from a lambda assignment (PEP 8 E731) to a proper ``def``;
    behavior is unchanged.
    """
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, t],
        [0, 0, 0, 1]]).float()
-
def rot_phi(phi):
    """Return a 4x4 homogeneous rotation about the x axis by ``phi`` radians.

    Converted from a lambda assignment (PEP 8 E731) to a proper ``def``;
    behavior is unchanged.
    """
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, np.cos(phi), -np.sin(phi), 0],
        [0, np.sin(phi), np.cos(phi), 0],
        [0, 0, 0, 1]]).float()
-
def rot_theta(th):
    """Return a 4x4 homogeneous rotation about the y axis by ``th`` radians.

    Sign convention matches the original lambda: row 0 is
    ``[cos, 0, -sin]`` and row 2 is ``[sin, 0, cos]``.
    Converted from a lambda assignment (PEP 8 E731) to a proper ``def``;
    behavior is unchanged.
    """
    return torch.Tensor([
        [np.cos(th), 0, -np.sin(th), 0],
        [0, 1, 0, 0],
        [np.sin(th), 0, np.cos(th), 0],
        [0, 0, 0, 1]]).float()
-
-
def pose_spherical(theta, phi, radius):
    """Build a camera-to-world pose on a sphere around the origin.

    ``theta`` and ``phi`` are given in degrees; ``radius`` is the distance
    from the origin. The pose is assembled as translate-then-rotate, then
    remapped by a fixed axis-swap/flip matrix.
    """
    deg2rad = np.pi / 180.
    pose = trans_t(radius)
    pose = rot_phi(phi * deg2rad) @ pose
    pose = rot_theta(theta * deg2rad) @ pose
    # Axis remap: negate x and swap the y/z axes of the resulting frame.
    flip = torch.Tensor(np.array([[-1, 0, 0, 0],
                                  [0, 0, 1, 0],
                                  [0, 1, 0, 0],
                                  [0, 0, 0, 1]]))
    return flip @ pose
-
-
-
def circle(radius=3.5, h=0.0, axis='z', t0=0, r=1):
    """Return a function ``t -> [x, y, z]`` tracing a circle.

    The circle has the given ``radius`` and lies in the plane perpendicular
    to ``axis`` ('z', 'y', or anything else for 'x') at offset ``h`` along
    that axis. ``t0`` is a phase offset and ``r`` an angular-rate multiplier.
    """
    def point(t):
        u = radius * np.cos(r * t + t0)
        v = radius * np.sin(r * t + t0)
        if axis == 'z':
            return [u, v, h]
        if axis == 'y':
            return [u, h, v]
        return [h, u, v]

    return point
-
-
def cross(x, y, axis=0):
    """Cross product of 3-vectors stored along ``axis``.

    Dispatches to ``torch.cross`` for tensors and ``np.cross`` for arrays.

    Bug fix: the original forwarded ``axis`` as the third *positional*
    argument. For numpy that slot is ``axisa`` (only the first input's
    axis), which silently leaves ``axisb``/``axisc`` at -1 for
    multi-dimensional inputs. Pass it by keyword so it applies to both
    inputs and the output.
    """
    if isinstance(x, torch.Tensor):
        return torch.cross(x, y, dim=axis)
    return np.cross(x, y, axis=axis)
-
-
def normalize(x, axis=-1, order=2):
    """Normalize ``x`` along ``axis`` with the given p-norm ``order``.

    Returns a tuple ``(normalized, norm)`` where ``norm`` keeps the
    reduced axis (keepdim) so it broadcasts against ``x``.

    Bug fix: the numpy branch previously returned a 1-tuple ``(x / l2,)``
    while the torch branch returned ``(x / l2, l2)``. Both branches now
    return the norm as the second element; existing callers that index
    ``[0]`` are unaffected.
    """
    if isinstance(x, torch.Tensor):
        l2 = x.norm(p=order, dim=axis, keepdim=True)
        # Epsilon guards against division by zero for zero vectors.
        return x / (l2 + 1e-8), l2
    else:
        l2 = np.linalg.norm(x, order, axis)
        l2 = np.expand_dims(l2, axis)
        # Zero norms are replaced by 1 so zero vectors stay zero.
        l2[l2 == 0] = 1
        return x / l2, l2
-
-
def cat(x, axis=1):
    """Concatenate a sequence of tensors or arrays along ``axis``.

    Dispatches on the type of the first element: ``torch.cat`` for
    tensors, ``np.concatenate`` otherwise.
    """
    first = x[0]
    if isinstance(first, torch.Tensor):
        return torch.cat(x, dim=axis)
    return np.concatenate(x, axis=axis)
-
-
def look_at_rotation(camera_position, at=None, up=None, inverse=False, cv=False):
    """Build a 3x3 rotation from a camera position and a look-at target.

    The columns of the returned matrix are the camera frame's x, y, and z
    axes expressed in world coordinates: z points from the camera toward
    ``at``, x is orthogonal to ``up`` and z, and y completes the frame.

    Input:
        camera_position: 3-vector (torch tensor)
        at: look-at target; defaults to the origin (zeros)
        up: world up direction; defaults to (0, 0, -1)
        inverse, cv: accepted for API compatibility but currently unused
    """
    if at is None:
        at = torch.zeros_like(camera_position)
    else:
        at = torch.tensor(at).type_as(camera_position)

    if up is None:
        up = torch.zeros_like(camera_position)
        up[2] = -1
    else:
        up = torch.tensor(up).type_as(camera_position)

    # Orthonormal camera frame via successive normalized cross products.
    z_axis = normalize(at - camera_position)[0]
    x_axis = normalize(cross(up, z_axis))[0]
    y_axis = normalize(cross(z_axis, x_axis))[0]

    columns = [x_axis[:, None], y_axis[:, None], z_axis[:, None]]
    return cat(columns, axis=1)
-
-
def gen_path(pos_gen, at=(0, 0, 0), up=(0, -1, 0), frames=180):
    """Generate ``frames`` camera-to-world 4x4 poses along a path.

    ``pos_gen`` maps an angle in radians (one full 360-degree sweep split
    into ``frames`` steps) to a camera position; each pose looks at ``at``
    with the given ``up`` direction.
    """
    poses = []
    for i in range(frames):
        angle = i * (360.0 / frames) / 180 * np.pi
        position = torch.tensor(pos_gen(angle))
        rotation = look_at_rotation(position, at=at, up=up, inverse=False, cv=True)
        pose = torch.eye(4)
        pose[:3, 3] = position
        pose[:3, :3] = rotation
        poses.append(pose)
    return torch.stack(poses)
|