diff --git a/process_mesh/dependencies/stb b/process_mesh/dependencies/stb index 5736b15f7..8b5f1f37b 160000 --- a/process_mesh/dependencies/stb +++ b/process_mesh/dependencies/stb @@ -1 +1 @@ -Subproject commit 5736b15f7ea0ffb08dd38af21067c314d6a3aae9 +Subproject commit 8b5f1f37b5b75829fc72d38e7b5d4bcbf8a26d55 diff --git a/requirements.txt b/requirements.txt index 4ed78b358..156ca419b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,9 +11,8 @@ submitit frozendict flow_vis vnoise -trimesh +trimesh==3.22.1 einops -mesh_to_sdf geomdl numpy==1.21.5 wandb @@ -27,3 +26,4 @@ google-images-search==1.4.4 landlab==2.4.1 scikit-learn psutil +pyrender diff --git a/worldgen/terrain/assets/caves/core.py b/worldgen/terrain/assets/caves/core.py index 0aa1a7432..12a665f2a 100644 --- a/worldgen/terrain/assets/caves/core.py +++ b/worldgen/terrain/assets/caves/core.py @@ -8,7 +8,7 @@ import bpy import gin -import mesh_to_sdf +import terrain.mesh_to_sdf as mesh_to_sdf import numpy as np from numpy import ascontiguousarray as AC from terrain.utils import Mesh diff --git a/worldgen/terrain/mesh_to_sdf/LICENSE b/worldgen/terrain/mesh_to_sdf/LICENSE new file mode 100644 index 000000000..0d51587d5 --- /dev/null +++ b/worldgen/terrain/mesh_to_sdf/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Marian Kleineberg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/worldgen/terrain/mesh_to_sdf/__init__.py b/worldgen/terrain/mesh_to_sdf/__init__.py new file mode 100644 index 000000000..098618b1a --- /dev/null +++ b/worldgen/terrain/mesh_to_sdf/__init__.py @@ -0,0 +1,65 @@ +# COPYRIGHT + +# Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master + +import numpy as np +from . 
import surface_point_cloud +from .surface_point_cloud import BadMeshException +from .utils import scale_to_unit_cube, scale_to_unit_sphere, get_raster_points, check_voxels +import trimesh + +def get_surface_point_cloud(mesh, surface_point_method='scan', bounding_radius=None, scan_count=100, scan_resolution=400, sample_point_count=10000000, calculate_normals=True): + if isinstance(mesh, trimesh.Scene): + mesh = mesh.dump().sum() + if not isinstance(mesh, trimesh.Trimesh): + raise TypeError("The mesh parameter must be a trimesh mesh.") + + if bounding_radius is None: + bounding_radius = np.max(np.linalg.norm(mesh.vertices, axis=1)) * 1.1 + + if surface_point_method == 'scan': + return surface_point_cloud.create_from_scans(mesh, bounding_radius=bounding_radius, scan_count=scan_count, scan_resolution=scan_resolution, calculate_normals=calculate_normals) + elif surface_point_method == 'sample': + return surface_point_cloud.sample_from_mesh(mesh, sample_point_count=sample_point_count, calculate_normals=calculate_normals) + else: + raise ValueError('Unknown surface point sampling method: {:s}'.format(surface_point_method)) + + +def mesh_to_sdf(mesh, query_points, surface_point_method='scan', sign_method='normal', bounding_radius=None, scan_count=100, scan_resolution=400, sample_point_count=10000000, normal_sample_count=11): + if not isinstance(query_points, np.ndarray): + raise TypeError('query_points must be a numpy array.') + if len(query_points.shape) != 2 or query_points.shape[1] != 3: + raise ValueError('query_points must be of shape N ✕ 3.') + + if surface_point_method == 'sample' and sign_method == 'depth': + print("Incompatible methods for sampling points and determining sign, using sign_method='normal' instead.") + sign_method = 'normal' + + point_cloud = get_surface_point_cloud(mesh, surface_point_method, bounding_radius, scan_count, scan_resolution, sample_point_count, calculate_normals=sign_method=='normal') + + if sign_method == 'normal': + return point_cloud.get_sdf_in_batches(query_points, use_depth_buffer=False) + elif sign_method == 'depth': + return point_cloud.get_sdf_in_batches(query_points, use_depth_buffer=True, sample_count=sample_point_count) + else: + raise ValueError('Unknown sign determination method: {:s}'.format(sign_method)) + + +def mesh_to_voxels(mesh, voxel_resolution=64, surface_point_method='scan', sign_method='normal', scan_count=100, scan_resolution=400, sample_point_count=10000000, normal_sample_count=11, pad=False, check_result=False, return_gradients=False): + mesh = scale_to_unit_cube(mesh) + + surface_point_cloud = get_surface_point_cloud(mesh, surface_point_method, 3**0.5, scan_count, scan_resolution, sample_point_count, sign_method=='normal') + + return surface_point_cloud.get_voxels(voxel_resolution, sign_method=='depth', normal_sample_count, pad, check_result, return_gradients) + +# Sample some uniform points and some normally distributed around the surface as proposed in the DeepSDF paper +def sample_sdf_near_surface(mesh, number_of_points = 500000, surface_point_method='scan', sign_method='normal', scan_count=100, scan_resolution=400, sample_point_count=10000000, normal_sample_count=11, min_size=0, return_gradients=False): + mesh = scale_to_unit_sphere(mesh) + + if surface_point_method == 'sample' and sign_method == 'depth': + print("Incompatible methods for sampling points and determining sign, using sign_method='normal' instead.") + sign_method = 'normal' + + surface_point_cloud = get_surface_point_cloud(mesh, surface_point_method, 1, scan_count, 
scan_resolution, sample_point_count, calculate_normals=sign_method=='normal' or return_gradients) + + return surface_point_cloud.sample_sdf_near_surface(number_of_points, surface_point_method=='scan', sign_method, normal_sample_count, min_size, return_gradients) \ No newline at end of file diff --git a/worldgen/terrain/mesh_to_sdf/pyrender_wrapper.py b/worldgen/terrain/mesh_to_sdf/pyrender_wrapper.py new file mode 100644 index 000000000..b4d4bfa77 --- /dev/null +++ b/worldgen/terrain/mesh_to_sdf/pyrender_wrapper.py @@ -0,0 +1,67 @@ +# COPYRIGHT + +# Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master + +### Wrapper around the pyrender library that allows to +### 1. disable antialiasing +### 2. render a normal buffer +### This needs to be imported before pyrender or OpenGL is imported anywhere + +import os +import sys +if 'pyrender' in sys.modules: + raise ImportError('The mesh_to_sdf package must be imported before pyrender is imported.') +if 'OpenGL' in sys.modules: + raise ImportError('The mesh_to_sdf package must be imported before OpenGL is imported.') + +# Disable antialiasing: +import OpenGL.GL + +suppress_multisampling = False +old_gl_enable = OpenGL.GL.glEnable + +def new_gl_enable(value): + if suppress_multisampling and value == OpenGL.GL.GL_MULTISAMPLE: + OpenGL.GL.glDisable(value) + else: + old_gl_enable(value) + +OpenGL.GL.glEnable = new_gl_enable + +old_glRenderbufferStorageMultisample = OpenGL.GL.glRenderbufferStorageMultisample + +def new_glRenderbufferStorageMultisample(target, samples, internalformat, width, height): + if suppress_multisampling: + OpenGL.GL.glRenderbufferStorage(target, internalformat, width, height) + else: + old_glRenderbufferStorageMultisample(target, samples, internalformat, width, height) + +OpenGL.GL.glRenderbufferStorageMultisample = new_glRenderbufferStorageMultisample + +import pyrender + +# Render a normal buffer instead of a color buffer +class CustomShaderCache(): + def __init__(self): + self.program = None + + def get_program(self, vertex_shader, fragment_shader, geometry_shader=None, defines=None): + if self.program is None: + shaders_directory = os.path.join(os.path.dirname(__file__), 'shaders') + self.program = pyrender.shader_program.ShaderProgram(os.path.join(shaders_directory, 'mesh.vert'), os.path.join(shaders_directory, 'mesh.frag'), defines=defines) + return self.program + + +def render_normal_and_depth_buffers(mesh, camera, camera_transform, resolution): + global suppress_multisampling + suppress_multisampling = True + scene = pyrender.Scene() + scene.add(pyrender.Mesh.from_trimesh(mesh, smooth = False)) + scene.add(camera, pose=camera_transform) + + renderer = pyrender.OffscreenRenderer(resolution, resolution) + renderer._renderer._program_cache = CustomShaderCache() + + color, depth = renderer.render(scene, flags=pyrender.RenderFlags.SKIP_CULL_FACES) + suppress_multisampling = False + return color, depth \ No newline at end of file diff --git a/worldgen/terrain/mesh_to_sdf/scan.py b/worldgen/terrain/mesh_to_sdf/scan.py new file mode 100644 index 000000000..ad4993ed3 --- /dev/null +++ b/worldgen/terrain/mesh_to_sdf/scan.py @@ -0,0 +1,139 @@ +# COPYRIGHT + +# Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master + +import numpy as np +from .pyrender_wrapper import render_normal_and_depth_buffers +import pyrender +from scipy.spatial.transform import Rotation +from skimage import io + +if hasattr(Rotation, "as_matrix"): # scipy>=1.4.0 + def 
get_rotation_matrix(angle, axis='y'): + matrix = np.identity(4) + matrix[:3, :3] = Rotation.from_euler(axis, angle).as_matrix() + return matrix +else: # scipy<1.4.0 + def get_rotation_matrix(angle, axis='y'): + matrix = np.identity(4) + matrix[:3, :3] = Rotation.from_euler(axis, angle).as_dcm() + return matrix + +def get_camera_transform_looking_at_origin(rotation_y, rotation_x, camera_distance=2): + camera_transform = np.identity(4) + camera_transform[2, 3] = camera_distance + camera_transform = np.matmul(get_rotation_matrix(rotation_x, axis='x'), camera_transform) + camera_transform = np.matmul(get_rotation_matrix(rotation_y, axis='y'), camera_transform) + return camera_transform + +# Camera transform from position and look direction +def get_camera_transform(position, look_direction): + camera_forward = -look_direction / np.linalg.norm(look_direction) + camera_right = np.cross(camera_forward, np.array((0, 0, -1))) + + if np.linalg.norm(camera_right) < 0.5: + camera_right = np.array((0, 1, 0), dtype=np.float32) + + camera_right /= np.linalg.norm(camera_right) + camera_up = np.cross(camera_forward, camera_right) + camera_up /= np.linalg.norm(camera_up) + + rotation = np.identity(4) + rotation[:3, 0] = camera_right + rotation[:3, 1] = camera_up + rotation[:3, 2] = camera_forward + + translation = np.identity(4) + translation[:3, 3] = position + + return np.matmul(translation, rotation) + +''' +A virtual laser scan of an object from one point in space. +This renders a normal and depth buffer and reprojects it into a point cloud. +The resulting point cloud contains a point for every pixel in the buffer that hit the model. +''' +class Scan(): + def __init__(self, mesh, camera_transform, resolution=400, calculate_normals=True, fov=1, z_near=0.1, z_far=10): + self.camera_transform = camera_transform + self.camera_position = np.matmul(self.camera_transform, np.array([0, 0, 0, 1]))[:3] + self.resolution = resolution + + camera = pyrender.PerspectiveCamera(yfov=fov, aspectRatio=1.0, znear = z_near, zfar = z_far) + self.projection_matrix = camera.get_projection_matrix() + + color, depth = render_normal_and_depth_buffers(mesh, camera, self.camera_transform, resolution) + + self.normal_buffer = color if calculate_normals else None + self.depth_buffer = depth.copy() + + indices = np.argwhere(depth != 0) + depth[depth == 0] = float('inf') + + # This reverts the processing that pyrender does and calculates the original depth buffer in clipping space + self.depth = (z_far + z_near - (2.0 * z_near * z_far) / depth) / (z_far - z_near) + + points = np.ones((indices.shape[0], 4)) + points[:, [1, 0]] = indices.astype(float) / (resolution -1) * 2 - 1 + points[:, 1] *= -1 + points[:, 2] = self.depth[indices[:, 0], indices[:, 1]] + + clipping_to_world = np.matmul(self.camera_transform, np.linalg.inv(self.projection_matrix)) + + points = np.matmul(points, clipping_to_world.transpose()) + points /= points[:, 3][:, np.newaxis] + self.points = points[:, :3] + + if calculate_normals: + normals = color[indices[:, 0], indices[:, 1]] / 255 * 2 - 1 + camera_to_points = self.camera_position - self.points + normal_orientation = np.einsum('ij,ij->i', camera_to_points, normals) + normals[normal_orientation < 0] *= -1 + self.normals = normals + else: + self.normals = None + + def convert_world_space_to_viewport(self, points): + half_viewport_size = 0.5 * self.resolution + clipping_to_viewport = np.array([ + [half_viewport_size, 0.0, 0.0, half_viewport_size], + [0.0, -half_viewport_size, 0.0, half_viewport_size], + [0.0, 0.0, 
1.0, 0.0],
+            [0, 0, 0.0, 1.0]
+        ])
+
+        world_to_clipping = np.matmul(self.projection_matrix, np.linalg.inv(self.camera_transform))
+        world_to_viewport = np.matmul(clipping_to_viewport, world_to_clipping)
+
+        world_space_points = np.concatenate([points, np.ones((points.shape[0], 1))], axis=1)
+        viewport_points = np.matmul(world_space_points, world_to_viewport.transpose())
+        viewport_points /= viewport_points[:, 3][:, np.newaxis]
+        return viewport_points
+
+    def is_visible(self, points):
+        viewport_points = self.convert_world_space_to_viewport(points)
+        pixels = viewport_points[:, :2].astype(int)
+
+        # This only has an effect if the camera is inside the model
+        in_viewport = (pixels[:, 0] >= 0) & (pixels[:, 1] >= 0) & (pixels[:, 0] < self.resolution) & (pixels[:, 1] < self.resolution) & (viewport_points[:, 2] > -1)
+
+        result = np.zeros(points.shape[0], dtype=bool)
+        result[in_viewport] = viewport_points[in_viewport, 2] < self.depth[pixels[in_viewport, 1], pixels[in_viewport, 0]]
+
+        return result
+
+    def show(self):
+        scene = pyrender.Scene()
+        scene.add(pyrender.Mesh.from_points(self.points, normals=self.normals))
+        pyrender.Viewer(scene, use_raymond_lighting=True, point_size=2)
+
+    def save(self, filename_depth, filename_normals=None):
+        if filename_normals is None and self.normal_buffer is not None:
+            items = filename_depth.split('.')
+            filename_normals = '.'.join(items[:-1]) + "_normals." + items[-1]
+
+        depth = self.depth_buffer / np.max(self.depth_buffer) * 255
+
+        io.imsave(filename_depth, depth.astype(np.uint8))
+        if self.normal_buffer is not None:
+            io.imsave(filename_normals, self.normal_buffer.astype(np.uint8))
\ No newline at end of file
diff --git a/worldgen/terrain/mesh_to_sdf/shaders/mesh.frag b/worldgen/terrain/mesh_to_sdf/shaders/mesh.frag
new file mode 100644
index 000000000..6b3218fa7
--- /dev/null
+++ b/worldgen/terrain/mesh_to_sdf/shaders/mesh.frag
@@ -0,0 +1,17 @@
+// COPYRIGHT
+
+// Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master
+
+#version 330 core
+
+in vec3 frag_position;
+in vec3 frag_normal;
+
+out vec4 frag_color;
+
+void main()
+{
+    vec3 normal = normalize(frag_normal);
+
+    frag_color = vec4(normal * 0.5 + 0.5, 1.0);
+}
\ No newline at end of file
diff --git a/worldgen/terrain/mesh_to_sdf/shaders/mesh.vert b/worldgen/terrain/mesh_to_sdf/shaders/mesh.vert
new file mode 100644
index 000000000..046497818
--- /dev/null
+++ b/worldgen/terrain/mesh_to_sdf/shaders/mesh.vert
@@ -0,0 +1,28 @@
+// COPYRIGHT
+
+// Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master
+
+#version 330 core
+
+// Vertex Attributes
+layout(location = 0) in vec3 position;
+layout(location = NORMAL_LOC) in vec3 normal;
+layout(location = INST_M_LOC) in mat4 inst_m;
+
+// Uniforms
+uniform mat4 M;
+uniform mat4 V;
+uniform mat4 P;
+
+// Outputs
+out vec3 frag_position;
+out vec3 frag_normal;
+
+void main()
+{
+    gl_Position = P * V * M * inst_m * vec4(position, 1);
+    frag_position = vec3(M * inst_m * vec4(position, 1.0));
+
+    mat4 N = transpose(inverse(M * inst_m));
+    frag_normal = normalize(vec3(N * vec4(normal, 0.0)));
+}
\ No newline at end of file
diff --git a/worldgen/terrain/mesh_to_sdf/surface_point_cloud.py b/worldgen/terrain/mesh_to_sdf/surface_point_cloud.py
new file mode 100644
index 000000000..85b599278
--- /dev/null
+++ b/worldgen/terrain/mesh_to_sdf/surface_point_cloud.py
@@ -0,0 +1,198 @@
+# COPYRIGHT
+
+# Original files authored by Marian Kleineberg:
https://github.com/marian42/mesh_to_sdf/tree/master + + +# All other contributions: +# Copyright (c) Princeton University. +# Licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree. +# Authors: Zeyu Ma + + +from .scan import Scan, get_camera_transform_looking_at_origin + +import trimesh +import logging +logging.getLogger("trimesh").setLevel(9000) +import numpy as np +from sklearn.neighbors import KDTree +import math +import pyrender +from .utils import sample_uniform_points_in_unit_sphere +from .utils import get_raster_points, check_voxels + +class BadMeshException(Exception): + pass + +class SurfacePointCloud: + def __init__(self, mesh, points, normals=None, scans=None): + self.mesh = mesh + self.points = points + self.normals = normals + self.scans = scans + + self.kd_tree = KDTree(points) + + def get_random_surface_points(self, count, use_scans=True): + if use_scans: + indices = np.random.choice(self.points.shape[0], count) + return self.points[indices, :] + else: + samples, index = trimesh.sample.sample_surface(mesh=self.mesh, count=count, face_weight=None, seed=0) + return samples + + def get_sdf(self, query_points, use_depth_buffer=False, sample_count=11, return_gradients=False): + if use_depth_buffer: + distances, indices = self.kd_tree.query(query_points) + distances = distances.astype(np.float32).reshape(-1) + inside = ~self.is_outside(query_points) + distances[inside] *= -1 + + if return_gradients: + gradients = query_points - self.points[indices[:, 0]] + gradients[inside] *= -1 + + else: + distances, indices = self.kd_tree.query(query_points, k=sample_count) + distances = distances.astype(np.float32) + + closest_points = self.points[indices] + direction_from_surface = query_points[:, np.newaxis, :] - closest_points + inside = np.einsum('ijk,ijk->ij', direction_from_surface, self.normals[indices]) < 0 + inside = np.sum(inside, axis=1) > sample_count * 0.5 + distances = distances[:, 0] + distances[inside] *= -1 + + if return_gradients: + gradients = direction_from_surface[:, 0] + gradients[inside] *= -1 + + if return_gradients: + near_surface = np.abs(distances) < math.sqrt(0.0025**2 * 3) * 3 # 3D 2-norm stdev * 3 + gradients = np.where(near_surface[:, np.newaxis], self.normals[indices[:, 0]], gradients) + gradients /= np.linalg.norm(gradients, axis=1)[:, np.newaxis] + return distances, gradients + else: + return distances + + def get_sdf_in_batches(self, query_points, use_depth_buffer=False, sample_count=11, batch_size=1000000, return_gradients=False): + if query_points.shape[0] <= batch_size: + return self.get_sdf(query_points, use_depth_buffer=use_depth_buffer, sample_count=sample_count, return_gradients=return_gradients) + + n_batches = int(math.ceil(query_points.shape[0] / batch_size)) + batches = [ + self.get_sdf(points, use_depth_buffer=use_depth_buffer, sample_count=sample_count, return_gradients=return_gradients) + for points in np.array_split(query_points, n_batches) + ] + if return_gradients: + distances = np.concatenate([batch[0] for batch in batches]) + gradients = np.concatenate([batch[1] for batch in batches]) + return distances, gradients + else: + return np.concatenate(batches) # distances + + def get_voxels(self, voxel_resolution, use_depth_buffer=False, sample_count=11, pad=False, check_result=False, return_gradients=False): + result = self.get_sdf_in_batches(get_raster_points(voxel_resolution), use_depth_buffer, sample_count, return_gradients=return_gradients) + if not return_gradients: + sdf = result + else: 
+ sdf, gradients = result + voxel_gradients = np.reshape(gradients, (voxel_resolution, voxel_resolution, voxel_resolution, 3)) + + voxels = sdf.reshape((voxel_resolution, voxel_resolution, voxel_resolution)) + + if check_result and not check_voxels(voxels): + raise BadMeshException() + + if pad: + voxels = np.pad(voxels, 1, mode='constant', constant_values=1) + + if return_gradients: + if pad: + voxel_gradients = np.pad(voxel_gradients, ((1, 1), (1, 1), (1, 1), (0, 0)), mode='edge') + return voxels, voxel_gradients + else: + return voxels + + def sample_sdf_near_surface(self, number_of_points=500000, use_scans=True, sign_method='normal', normal_sample_count=11, min_size=0, return_gradients=False): + query_points = [] + surface_sample_count = int(number_of_points * 47 / 50) // 2 + surface_points = self.get_random_surface_points(surface_sample_count, use_scans=use_scans) + query_points.append(surface_points + np.random.normal(scale=0.0025, size=(surface_sample_count, 3))) + query_points.append(surface_points + np.random.normal(scale=0.00025, size=(surface_sample_count, 3))) + + unit_sphere_sample_count = number_of_points - surface_points.shape[0] * 2 + unit_sphere_points = sample_uniform_points_in_unit_sphere(unit_sphere_sample_count) + query_points.append(unit_sphere_points) + query_points = np.concatenate(query_points).astype(np.float32) + + if sign_method == 'normal': + sdf = self.get_sdf_in_batches(query_points, use_depth_buffer=False, sample_count=normal_sample_count, return_gradients=return_gradients) + elif sign_method == 'depth': + sdf = self.get_sdf_in_batches(query_points, use_depth_buffer=True, return_gradients=return_gradients) + else: + raise ValueError('Unknown sign determination method: {:s}'.format(sign_method)) + if return_gradients: + sdf, gradients = sdf + + if min_size > 0: + model_size = np.count_nonzero(sdf[-unit_sphere_sample_count:] < 0) / unit_sphere_sample_count + if model_size < min_size: + raise BadMeshException() + + if return_gradients: + return query_points, sdf, gradients + else: + return query_points, sdf + + def show(self): + scene = pyrender.Scene() + scene.add(pyrender.Mesh.from_points(self.points, normals=self.normals)) + pyrender.Viewer(scene, use_raymond_lighting=True, point_size=2) + + def is_outside(self, points): + result = None + for scan in self.scans: + if result is None: + result = scan.is_visible(points) + else: + result = np.logical_or(result, scan.is_visible(points)) + return result + +def get_equidistant_camera_angles(count): + increment = math.pi * (3 - math.sqrt(5)) + for i in range(count): + theta = math.asin(-1 + 2 * i / (count - 1)) + phi = ((i + 1) * increment) % (2 * math.pi) + yield phi, theta + +def create_from_scans(mesh, bounding_radius=1, scan_count=100, scan_resolution=400, calculate_normals=True): + scans = [] + + for phi, theta in get_equidistant_camera_angles(scan_count): + camera_transform = get_camera_transform_looking_at_origin(phi, theta, camera_distance=2 * bounding_radius) + scans.append(Scan(mesh, + camera_transform=camera_transform, + resolution=scan_resolution, + calculate_normals=calculate_normals, + fov=1.0472, + z_near=bounding_radius * 1, + z_far=bounding_radius * 3 + )) + + return SurfacePointCloud(mesh, + points=np.concatenate([scan.points for scan in scans], axis=0), + normals=np.concatenate([scan.normals for scan in scans], axis=0) if calculate_normals else None, + scans=scans + ) + +def sample_from_mesh(mesh, sample_point_count=10000000, calculate_normals=True): + points, face_indices = 
trimesh.sample.sample_surface(mesh=mesh, count=sample_point_count, face_weight=None, seed=0) + if calculate_normals: + normals = mesh.face_normals[face_indices] + + return SurfacePointCloud(mesh, + points=points, + normals=normals if calculate_normals else None, + scans=None + ) \ No newline at end of file diff --git a/worldgen/terrain/mesh_to_sdf/utils.py b/worldgen/terrain/mesh_to_sdf/utils.py new file mode 100644 index 000000000..0f75d81d8 --- /dev/null +++ b/worldgen/terrain/mesh_to_sdf/utils.py @@ -0,0 +1,66 @@ +# COPYRIGHT + +# Original files authored by Marian Kleineberg: https://github.com/marian42/mesh_to_sdf/tree/master + +import trimesh +import numpy as np + +def scale_to_unit_sphere(mesh): + if isinstance(mesh, trimesh.Scene): + mesh = mesh.dump().sum() + + vertices = mesh.vertices - mesh.bounding_box.centroid + distances = np.linalg.norm(vertices, axis=1) + vertices /= np.max(distances) + + return trimesh.Trimesh(vertices=vertices, faces=mesh.faces) + +def scale_to_unit_cube(mesh): + if isinstance(mesh, trimesh.Scene): + mesh = mesh.dump().sum() + + vertices = mesh.vertices - mesh.bounding_box.centroid + vertices *= 2 / np.max(mesh.bounding_box.extents) + + return trimesh.Trimesh(vertices=vertices, faces=mesh.faces) + +voxel_points = dict() + +def get_raster_points(voxel_resolution): + if voxel_resolution in voxel_points: + return voxel_points[voxel_resolution] + + points = np.meshgrid( + np.linspace(-1, 1, voxel_resolution), + np.linspace(-1, 1, voxel_resolution), + np.linspace(-1, 1, voxel_resolution) + ) + points = np.stack(points) + points = np.swapaxes(points, 1, 2) + points = points.reshape(3, -1).transpose().astype(np.float32) + + voxel_points[voxel_resolution] = points + return points + +def check_voxels(voxels): + block = voxels[:-1, :-1, :-1] + d1 = (block - voxels[1:, :-1, :-1]).reshape(-1) + d2 = (block - voxels[:-1, 1:, :-1]).reshape(-1) + d3 = (block - voxels[:-1, :-1, 1:]).reshape(-1) + + max_distance = max(np.max(d1), np.max(d2), np.max(d3)) + return max_distance < 2.0 / voxels.shape[0] * 3**0.5 * 1.1 + +def sample_uniform_points_in_unit_sphere(amount): + unit_sphere_points = np.random.uniform(-1, 1, size=(amount * 2 + 20, 3)) + unit_sphere_points = unit_sphere_points[np.linalg.norm(unit_sphere_points, axis=1) < 1] + + points_available = unit_sphere_points.shape[0] + if points_available < amount: + # This is a fallback for the rare case that too few points are inside the unit sphere + result = np.zeros((amount, 3)) + result[:points_available, :] = unit_sphere_points + result[points_available:, :] = sample_uniform_points_in_unit_sphere(amount - points_available) + return result + else: + return unit_sphere_points[:amount, :] \ No newline at end of file diff --git a/worldgen/terrain/source/cpu/soil_machine/particle/water.h b/worldgen/terrain/source/cpu/soil_machine/particle/water.h index ed98d8bd9..aa5fbc897 100644 --- a/worldgen/terrain/source/cpu/soil_machine/particle/water.h +++ b/worldgen/terrain/source/cpu/soil_machine/particle/water.h @@ -96,7 +96,8 @@ struct WaterParticle : public Particle { return false; //Motion Low - speed = mix(vec2(n.x, n.z), speed, param.friction); + // speed = mix(vec2(n.x, n.z), speed, param.friction); + speed = vec2(n.x, n.z) * float(1 - param.friction) + speed * float(param.friction); speed = sqrt(2.0f)*normalize(speed); pos += speed; diff --git a/worldgen/terrain/source/cpu/soil_machine/particle/wind.h b/worldgen/terrain/source/cpu/soil_machine/particle/wind.h index 116a1e36a..5126c62e8 100644 --- 
a/worldgen/terrain/source/cpu/soil_machine/particle/wind.h
+++ b/worldgen/terrain/source/cpu/soil_machine/particle/wind.h
@@ -89,9 +89,11 @@ struct WindParticle : public Particle {
     if(height > sheight)    //Flying Movement
       speed.y -= gravity;   //Gravity
     else                    //Contact Movement
-      speed = mix(speed, cross(cross(speed,n),n), windfriction);
+      // speed = mix(speed, cross(cross(speed,n),n), windfriction);
+      speed = speed * float(1 - windfriction) + cross(cross(speed,n),n) * float(windfriction);
 
-    speed = mix(speed, pspeed, winddominance);
+    // speed = mix(speed, pspeed, winddominance);
+    speed = speed * float(1 - winddominance) + pspeed * float(winddominance);
 
     pos += vec2(speed.x, speed.z);
     height += speed.y;
diff --git a/worldgen/terrain/utils/mesh.py b/worldgen/terrain/utils/mesh.py
index 2a92fbd58..33aea4001 100644
--- a/worldgen/terrain/utils/mesh.py
+++ b/worldgen/terrain/utils/mesh.py
@@ -56,6 +56,13 @@ def object_from_VF(name, vertices, faces):
     new_object.rotation_euler = (0, 0, 0)
     return new_object
 
+def convert_face_array(face_array):
+    l = face_array.shape[0]
+    min_indices = np.argmin(face_array, axis=1)
+    u = face_array[list(np.arange(l)), min_indices]
+    v = face_array[list(np.arange(l)), (min_indices + 1) % 3]
+    w = face_array[list(np.arange(l)), (min_indices + 2) % 3]
+    return np.stack([u, v, w], -1)
 
 class Mesh:
     def __init__(self, normal_mode=NormalMode.Mean,
@@ -151,6 +158,7 @@ def make_unique(self):
         self.faces = np.argsort(sorted_indices)[self.faces]
         for attr_name in self.vertex_attributes:
             self.vertex_attributes[attr_name] = self.vertex_attributes[attr_name][sorted_indices]
+        self.faces = convert_face_array(self.faces)
         transposed_array = self.faces.T
         sorted_indices = np.lexsort(transposed_array)
         self.faces = self.faces[sorted_indices]
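
The vendored worldgen/terrain/mesh_to_sdf package keeps the upstream mesh_to_sdf API, so callers such as worldgen/terrain/assets/caves/core.py only need the import swap shown above (import terrain.mesh_to_sdf as mesh_to_sdf). The mix() replacements in water.h and wind.h simply expand GLM's mix(a, b, t) = a*(1-t) + b*t by hand, so the particle behavior is unchanged. Below is a minimal usage sketch of the vendored module, assuming worldgen/ is on sys.path and a working pyrender/OpenGL context is available (the scan-based method renders virtual depth scans, which is why pyrender was added to requirements.txt); the icosphere and query grid are illustrative, not taken from this changeset:

    import numpy as np
    import trimesh
    import terrain.mesh_to_sdf as mesh_to_sdf

    # Illustrative inputs: a unit icosphere and random query points around it.
    mesh = trimesh.creation.icosphere(subdivisions=2, radius=1.0)
    query_points = np.random.uniform(-1.5, 1.5, size=(1000, 3)).astype(np.float32)

    # Signed distances: negative inside the surface, positive outside.
    sdf = mesh_to_sdf.mesh_to_sdf(mesh, query_points,
                                  surface_point_method='scan', sign_method='normal')
    print(sdf.shape)  # (1000,)

The new convert_face_array helper in worldgen/terrain/utils/mesh.py rotates each face so that its smallest vertex index comes first while preserving winding order; make_unique applies it before the lexsort so that duplicate faces become identical rows. A small self-contained illustration (the function body is copied from the diff; the input array is made up):

    import numpy as np

    def convert_face_array(face_array):
        # Cyclically rotate each face so the smallest vertex index is first.
        l = face_array.shape[0]
        min_indices = np.argmin(face_array, axis=1)
        u = face_array[list(np.arange(l)), min_indices]
        v = face_array[list(np.arange(l)), (min_indices + 1) % 3]
        w = face_array[list(np.arange(l)), (min_indices + 2) % 3]
        return np.stack([u, v, w], -1)

    faces = np.array([[5, 2, 7], [2, 7, 5]])  # the same triangle, two cyclic rotations
    print(convert_face_array(faces))          # both rows become [2 7 5]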