From b89c3a525a7d58c7acd52bd445c6b01ac058ab98 Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 26 Jan 2021 20:56:58 -0800 Subject: [PATCH 01/24] Kick it off with a model00p reader. Right now it's a copy of pc ltb. --- src/reader_model00p_pc.py | 631 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 631 insertions(+) create mode 100644 src/reader_model00p_pc.py diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py new file mode 100644 index 0000000..35a53ea --- /dev/null +++ b/src/reader_model00p_pc.py @@ -0,0 +1,631 @@ +import os +from .abc import * +from .io import unpack +from mathutils import Vector, Matrix, Quaternion + +# LTB Mesh Types +LTB_Type_Rigid_Mesh = 4 +LTB_Type_Skeletal_Mesh = 5 +LTB_Type_Vertex_Animated_Mesh = 6 +LTB_Type_Null_Mesh = 7 + +# Data stream flags +VTX_Position = 0x0001 +VTX_Normal = 0x0002 +VTX_Colour = 0x0004 +VTX_UV_Sets_1 = 0x0010 +VTX_UV_Sets_2 = 0x0020 +VTX_UV_Sets_3 = 0x0040 +VTX_UV_Sets_4 = 0x0080 +VTX_BasisVector = 0x0100 + +# Animation Compression Types +CMP_None = 0 +CMP_Relevant = 1 +CMP_Relevant_16 = 2 +CMP_Relevant_Rot16 = 3 + +Invalid_Bone = 255 + +# +# Supports Model00p v33 (FEAR) +# +class PCModel00PackedReader(object): + def __init__(self): + + self.version = 0 + self.node_count = 0 + self.lod_count = 0 + + def _read_matrix(self, f): + data = unpack('16f', f) + rows = [data[0:4], data[4:8], data[8:12], data[12:16]] + return Matrix(rows) + + def _read_vector(self, f): + return Vector(unpack('3f', f)) + + def _read_quaternion(self, f): + x, y, z, w = unpack('4f', f) + return Quaternion((w, x, y, z)) + + def _read_string(self, f): + return f.read(unpack('H', f)[0]).decode('ascii') + + def _read_weight(self, f): + weight = Weight() + weight.node_index = unpack('I', f)[0] + weight.location = self._read_vector(f) + weight.bias = unpack('f', f)[0] + return weight + + def _read_vertex(self, f): + vertex = Vertex() + weight_count = unpack('H', f)[0] + vertex.sublod_vertex_index = unpack('H', f)[0] + vertex.weights 
= [self._read_weight(f) for _ in range(weight_count)] + vertex.location = self._read_vector(f) + vertex.normal = self._read_vector(f) + return vertex + + def _read_face_vertex(self, f): + face_vertex = FaceVertex() + face_vertex.texcoord.xy = unpack('2f', f) + face_vertex.vertex_index = unpack('H', f)[0] + return face_vertex + + def _read_face(self, f): + face = Face() + face.vertices = [self._read_face_vertex(f) for _ in range(3)] + return face + + def _read_null_mesh(self, lod, f): + # No data here but a filler int! + f.seek(4, 1) + return lod + + def _read_rigid_mesh(self, lod, f): + data_type = unpack('4I', f) + bone = unpack('I', f)[0] + + # We need face vertex data alongside vertices! + face_vertex_list = [] + + for mask in data_type: + for _ in range(lod.vert_count): + vertex = Vertex() + face_vertex = FaceVertex() + + # Dirty flags + is_vertex_used = False + is_face_vertex_used = False + + if mask & VTX_Position: + vertex.location = self._read_vector(f) + + # One bone per vertex + weight = Weight() + weight.node_index = bone + weight.bias = 1.0 + + vertex.weights.append(weight) + + is_vertex_used = True + if mask & VTX_Normal: + vertex.normal = self._read_vector(f) + is_vertex_used = True + if mask & VTX_Colour: + vertex.colour = unpack('i', f)[0] + is_vertex_used = True + if mask & VTX_UV_Sets_1: + face_vertex.texcoord.xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_2: + face_vertex.extra_texcoords[0].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_3: + face_vertex.extra_texcoords[1].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_4: + face_vertex.extra_texcoords[2].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_BasisVector: + vertex.s = self._read_vector(f) + vertex.t = self._read_vector(f) + is_vertex_used = True + # End If + + if is_vertex_used: + lod.vertices.append(vertex) + + if is_face_vertex_used: + face_vertex_list.append(face_vertex) + + # End For + # 
End For + + # Make sure our stuff is good!! + print ("Vert Count Check: %d/%d" % (lod.vert_count, len(lod.vertices))) + assert(lod.vert_count == len(lod.vertices)) + + # We need a "global" face, we'll fill it and re-use it. + face = Face() + for _ in range(lod.face_count * 3): + vertex_index = unpack('H', f)[0] + + face_vertex = face_vertex_list[vertex_index] + face_vertex.vertex_index = vertex_index + + # If we have room, append! + if len(face.vertices) < 3: + face.vertices.append(face_vertex) + # End If + + # If we're now over, then flush! + if len(face.vertices) >= 3: + lod.faces.append(face) + # Make a new face, and append our face vertex + face = Face() + # End If + # End For + + # Make sure our stuff is good!! + print ("Face Count Check: %d/%d" % (lod.face_count, len(lod.faces))) + assert(lod.face_count == len(lod.faces)) + + return lod + + def _read_skeletal_mesh(self, lod, f): + reindexed_bone = unpack('B', f)[0] + data_type = unpack('4I', f) + + matrix_palette = unpack('B', f)[0] + + print("Matrix Palette? %d" % matrix_palette) + + # We need face vertex data alongside vertices! + face_vertex_list = [] + + for mask in data_type: + for _ in range(lod.vert_count): + vertex = Vertex() + face_vertex = FaceVertex() + + # Dirty flags + is_vertex_used = False + is_face_vertex_used = False + + if mask & VTX_Position: + vertex.location = self._read_vector(f) + is_vertex_used = True + + weights = [] + + weight = Weight() + weight.bias = 1.0 + + for i in range(lod.max_bones_per_face): + # Skip the first one + if i == 0: + continue + # End If + + # There's 3 additional blends, + # If ... 
max_bones_per_face >= 2,3,4 + if lod.max_bones_per_face >= (i+1): + blend = unpack('f', f)[0] + weight.bias -= blend + + blend_weight = Weight() + blend_weight.bias = blend + weights.append(blend_weight) + # End If + # End For + + weights.append(weight) + + vertex.weights = weights + if mask & VTX_Normal: + vertex.normal = self._read_vector(f) + is_vertex_used = True + if mask & VTX_Colour: + vertex.colour = unpack('i', f)[0] + is_vertex_used = True + if mask & VTX_UV_Sets_1: + face_vertex.texcoord.xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_2: + face_vertex.extra_texcoords[0].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_3: + face_vertex.extra_texcoords[1].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_UV_Sets_4: + face_vertex.extra_texcoords[2].xy = unpack('2f', f) + is_face_vertex_used = True + if mask & VTX_BasisVector: + vertex.s = self._read_vector(f) + vertex.t = self._read_vector(f) + is_vertex_used = True + # End If + + if is_vertex_used: + lod.vertices.append(vertex) + + if is_face_vertex_used: + face_vertex_list.append(face_vertex) + + # End For + # End For + + # Make sure our stuff is good!! + print ("Vert Count Check: %d/%d" % (lod.vert_count, len(lod.vertices))) + assert(lod.vert_count == len(lod.vertices)) + + # We need a "global" face, we'll fill it and re-use it. + face = Face() + for _ in range(lod.face_count * 3): + vertex_index = unpack('H', f)[0] + + face_vertex = face_vertex_list[vertex_index] + face_vertex.vertex_index = vertex_index + + # If we have room, append! + if len(face.vertices) < 3: + face.vertices.append(face_vertex) + # End If + + # If we're now over, then flush! + if len(face.vertices) >= 3: + lod.faces.append(face) + # Make a new face, and append our face vertex + face = Face() + # End If + # End For + + # Make sure our stuff is good!! 
+ print ("Face Count Check: %d/%d" % (lod.face_count, len(lod.faces))) + assert(lod.face_count == len(lod.faces)) + + bone_set_count = unpack('I', f)[0] + + for _ in range(bone_set_count): + index_start = unpack('H', f)[0] + index_count = unpack('H', f)[0] + + bone_list = unpack('4B', f) + + # ??? + index_buffer_index = unpack('I', f)[0] + + # Okay, now we can fill up our node indexes! + for vertex_index in range(index_start, index_start + index_count): + vertex = lod.vertices[vertex_index] + + # We need to re-build the weight list for our vertex + weights = [] + + for (index, bone_index) in enumerate(bone_list): + # If we've got an invalid bone (255) then ignore it + if bone_index == Invalid_Bone: + continue + # End If + + vertex.weights[index].node_index = bone_index + # Keep this one! + weights.append(vertex.weights[index]) + # End For + + total = 0.0 + for weight in weights: + total += weight.bias + + assert(total != 0.0) + + vertex.weights = weights + #End For + # End For + + return lod + + def _read_lod(self, f): + lod = LOD() + + lod.texture_count = unpack('I', f)[0] + lod.textures = unpack('4I', f) + lod.render_style = unpack('I', f)[0] + lod.render_priority = unpack('b', f)[0] + + lod.type = unpack('I', f)[0] + + # Check if it's a null mesh, it skips a lot of the data... + if lod.type == LTB_Type_Null_Mesh: + # Early return here, because there's no more data... 
+ lod = self._read_null_mesh(lod, f) + else: + # Some common data + obj_size = unpack('I', f)[0] + lod.vert_count = unpack('I', f)[0] + lod.face_count = unpack('I', f)[0] + lod.max_bones_per_face = unpack('I', f)[0] + lod.max_bones_per_vert = unpack('I', f)[0] + + if lod.type == LTB_Type_Rigid_Mesh: + lod = self._read_rigid_mesh(lod, f) + elif lod.type == LTB_Type_Skeletal_Mesh: + lod = self._read_skeletal_mesh(lod, f) + + nodes_used_count = unpack('B', f)[0] + nodes_used = [unpack('B', f)[0] for _ in range(nodes_used_count)] + + return lod + + def _read_piece(self, f): + piece = Piece() + + piece.name = self._read_string(f) + lod_count = unpack('I', f)[0] + piece.lod_distances = [unpack('f', f)[0] for _ in range(lod_count)] + piece.lod_min = unpack('I', f)[0] + piece.lod_max = unpack('I', f)[0] + piece.lods = [self._read_lod(f) for _ in range(lod_count)] + + # Just use the first LODs first texture + if lod_count > 0: + piece.material_index = piece.lods[0].textures[0] + + return piece + + def _read_node(self, f): + node = Node() + node.name = self._read_string(f) + node.index = unpack('H', f)[0] + node.flags = unpack('b', f)[0] + node.bind_matrix = self._read_matrix(f) + node.inverse_bind_matrix = node.bind_matrix.inverted() + node.child_count = unpack('I', f)[0] + return node + + def _read_uncompressed_transform(self, f): + transform = Animation.Keyframe.Transform() + + transform.location = self._read_vector(f) + transform.rotation = self._read_quaternion(f) + + return transform + + def _process_compressed_vector(self, compressed_vector): + return Vector( (compressed_vector[0] / 16.0, compressed_vector[1] / 16.0, compressed_vector[2] / 16.0) ) + + def _process_compressed_quat(self, compressed_quat): + return Quaternion( (compressed_quat[3] / 0x7FFF, compressed_quat[0] / 0x7FFF, compressed_quat[1] / 0x7FFF, compressed_quat[2] / 0x7FFF) ) + + def _read_compressed_transform(self, compression_type, keyframe_count, f): + + node_transforms = [] + + for _ in 
range(self.node_count): + # RLE! + key_position_count = unpack('I', f)[0] + + compressed_positions = [] + if compression_type == CMP_Relevant or compression_type == CMP_Relevant_Rot16: + compressed_positions = [self._read_vector(f) for _ in range(key_position_count)] + elif compression_type == CMP_Relevant_16: + compressed_positions = [self._process_compressed_vector(unpack('3h', f)) for _ in range(key_position_count)] + # End If + + key_rotation_count = unpack('I', f)[0] + + compressed_rotations = [] + if compression_type == CMP_Relevant: + compressed_rotations = [self._read_quaternion(f) for _ in range(key_rotation_count)] + elif compression_type == CMP_Relevant_16 or compression_type == CMP_Relevant_Rot16: + compressed_rotations = [self._process_compressed_quat(unpack('4h', f)) for _ in range(key_rotation_count)] + # End If + + transforms = [] + + previous_position = Vector( (0, 0, 0) ) + previous_rotation = Quaternion( (1, 0, 0, 0) ) + + # RLE animations, if it doesn't change in any additional keyframe, + # then it we can just use the last known pos/rot! 
+ for i in range(keyframe_count): + transform = Animation.Keyframe.Transform() + + try: + transform.location = compressed_positions[i] + except IndexError: + transform.location = previous_position + + try: + transform.rotation = compressed_rotations[i] + except IndexError: + transform.rotation = previous_rotation + + previous_position = transform.location + previous_rotation = transform.rotation + + transforms.append(transform) + # End For + + node_transforms.append(transforms) + # End For + + return node_transforms + + def _read_child_model(self, f): + child_model = ChildModel() + child_model.name = self._read_string(f) + return child_model + + def _read_keyframe(self, f): + keyframe = Animation.Keyframe() + keyframe.time = unpack('I', f)[0] + keyframe.string = self._read_string(f) + return keyframe + + def _read_animation(self, f): + animation = Animation() + animation.extents = self._read_vector(f) + animation.name = self._read_string(f) + animation.compression_type = unpack('i', f)[0] + animation.interpolation_time = unpack('I', f)[0] + animation.keyframe_count = unpack('I', f)[0] + animation.keyframes = [self._read_keyframe(f) for _ in range(animation.keyframe_count)] + animation.node_keyframe_transforms = [] + + if animation.compression_type == CMP_None: + for _ in range(self.node_count): + animation.is_vertex_animation = unpack('b', f)[0] + + # We don't support vertex animations yet, so alert if we accidentally load some! 
+ assert(animation.is_vertex_animation == 0) + + animation.node_keyframe_transforms.append( + [self._read_uncompressed_transform(f) for _ in range(animation.keyframe_count)]) + # End For + else: + animation.node_keyframe_transforms = self._read_compressed_transform(animation.compression_type, animation.keyframe_count, f) + # End If + + return animation + + def _read_socket(self, f): + socket = Socket() + socket.node_index = unpack('I', f)[0] + socket.name = self._read_string(f) + socket.rotation = self._read_quaternion(f) + socket.location = self._read_vector(f) + socket.scale = self._read_vector(f) + return socket + + def _read_anim_binding(self, f): + anim_binding = AnimBinding() + anim_binding.name = self._read_string(f) + anim_binding.extents = self._read_vector(f) + anim_binding.origin = self._read_vector(f) + return anim_binding + + def _read_weight_set(self, f): + weight_set = WeightSet() + weight_set.name = self._read_string(f) + node_count = unpack('I', f)[0] + weight_set.node_weights = [unpack('f', f)[0] for _ in range(node_count)] + return weight_set + + def from_file(self, path): + model = Model() + model.name = os.path.splitext(os.path.basename(path))[0] + with open(path, 'rb') as f: + + # + # HEADER + # + file_type = unpack('H', f)[0] + file_version = unpack('H', f)[0] + + if file_type is not 1: + raise Exception('Unsupported File Type! Only mesh LTB files are supported.') + # End If + + if file_version is not 9: + raise Exception('Unsupported File Version! 
Importer currently only supports v9.') + # End If + + # Skip 4 ints + f.seek(4 * 4, 1) + + self.version = unpack('i', f)[0] + + if self.version not in [23, 24, 25]: + raise Exception('Unsupported file version ({}).'.format(self.version)) + # End If + + model.version = self.version + + keyframe_count = unpack('i', f)[0] + animation_count = unpack('i', f)[0] + self.node_count = unpack('i', f)[0] + piece_count = unpack('i', f)[0] + child_model_count = unpack('i', f)[0] + face_count = unpack('i', f)[0] + vertex_count = unpack('i', f)[0] + vertex_weight_count = unpack('i', f)[0] + lod_count = unpack('i', f)[0] + socket_count = unpack('i', f)[0] + weight_set_count = unpack('i', f)[0] + string_count = unpack('i', f)[0] + string_length = unpack('i', f)[0] + vertex_animation_data_size = unpack('i', f)[0] + animation_data_size = unpack('i', f)[0] + + model.command_string = self._read_string(f) + + model.internal_radius = unpack('f', f)[0] + + # + # OBB Information + # + obb_count = unpack('i', f)[0] + + obb_size = 64 + + if self.version > 23: + obb_size += 4 + + # OBB information is a matrix per each node + # We don't use it anywhere, so just skip it. + f.seek(obb_size * obb_count, 1) + + # + # Pieces + # + + # Yep again! 
+ piece_count = unpack('i', f)[0] + model.pieces = [self._read_piece(f) for _ in range(piece_count)] + + # + # Nodes + # + model.nodes = [self._read_node(f) for _ in range(self.node_count)] + build_undirected_tree(model.nodes) + weight_set_count = unpack('I', f)[0] + model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] + + # + # Child Models + # + child_model_count = unpack('I', f)[0] + model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] + + # + # Animations + # + animation_count = unpack('I', f)[0] + model.animations = [self._read_animation(f) for _ in range(animation_count)] + + # + # Sockets + # + socket_count = unpack('I', f)[0] + model.sockets = [self._read_socket(f) for _ in range(socket_count)] + + # + # Animation Bindings + # + anim_binding_count = unpack('I', f)[0] + + #model.anim_bindings = [self._read_anim_binding(f) for _ in range(anim_binding_count)] + + for _ in range(anim_binding_count): + # Some LTB animation binding information can be incorrect... + # Almost like the mesh was accidentally cut off, very odd! 
+ try: + model.anim_bindings.append(self._read_anim_binding(f)) + except Exception: + pass + + return model From ea1723e699a8872171c86fc1aeca4b680ecea608 Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 26 Jan 2021 23:14:48 -0800 Subject: [PATCH 02/24] Hook up the import, load in some nodes and start cracking at the animation compression --- src/__init__.py | 5 +- src/importer.py | 54 +++++ src/reader_model00p_pc.py | 425 +++++++++++++++++++++++++++++--------- 3 files changed, 384 insertions(+), 100 deletions(-) diff --git a/src/__init__.py b/src/__init__.py index eeb4d0a..83a1af3 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -2,7 +2,7 @@ 'name': 'Lithtech Tools', 'description': 'Import and export various Lithtech models and animations files.', 'author': 'Colin Basnett and HeyJake', - 'version': (1, 1, 0), + 'version': (1, 2, 0), 'blender': (2, 80, 0), 'location': 'File > Import-Export', 'warning': 'This add-on is under development.', @@ -47,6 +47,7 @@ classes = ( importer.ImportOperatorABC, importer.ImportOperatorLTB, + importer.ImportOperatorModel00p, exporter.ExportOperatorABC, exporter.ExportOperatorLTA, converter.ConvertPCLTBToLTA, @@ -60,6 +61,7 @@ def register(): # Import options bpy.types.TOPBAR_MT_file_import.append(importer.ImportOperatorABC.menu_func_import) bpy.types.TOPBAR_MT_file_import.append(importer.ImportOperatorLTB.menu_func_import) + bpy.types.TOPBAR_MT_file_import.append(importer.ImportOperatorModel00p.menu_func_import) # Export options bpy.types.TOPBAR_MT_file_export.append(exporter.ExportOperatorABC.menu_func_export) @@ -77,6 +79,7 @@ def unregister(): # Import options bpy.types.TOPBAR_MT_file_import.remove(importer.ImportOperatorABC.menu_func_import) bpy.types.TOPBAR_MT_file_import.remove(importer.ImportOperatorLTB.menu_func_import) + bpy.types.TOPBAR_MT_file_import.remove(importer.ImportOperatorModel00p.menu_func_import) # Export options bpy.types.TOPBAR_MT_file_export.remove(exporter.ExportOperatorABC.menu_func_export) diff --git 
a/src/importer.py b/src/importer.py index b723d8f..6e96694 100644 --- a/src/importer.py +++ b/src/importer.py @@ -15,6 +15,8 @@ from .reader_ltb_pc import PCLTBModelReader from .reader_ltb_ps2 import PS2LTBModelReader +from .reader_model00p_pc import PCModel00PackedReader + from . import utils @@ -697,3 +699,55 @@ def execute(self, context): @staticmethod def menu_func_import(self, context): self.layout.operator(ImportOperatorLTB.bl_idname, text='Lithtech LTB (.ltb)') + + +class ImportOperatorModel00p(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): + """Loads Jupiter EX model00p files.""" + bl_idname = 'io_scene_lithtech.model00p_import' # important since its how bpy.ops.import_test.some_data is constructed + bl_label = 'Import Lithtech Model00p' + bl_space_type = 'PROPERTIES' + bl_region_type = 'WINDOW' + + # ImportHelper mixin class uses this + filename_ext = ".model00p" + + filter_glob: StringProperty( + default="*.model00p", + options={'HIDDEN'}, + maxlen=255, # Max internal buffer length, longer would be clamped. 
+ ) + + def draw(self, context): + layout = self.layout + + def execute(self, context): + + + # Load the model + model = PCModel00PackedReader().from_file(self.filepath) + + # Load the model + #try: + #model = PS2LTBModelReader().from_file(self.filepath) + #except Exception as e: + # show_message_box(str(e), "Read Error", 'ERROR') + # return {'CANCELLED'} + + model.name = os.path.splitext(os.path.basename(self.filepath))[0] + image = None + + options = ModelImportOptions() + + options.image = image + #try: + # import_model(model, options) + #except Exception as e: + # show_message_box(str(e), "Import Error", 'ERROR') + # return {'CANCELLED'} + import_model(model, options) + + return {'FINISHED'} + + @staticmethod + def menu_func_import(self, context): + self.layout.operator(ImportOperatorModel00p.bl_idname, text='Lithtech Model00p (.model00p)') diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 35a53ea..a5b0def 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -32,37 +32,74 @@ # class PCModel00PackedReader(object): def __init__(self): + self.is_little_endian = True self.version = 0 self.node_count = 0 self.lod_count = 0 + self.string_table = "" + + # + # Wrapper around .io.unpack that can eventually handle big-endian reads. + # + def _unpack(self, fmt, f): + + # Force big endian if we're not little! + if self.is_little_endian == False: + fmt = '>%s' % fmt + + return unpack(fmt, f) + + def _get_string_from_table(self, offset): + value = self.string_table[offset:] + + # Okay we need to find the next null character now! 
+ null_terminator = -1 + for (index, char) in enumerate(value): + if char == '\x00': + null_terminator = index + break + + # Make sure we actually ran through the string + assert(null_terminator != -1) + + length = offset + null_terminator + + return self.string_table[offset:length] def _read_matrix(self, f): - data = unpack('16f', f) + data = self._unpack('16f', f) rows = [data[0:4], data[4:8], data[8:12], data[12:16]] return Matrix(rows) def _read_vector(self, f): - return Vector(unpack('3f', f)) + return Vector(self._unpack('3f', f)) + + def _read_short_quaternion(self, f): + x, y, z, w = self._unpack('4H', f) + return [w,x,y,z]#Quaternion((w, x, y, z)) def _read_quaternion(self, f): - x, y, z, w = unpack('4f', f) + x, y, z, w = self._unpack('4f', f) return Quaternion((w, x, y, z)) def _read_string(self, f): - return f.read(unpack('H', f)[0]).decode('ascii') + return f.read(self._unpack('H', f)[0]).decode('ascii') + + def _read_fixed_string(self, length, f): + return f.read(length).decode('ascii') def _read_weight(self, f): weight = Weight() - weight.node_index = unpack('I', f)[0] + weight.node_index = self._unpack('I', f)[0] weight.location = self._read_vector(f) - weight.bias = unpack('f', f)[0] + weight.bias = self._unpack('f', f)[0] return weight def _read_vertex(self, f): vertex = Vertex() - weight_count = unpack('H', f)[0] - vertex.sublod_vertex_index = unpack('H', f)[0] + weight_count = self._unpack('H', f)[0] + vertex.sublod_vertex_index = self._unpack('H', f)[0] vertex.weights = [self._read_weight(f) for _ in range(weight_count)] vertex.location = self._read_vector(f) vertex.normal = self._read_vector(f) @@ -70,8 +107,8 @@ def _read_vertex(self, f): def _read_face_vertex(self, f): face_vertex = FaceVertex() - face_vertex.texcoord.xy = unpack('2f', f) - face_vertex.vertex_index = unpack('H', f)[0] + face_vertex.texcoord.xy = self._unpack('2f', f) + face_vertex.vertex_index = self._unpack('H', f)[0] return face_vertex def _read_face(self, f): @@ -378,12 
+415,21 @@ def _read_piece(self, f): def _read_node(self, f): node = Node() - node.name = self._read_string(f) - node.index = unpack('H', f)[0] - node.flags = unpack('b', f)[0] - node.bind_matrix = self._read_matrix(f) + name_offset = self._unpack('I', f)[0] + node.name = self._get_string_from_table(name_offset) + node.index = self._unpack('H', f)[0] + node.flags = self._unpack('b', f)[0] + + location = self._read_vector(f) + rotation = self._read_quaternion(f) + + # Transform location/rotation into a bind matrix! + mat_rot = rotation.to_matrix() + mat_loc = Matrix.Translation(location) + node.bind_matrix = mat_loc @ mat_rot.to_4x4() + node.inverse_bind_matrix = node.bind_matrix.inverted() - node.child_count = unpack('I', f)[0] + node.child_count = self._unpack('I', f)[0] return node def _read_uncompressed_transform(self, f): @@ -520,112 +566,293 @@ def from_file(self, path): model.name = os.path.splitext(os.path.basename(path))[0] with open(path, 'rb') as f: - # - # HEADER - # - file_type = unpack('H', f)[0] - file_version = unpack('H', f)[0] - - if file_type is not 1: - raise Exception('Unsupported File Type! Only mesh LTB files are supported.') - # End If + file_format = self._read_fixed_string(4, f) - if file_version is not 9: - raise Exception('Unsupported File Version! Importer currently only supports v9.') + # Are we big-endian? + if file_format == "LDOM": + print("!! Big-endian Model00p loaded. Haven't tested this yet, may be bugs!!!") + self.is_little_endian = False + # No, then make sure we're little endian + elif file_format != "MODL": + raise Exception('Unsupported File Format! Only Model00p files are supported.') # End If - # Skip 4 ints - f.seek(4 * 4, 1) + self.version = self._unpack('I', f)[0] - self.version = unpack('i', f)[0] - - if self.version not in [23, 24, 25]: - raise Exception('Unsupported file version ({}).'.format(self.version)) + # Fear for now! + if self.version is not 33: + raise Exception('Unsupported File Version! 
Importer currently only supports v33.') # End If model.version = self.version - keyframe_count = unpack('i', f)[0] - animation_count = unpack('i', f)[0] - self.node_count = unpack('i', f)[0] - piece_count = unpack('i', f)[0] - child_model_count = unpack('i', f)[0] - face_count = unpack('i', f)[0] - vertex_count = unpack('i', f)[0] - vertex_weight_count = unpack('i', f)[0] - lod_count = unpack('i', f)[0] - socket_count = unpack('i', f)[0] - weight_set_count = unpack('i', f)[0] - string_count = unpack('i', f)[0] - string_length = unpack('i', f)[0] - vertex_animation_data_size = unpack('i', f)[0] - animation_data_size = unpack('i', f)[0] - - model.command_string = self._read_string(f) - - model.internal_radius = unpack('f', f)[0] + keyframe_count = self._unpack('I', f)[0] + animation_count = self._unpack('I', f)[0] + self.node_count = self._unpack('I', f)[0] + piece_count = self._unpack('I', f)[0] + child_model_count = self._unpack('I', f)[0] + self.lod_count = self._unpack('I', f)[0] + socket_count = self._unpack('I', f)[0] + animation_weight_count = self._unpack('I', f)[0] + unk_8 = self._unpack('I', f)[0] + string_data_length = self._unpack('I', f)[0] + physics_weight_count = self._unpack('I', f)[0] + physics_shape_count = self._unpack('I', f)[0] + unk_12 = self._unpack('I', f)[0] # ?? + unk_13 = self._unpack('I', f)[0] # ?? 
+ # Physics Constraints + stiff_sprint_constraint_count = self._unpack('I', f)[0] + hinge_constraint_count = self._unpack('I', f)[0] + limited_hinge_constraint_count = self._unpack('I', f)[0] + ragdoll_constraint_count = self._unpack('I', f)[0] + wheel_constraint_count = self._unpack('I', f)[0] + prismatic_constraint_count = self._unpack('I', f)[0] + # End + animation_data_length = self._unpack('I', f)[0] + self.string_table = self._read_fixed_string(string_data_length, f) # - # OBB Information + # Nodes # - obb_count = unpack('i', f)[0] - - obb_size = 64 + model.nodes = [self._read_node(f) for _ in range(self.node_count)] + build_undirected_tree(model.nodes) - if self.version > 23: - obb_size += 4 + # + # Animations + # + unknown = self._unpack('I', f)[0] - # OBB information is a matrix per each node - # We don't use it anywhere, so just skip it. - f.seek(obb_size * obb_count, 1) + # What is it? We'll find out...one day... + if unknown != 0: + print("Unknown animation value is not 0! It's %d" % unknown) - # - # Pieces # + current_data_read = 0 - # Yep again! - piece_count = unpack('i', f)[0] - model.pieces = [self._read_piece(f) for _ in range(piece_count)] + # RLE + # Process lists, TRUE if the value is there, FALSE if it's assumed data. + process_location = [] + process_rotation = [] - # - # Nodes - # - model.nodes = [self._read_node(f) for _ in range(self.node_count)] - build_undirected_tree(model.nodes) - weight_set_count = unpack('I', f)[0] - model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] + # TODO: This should be a per each animation loop starting here! - # - # Child Models - # - child_model_count = unpack('I', f)[0] - model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] - # - # Animations - # - animation_count = unpack('I', f)[0] - model.animations = [self._read_animation(f) for _ in range(animation_count)] + # Track 1 seems to be all rotation data up front? 
+ # Track 2 seems to be Location/Rotation data - # - # Sockets - # - socket_count = unpack('I', f)[0] - model.sockets = [self._read_socket(f) for _ in range(socket_count)] + # Or that's suppose to how it goes, but I don't even know maaan. + track_1_size = self._unpack('H', f)[0] + track_2_size = self._unpack('H', f)[0] - # - # Animation Bindings - # - anim_binding_count = unpack('I', f)[0] + combined_track_size = track_1_size + track_2_size + + + # Data is formatted like... + # ((Location, Rotation) * NodeCount) * KeyframeCount + + is_location = True + while current_data_read < combined_track_size: + flag = self._unpack('H', f)[0] + + # Carry over data from previous frame + if flag == 0xFFFF: + if is_location: + process_location.append(False) + else: + process_rotation.append(False) + + is_location = not is_location + continue + + data_read = 0 + + if is_location: + process_location.append(True) + data_read += 12 + else: + process_rotation.append(True) + data_read += 8 + + current_data_read += data_read + + is_location = not is_location + + # Special case read + locations = [] + rotations = [] + + # Location and Rotation intermixed + if track_1_size == 0: + print("Reading animation data in MODE A") + for i in range(len(process_location)): + read_location = process_location[i] + read_rotation = process_rotation[i] + + if read_location: + locations.append(self._read_vector(f)) + else: + if i == 0: + locations.append( Vector() ) + else: + locations.append( locations[i - 1] ) + + if read_rotation: + rotations.append(self._read_short_quaternion(f)) + else: + if i == 0: + rotations.append( [32768,0,0,0] ) + else: + rotations.append( rotations[i - 1] ) + # Rotation only -- Not correct!!! 
+ elif track_2_size == 0: + print("Reading animation data in MODE B") + for i in range(len(process_location)): + read_rotation = process_rotation[i] + + if read_rotation: + rotations.append(self._read_short_quaternion(f)) + else: + if i == 0: + rotations.append( [32768,0,0,0] ) + else: + rotations.append( rotations[i - 1] ) + # Rotation data then Location data + else: + print("Reading animation data in MODE C") + for i in range(len(process_location)): + read_rotation = process_rotation[i] + + if read_rotation: + rotations.append(self._read_short_quaternion(f)) + else: + if i == 0: + rotations.append( [32768,0,0,0] ) + else: + rotations.append( rotations[i - 1] ) + + for i in range(len(process_location)): + read_location = process_location[i] + read_rotation = process_rotation[i] + + if read_location: + locations.append(self._read_vector(f)) + else: + if i == 0: + locations.append( Vector() ) + else: + locations.append( locations[i - 1] ) - #model.anim_bindings = [self._read_anim_binding(f) for _ in range(anim_binding_count)] - for _ in range(anim_binding_count): - # Some LTB animation binding information can be incorrect... - # Almost like the mesh was accidentally cut off, very odd! - try: - model.anim_bindings.append(self._read_anim_binding(f)) - except Exception: - pass return model + +#old + # # + # # HEADER + # # + # file_format = unpack('H', f)[0] + # file_version = unpack('H', f)[0] + + # if file_type is not 1: + # raise Exception('Unsupported File Type! Only mesh LTB files are supported.') + # # End If + + # if file_version is not 9: + # raise Exception('Unsupported File Version! 
Importer currently only supports v9.') + # # End If + + # # Skip 4 ints + # f.seek(4 * 4, 1) + + # self.version = unpack('i', f)[0] + + # if self.version not in [23, 24, 25]: + # raise Exception('Unsupported file version ({}).'.format(self.version)) + # # End If + + # model.version = self.version + + # keyframe_count = unpack('i', f)[0] + # animation_count = unpack('i', f)[0] + # self.node_count = unpack('i', f)[0] + # piece_count = unpack('i', f)[0] + # child_model_count = unpack('i', f)[0] + # face_count = unpack('i', f)[0] + # vertex_count = unpack('i', f)[0] + # vertex_weight_count = unpack('i', f)[0] + # lod_count = unpack('i', f)[0] + # socket_count = unpack('i', f)[0] + # weight_set_count = unpack('i', f)[0] + # string_count = unpack('i', f)[0] + # string_length = unpack('i', f)[0] + # vertex_animation_data_size = unpack('i', f)[0] + # animation_data_size = unpack('i', f)[0] + + # model.command_string = self._read_string(f) + + # model.internal_radius = unpack('f', f)[0] + + # # + # # OBB Information + # # + # obb_count = unpack('i', f)[0] + + # obb_size = 64 + + # if self.version > 23: + # obb_size += 4 + + # # OBB information is a matrix per each node + # # We don't use it anywhere, so just skip it. + # f.seek(obb_size * obb_count, 1) + + # # + # # Pieces + # # + + # # Yep again! 
+ # piece_count = unpack('i', f)[0] + # model.pieces = [self._read_piece(f) for _ in range(piece_count)] + + # # + # # Nodes + # # + # model.nodes = [self._read_node(f) for _ in range(self.node_count)] + # build_undirected_tree(model.nodes) + # weight_set_count = unpack('I', f)[0] + # model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] + + # # + # # Child Models + # # + # child_model_count = unpack('I', f)[0] + # model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] + + # # + # # Animations + # # + # animation_count = unpack('I', f)[0] + # model.animations = [self._read_animation(f) for _ in range(animation_count)] + + # # + # # Sockets + # # + # socket_count = unpack('I', f)[0] + # model.sockets = [self._read_socket(f) for _ in range(socket_count)] + + # # + # # Animation Bindings + # # + # anim_binding_count = unpack('I', f)[0] + + # #model.anim_bindings = [self._read_anim_binding(f) for _ in range(anim_binding_count)] + + # for _ in range(anim_binding_count): + # # Some LTB animation binding information can be incorrect... + # # Almost like the mesh was accidentally cut off, very odd! 
+ # try: + # model.anim_bindings.append(self._read_anim_binding(f)) + # except Exception: + # pass + + # return model From 40ae1c3a70ff70df13aafcdd9a85af1f62304b4d Mon Sep 17 00:00:00 2001 From: Jake Date: Thu, 28 Jan 2021 20:57:27 -0800 Subject: [PATCH 03/24] Lot more ugly wip code for reading model00p --- src/abc.py | 13 +++ src/reader_model00p_pc.py | 167 +++++++++++++++++++++++++++----------- 2 files changed, 132 insertions(+), 48 deletions(-) diff --git a/src/abc.py b/src/abc.py index 017cd3e..d734a39 100644 --- a/src/abc.py +++ b/src/abc.py @@ -140,6 +140,10 @@ def __init__(self): # Version 6 specific self.md_vert_count = 0 self.md_vert_list = [] + + # Model00p specific + self.location = Vector() + self.rotation = Quaternion((1, 0, 0, 0)) def __repr__(self): return self.name @@ -216,6 +220,15 @@ def __init__(self): self.extents = Vector() self.origin = Vector() + # Model00p specific + self.radius = 1.0 + self.rotation = Vector() # Eulers? + self.interpolation_time = 200 + +class AnimInfo(object): + def __init__(self): + self.animation = Animation() + self.binding = AnimBinding() class ChildModel(object): def __init__(self): diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index a5b0def..90f7da6 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -420,12 +420,12 @@ def _read_node(self, f): node.index = self._unpack('H', f)[0] node.flags = self._unpack('b', f)[0] - location = self._read_vector(f) - rotation = self._read_quaternion(f) + node.location = self._read_vector(f) + node.rotation = self._read_quaternion(f) # Transform location/rotation into a bind matrix! 
- mat_rot = rotation.to_matrix() - mat_loc = Matrix.Translation(location) + mat_rot = node.rotation.to_matrix() + mat_loc = Matrix.Translation(node.location) node.bind_matrix = mat_loc @ mat_rot.to_4x4() node.inverse_bind_matrix = node.bind_matrix.inverted() @@ -509,7 +509,8 @@ def _read_child_model(self, f): def _read_keyframe(self, f): keyframe = Animation.Keyframe() keyframe.time = unpack('I', f)[0] - keyframe.string = self._read_string(f) + string_offset = unpack('I', f)[0] + keyframe.string = self._get_string_from_table(string_offset) return keyframe def _read_animation(self, f): @@ -549,11 +550,36 @@ def _read_socket(self, f): def _read_anim_binding(self, f): anim_binding = AnimBinding() - anim_binding.name = self._read_string(f) + anim_binding.extents = self._read_vector(f) - anim_binding.origin = self._read_vector(f) + + anim_binding.radius = self._unpack('f', f)[0] + + name_offset = self._unpack('I', f)[0] + anim_binding.name = self._get_string_from_table(name_offset) + + anim_binding.interpolation_time = self._unpack('I', f)[0] + + # 12 bytes of empty data hmmmm HMMMM + #unk_vector = self._read_vector(f) + unk_1 = self._unpack('I', f)[0] + unk_2 = self._unpack('I', f)[0] + unk_3 = self._unpack('I', f)[0] + + fin = True + return anim_binding + def _read_anim_info(self, f): + anim_info = AnimInfo() + + anim_info.binding = self._read_anim_binding(f) + + anim_info.animation.keyframe_count = self._unpack('I', f)[0] + anim_info.animation.keyframes = [self._read_keyframe(f) for _ in range(anim_info.animation.keyframe_count)] + + return anim_info + def _read_weight_set(self, f): weight_set = WeightSet() weight_set.name = self._read_string(f) @@ -594,7 +620,7 @@ def from_file(self, path): self.lod_count = self._unpack('I', f)[0] socket_count = self._unpack('I', f)[0] animation_weight_count = self._unpack('I', f)[0] - unk_8 = self._unpack('I', f)[0] + animation_binding_count = self._unpack('I', f)[0] string_data_length = self._unpack('I', f)[0] physics_weight_count 
= self._unpack('I', f)[0] physics_shape_count = self._unpack('I', f)[0] @@ -626,61 +652,106 @@ def from_file(self, path): if unknown != 0: print("Unknown animation value is not 0! It's %d" % unknown) + + + # - current_data_read = 0 + # RLE # Process lists, TRUE if the value is there, FALSE if it's assumed data. - process_location = [] - process_rotation = [] + # Dictionary per animation, Location/Rotation + process_section = [] - # TODO: This should be a per each animation loop starting here! + for i in range(animation_count): + current_data_read = 0 + process_location = [] + process_rotation = [] + # TODO: This should be a per each animation loop starting here! - # Track 1 seems to be all rotation data up front? - # Track 2 seems to be Location/Rotation data - # Or that's suppose to how it goes, but I don't even know maaan. - track_1_size = self._unpack('H', f)[0] - track_2_size = self._unpack('H', f)[0] + # Track 1 seems to be all rotation data up front? + # Track 2 seems to be Location/Rotation data - combined_track_size = track_1_size + track_2_size + # Or that's suppose to how it goes, but I don't even know maaan. - - # Data is formatted like... - # ((Location, Rotation) * NodeCount) * KeyframeCount + # Track 2 = val | 0x8000 + track_1_size = self._unpack('H', f)[0] + track_2_size = self._unpack('H', f)[0] + + combined_track_size = track_1_size + track_2_size - is_location = True - while current_data_read < combined_track_size: - flag = self._unpack('H', f)[0] + # Data is formatted like... 
+ # ((Location, Rotation) * NodeCount) * KeyframeCount + is_location = True + while current_data_read < combined_track_size: + flag = self._unpack('H', f)[0] + + # Carry over data from previous frame + if flag == 0xFFFF: + if is_location: + process_location.append(False) + else: + process_rotation.append(False) + + is_location = not is_location + continue + + data_read = flag + if flag >= 0x8000: + data_read -= 0x8000 - # Carry over data from previous frame - if flag == 0xFFFF: if is_location: - process_location.append(False) + process_location.append(True) else: - process_rotation.append(False) + process_rotation.append(True) + + current_data_read += data_read is_location = not is_location - continue - - data_read = 0 - if is_location: - process_location.append(True) - data_read += 12 - else: - process_rotation.append(True) - data_read += 8 + process_section.append( { 'location': process_location, 'rotation': process_rotation } ) + + # Okay save the current position, and read ahead to the keyframe data + animation_position = f.tell() + + # Skip ahead to keyframes! + f.seek(animation_data_length , 1) + + #model.anim_bindings = [self._read_anim_binding(f) for _ in range(animation_binding_count)] + anim_infos = [self._read_anim_info(f) for _ in range(animation_binding_count)] - current_data_read += data_read + animation_binding_position = f.tell() + f.seek(animation_position, 0) - is_location = not is_location + return model # Special case read locations = [] rotations = [] + default_locations = [] + default_rotations = [] + + # Note: Defaults should be the node transform values, not Vector(0,0,0) for example. + + for node in model.nodes: + default_locations.append(node.location) + default_rotations.append(node.rotation) + + # Not really it, but a starting point! 
+ def decompres_quat(compresed_quat): + # Find highest number, assume that's 1.0 + largest_number = -1 + for quat in compresed_quat: + if quat > largest_number: + largest_number = quat + + return Quaternion( ( compresed_quat[0] / largest_number, compresed_quat[1] / largest_number, compresed_quat[2] / largest_number, compresed_quat[3] / largest_number ) ) + + + # Location and Rotation intermixed if track_1_size == 0: print("Reading animation data in MODE A") @@ -692,15 +763,15 @@ def from_file(self, path): locations.append(self._read_vector(f)) else: if i == 0: - locations.append( Vector() ) + locations.append( default_locations[i] ) else: locations.append( locations[i - 1] ) if read_rotation: - rotations.append(self._read_short_quaternion(f)) + rotations.append( decompres_quat(self._read_short_quaternion(f)) ) else: - if i == 0: - rotations.append( [32768,0,0,0] ) + if i == 0: + rotations.append( default_rotations[i] ) else: rotations.append( rotations[i - 1] ) # Rotation only -- Not correct!!! 
@@ -710,10 +781,10 @@ def from_file(self, path): read_rotation = process_rotation[i] if read_rotation: - rotations.append(self._read_short_quaternion(f)) + rotations.append( decompres_quat(self._read_short_quaternion(f)) ) else: if i == 0: - rotations.append( [32768,0,0,0] ) + rotations.append( default_rotations[i] ) else: rotations.append( rotations[i - 1] ) # Rotation data then Location data @@ -723,10 +794,10 @@ def from_file(self, path): read_rotation = process_rotation[i] if read_rotation: - rotations.append(self._read_short_quaternion(f)) + rotations.append( decompres_quat(self._read_short_quaternion(f)) ) else: if i == 0: - rotations.append( [32768,0,0,0] ) + rotations.append( default_rotations[i] ) else: rotations.append( rotations[i - 1] ) @@ -738,7 +809,7 @@ def from_file(self, path): locations.append(self._read_vector(f)) else: if i == 0: - locations.append( Vector() ) + locations.append( default_locations[i] ) else: locations.append( locations[i - 1] ) From 9413614ab09808c162170a49f24a764355ccef1e Mon Sep 17 00:00:00 2001 From: Jake Date: Sat, 30 Jan 2021 18:19:28 -0800 Subject: [PATCH 04/24] More of a mess. Reads non-interlaced animation headers, and bindings. --- src/abc.py | 4 ++ src/reader_model00p_pc.py | 130 ++++++++++++++++++++++++++++++++------ 2 files changed, 115 insertions(+), 19 deletions(-) diff --git a/src/abc.py b/src/abc.py index d734a39..5eb5d7a 100644 --- a/src/abc.py +++ b/src/abc.py @@ -224,6 +224,10 @@ def __init__(self): self.radius = 1.0 self.rotation = Vector() # Eulers? self.interpolation_time = 200 + + self.animation_header_index = -1 + self.data_position = -1 + self.compressed = -1 # Location compression only! Rotation data is always compressed. 
class AnimInfo(object): def __init__(self): diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 90f7da6..a4a1998 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -25,6 +25,11 @@ CMP_Relevant_16 = 2 CMP_Relevant_Rot16 = 3 +# Animation processing values +ANIM_No_Compression = 0 +ANIM_Compression = 1 +ANIM_Carry_Over = 2 + Invalid_Bone = 255 # @@ -560,11 +565,9 @@ def _read_anim_binding(self, f): anim_binding.interpolation_time = self._unpack('I', f)[0] - # 12 bytes of empty data hmmmm HMMMM - #unk_vector = self._read_vector(f) - unk_1 = self._unpack('I', f)[0] - unk_2 = self._unpack('I', f)[0] - unk_3 = self._unpack('I', f)[0] + anim_binding.animation_header_index = self._unpack('I', f)[0] + anim_binding.data_position = self._unpack('I', f)[0] + anim_binding.is_compressed = self._unpack('I', f)[0] fin = True @@ -620,7 +623,7 @@ def from_file(self, path): self.lod_count = self._unpack('I', f)[0] socket_count = self._unpack('I', f)[0] animation_weight_count = self._unpack('I', f)[0] - animation_binding_count = self._unpack('I', f)[0] + animation_header_count = self._unpack('I', f)[0] string_data_length = self._unpack('I', f)[0] physics_weight_count = self._unpack('I', f)[0] physics_shape_count = self._unpack('I', f)[0] @@ -657,17 +660,27 @@ def from_file(self, path): # +# ANIM_No_Compression = 0 +# ANIM_Compression = 1 +# ANIM_Carry_Over = 2 # RLE # Process lists, TRUE if the value is there, FALSE if it's assumed data. # Dictionary per animation, Location/Rotation process_section = [] + animation_data_lengths = [] + + before_flags = f.tell() + - for i in range(animation_count): + + for i in range(animation_header_count): current_data_read = 0 process_location = [] process_rotation = [] + process_flags = [] + # TODO: This should be a per each animation loop starting here! 
@@ -682,36 +695,115 @@ def from_file(self, path): combined_track_size = track_1_size + track_2_size + print("Reading animation %d at %d" % (i, f.tell())) + + assert(combined_track_size != 0) + + animation_data_lengths.append({ 'track1': track_1_size, 'track2': track_2_size, 'total': combined_track_size }) + # Data is formatted like... # ((Location, Rotation) * NodeCount) * KeyframeCount is_location = True + + # There can be 0xFFFF after all the data has technically been read... + # So this will allow us to check that too + wrap_up = False + + ######################################################################### + # New pass + + + def read_location_flag(current_track, data_length): + if data_length == 0xC: + return { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } + elif data_length == 0x6: + return { 'type': 'location', 'track': current_track, 'process': ANIM_Compression } + elif data_length == 0xFFFF: + return { 'type': 'location', 'track': current_track, 'process': ANIM_Carry_Over } + + raise Exception("Location data read of %d !!!" % data_length) + + def read_rotation_flag(current_track, data_length): + if data_length == 0xFFFF: + return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Carry_Over } + # Rotation is always compressed?? + return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Compression } + + + def read_flag(is_location, current_track, data_length): + if is_location: + return read_location_flag(current_track, data_length) + + return read_rotation_flag(current_track, data_length) + + # By default start on track 1 + current_track = 1 + + # Special case, in case the first flag is 0xFFFF + if track_2_size > 0 and track_1_size == 0: + current_track = 2 + + + # Works on everything but interlaced Track1/2 data. 
while current_data_read < combined_track_size: flag = self._unpack('H', f)[0] # Carry over data from previous frame if flag == 0xFFFF: - if is_location: - process_location.append(False) - else: - process_rotation.append(False) - + process_flags.append(read_flag(is_location, current_track, flag)) is_location = not is_location continue + elif wrap_up: + # We need to go back a bit now... + f.seek(-2, 1) + break + # Check which track the flag is on data_read = flag if flag >= 0x8000: + current_track = 1 data_read -= 0x8000 - - if is_location: - process_location.append(True) else: - process_rotation.append(True) + current_track = 2 + + # Just skip the first entry, we want to read ahead! + if data_read == 0: + continue + + # Get the size of the data + data_length = data_read - current_data_read + + process_flags.append(read_flag(is_location, current_track, data_length)) - current_data_read += data_read + current_data_read += data_length is_location = not is_location - process_section.append( { 'location': process_location, 'rotation': process_rotation } ) + # Special end conditioning + # We need to guess what finishes this count off. 
+ if is_location: + final_data_length = 0xC + if final_data_length + current_data_read != combined_track_size: + final_data_length = 0x6 + + if final_data_length + current_data_read == combined_track_size: + process_flags.append(read_flag(is_location, current_track, final_data_length)) + is_location = not is_location + wrap_up = True + else: + final_data_length = 0x8 + if final_data_length + current_data_read == combined_track_size: + process_flags.append(read_flag(is_location, current_track, final_data_length)) + is_location = not is_location + wrap_up = True + # End While + + process_section.append(process_flags) + + # End For + + # End pass + ######################################################################### # Okay save the current position, and read ahead to the keyframe data animation_position = f.tell() @@ -720,7 +812,7 @@ def from_file(self, path): f.seek(animation_data_length , 1) #model.anim_bindings = [self._read_anim_binding(f) for _ in range(animation_binding_count)] - anim_infos = [self._read_anim_info(f) for _ in range(animation_binding_count)] + anim_infos = [self._read_anim_info(f) for _ in range(animation_count)] animation_binding_position = f.tell() f.seek(animation_position, 0) From 5618c4e2e7051898d67d7fc2ed91fe3f1cdadfac Mon Sep 17 00:00:00 2001 From: Jake Date: Sat, 30 Jan 2021 18:30:22 -0800 Subject: [PATCH 05/24] Clean up --- src/abc.py | 2 +- src/reader_model00p_pc.py | 32 +++----------------------------- 2 files changed, 4 insertions(+), 30 deletions(-) diff --git a/src/abc.py b/src/abc.py index 5eb5d7a..91b9d80 100644 --- a/src/abc.py +++ b/src/abc.py @@ -227,7 +227,7 @@ def __init__(self): self.animation_header_index = -1 self.data_position = -1 - self.compressed = -1 # Location compression only! Rotation data is always compressed. + self.is_compressed = -1 # Location compression only! Rotation data is always compressed. 
class AnimInfo(object): def __init__(self): diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index a4a1998..141c13f 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -608,9 +608,9 @@ def from_file(self, path): self.version = self._unpack('I', f)[0] - # Fear for now! - if self.version is not 33: - raise Exception('Unsupported File Version! Importer currently only supports v33.') + # Fear and Condemned + if self.version not in [33, 34]: + raise Exception('Unsupported File Version! Importer currently only supports v33/v34.') # End If model.version = self.version @@ -655,41 +655,16 @@ def from_file(self, path): if unknown != 0: print("Unknown animation value is not 0! It's %d" % unknown) - - - - # - -# ANIM_No_Compression = 0 -# ANIM_Compression = 1 -# ANIM_Carry_Over = 2 - # RLE # Process lists, TRUE if the value is there, FALSE if it's assumed data. # Dictionary per animation, Location/Rotation process_section = [] animation_data_lengths = [] - before_flags = f.tell() - - - for i in range(animation_header_count): current_data_read = 0 - process_location = [] - process_rotation = [] - process_flags = [] - # TODO: This should be a per each animation loop starting here! - - - # Track 1 seems to be all rotation data up front? - # Track 2 seems to be Location/Rotation data - - # Or that's suppose to how it goes, but I don't even know maaan. 
- - # Track 2 = val | 0x8000 track_1_size = self._unpack('H', f)[0] track_2_size = self._unpack('H', f)[0] @@ -712,7 +687,6 @@ def from_file(self, path): ######################################################################### # New pass - def read_location_flag(current_track, data_length): if data_length == 0xC: return { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } From 994ee23eed4c99711567003f21d4777b9bcd8e58 Mon Sep 17 00:00:00 2001 From: Jake Date: Sat, 30 Jan 2021 23:04:38 -0800 Subject: [PATCH 06/24] Very messily (and incorrectly) read animations --- src/abc.py | 1 + src/importer.py | 3 +- src/reader_model00p_pc.py | 152 +++++++++++++++++++++++++++++++++----- 3 files changed, 137 insertions(+), 19 deletions(-) diff --git a/src/abc.py b/src/abc.py index 91b9d80..eb0e271 100644 --- a/src/abc.py +++ b/src/abc.py @@ -198,6 +198,7 @@ def __init__(self): self.unknown1 = -1 self.interpolation_time = 200 self.keyframes = [] + self.keyframe_count = 0 self.node_keyframe_transforms = [] # Version 6 specific diff --git a/src/importer.py b/src/importer.py index 6e96694..80e218c 100644 --- a/src/importer.py +++ b/src/importer.py @@ -23,6 +23,7 @@ class ModelImportOptions(object): def __init__(self): self.should_merge_duplicate_verts = False + self.should_import_vertex_animations = False self.should_import_animations = False self.should_import_sockets = False self.bone_length_min = 0.1 @@ -737,7 +738,7 @@ def execute(self, context): image = None options = ModelImportOptions() - + options.should_import_animations = True options.image = image #try: # import_model(model, options) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 141c13f..c4e8e05 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -77,12 +77,16 @@ def _read_matrix(self, f): rows = [data[0:4], data[4:8], data[8:12], data[12:16]] return Matrix(rows) + def _read_short_vector(self, f): + x,y,z = self._unpack('3H', f) + return [x,y,z] + 
def _read_vector(self, f): return Vector(self._unpack('3f', f)) def _read_short_quaternion(self, f): x, y, z, w = self._unpack('4H', f) - return [w,x,y,z]#Quaternion((w, x, y, z)) + return [w,x,y,z] def _read_quaternion(self, f): x, y, z, w = self._unpack('4f', f) @@ -578,6 +582,9 @@ def _read_anim_info(self, f): anim_info.binding = self._read_anim_binding(f) + anim_info.animation.extents = anim_info.binding.extents + anim_info.animation.interpolation_time = anim_info.binding.interpolation_time + anim_info.animation.name = anim_info.binding.name anim_info.animation.keyframe_count = self._unpack('I', f)[0] anim_info.animation.keyframes = [self._read_keyframe(f) for _ in range(anim_info.animation.keyframe_count)] @@ -662,7 +669,10 @@ def from_file(self, path): animation_data_lengths = [] for i in range(animation_header_count): + # Total current_data_read = 0 + # Track based + track_data_read = [0, 0] process_flags = [] track_1_size = self._unpack('H', f)[0] @@ -720,11 +730,14 @@ def read_flag(is_location, current_track, data_length): # Works on everything but interlaced Track1/2 data. while current_data_read < combined_track_size: + debug_ftell = f.tell() + flag = self._unpack('H', f)[0] # Carry over data from previous frame if flag == 0xFFFF: process_flags.append(read_flag(is_location, current_track, flag)) + # Seems safe to flip flop here... 
is_location = not is_location continue elif wrap_up: @@ -745,31 +758,40 @@ def read_flag(is_location, current_track, data_length): continue # Get the size of the data - data_length = data_read - current_data_read + data_length = data_read - track_data_read[ current_track - 1 ] + + if data_length == 0xC or data_length == 0x6: + is_location = True + elif data_length == 0x8: + is_location = False + else: + raise Exception("Invalid data length %d" % data_length) process_flags.append(read_flag(is_location, current_track, data_length)) current_data_read += data_length - - is_location = not is_location + track_data_read[ current_track - 1 ] += data_length # Special end conditioning # We need to guess what finishes this count off. - if is_location: - final_data_length = 0xC - if final_data_length + current_data_read != combined_track_size: - final_data_length = 0x6 - - if final_data_length + current_data_read == combined_track_size: - process_flags.append(read_flag(is_location, current_track, final_data_length)) - is_location = not is_location - wrap_up = True - else: + is_location = True + final_data_length = 0xC + + # Is it compressed? + if final_data_length + current_data_read != combined_track_size: + final_data_length = 0x6 + + # Is it actually rotation?? + if final_data_length + current_data_read != combined_track_size: + is_location = False final_data_length = 0x8 - if final_data_length + current_data_read == combined_track_size: - process_flags.append(read_flag(is_location, current_track, final_data_length)) - is_location = not is_location - wrap_up = True + + # Cool? 
+ if final_data_length + current_data_read == combined_track_size: + process_flags.append(read_flag(is_location, current_track, final_data_length)) + is_location = not is_location + wrap_up = True + # End While process_section.append(process_flags) @@ -791,6 +813,100 @@ def read_flag(is_location, current_track, data_length): animation_binding_position = f.tell() f.seek(animation_position, 0) + ######################################################################### + # Animation Pass + + # Special case read + locations = [] + rotations = [] + + default_locations = [] + default_rotations = [] + + # Note: Defaults should be the node transform values, not Vector(0,0,0) for example. + + for node in model.nodes: + default_locations.append(node.location) + default_rotations.append(node.rotation) + + def decompress_vec(compressed_vec): + for i in range(len(compressed_vec)): + if compressed_vec[i] != 0: + compressed_vec[i] /= 64.0 + + return Vector( compressed_vec ) + + # Not really it, but a starting point! + def decompres_quat(compresed_quat): + # Find highest number, assume that's 1.0 + largest_number = -1 + for quat in compresed_quat: + if quat > largest_number: + largest_number = quat + + return Quaternion( ( compresed_quat[0] / largest_number, compresed_quat[1] / largest_number, compresed_quat[2] / largest_number, compresed_quat[3] / largest_number ) ) + + # Small helper function + def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, node_index): + if keyframe_index == 0: + return defaults_list[node_index] + + transform = keyframe_list[ keyframe_index - 1 ] + + if flag_type == 'location': + return transform.location + + return transform.rotation + + + # Should match up with animation count... + for anim_info in anim_infos: + # For ... 
{ 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } + + keyframe_transforms = [] + section = process_section[anim_info.binding.animation_header_index] + + for node_index in range(self.node_count): + + section_index = 0 + for keyframe_index in range(anim_info.animation.keyframe_count): + + transform = Animation.Keyframe.Transform() + + # Flags are per keyframe + flags = [ section[ section_index ], section[ section_index + 1 ] ] + section_index += 2 + + # Let's assume that it's always Location/Rotation + for flag in flags: + process = flag['process'] + + if flag['type'] == 'location': + if process == ANIM_No_Compression: + transform.location = self._read_vector(f) + elif process == ANIM_Compression: + transform.location = decompress_vec(self._read_short_vector(f)) + elif process == ANIM_Carry_Over: + transform.location = handle_carry_over( flag['type'], keyframe_transforms, default_locations, keyframe_index, node_index ) + else: + if process == ANIM_Compression: + transform.rotation = decompres_quat(self._read_short_quaternion(f)) + elif process == ANIM_Carry_Over: + transform.rotation = handle_carry_over( flag['type'], keyframe_transforms, default_rotations, keyframe_index, node_index ) + + keyframe_transforms.append(transform) + # End For + anim_info.animation.node_keyframe_transforms.append(keyframe_transforms) + # End For + + model.animations.append(anim_info.animation) + + + + # End Pass + ######################################################################### + + return model # Special case read From 62a9e7456792af16e4fa13955a3b6886776c6a6e Mon Sep 17 00:00:00 2001 From: Jake Date: Sun, 31 Jan 2021 20:50:59 -0800 Subject: [PATCH 07/24] Re-work animation schema (header) function. Still not working right on interlaced track models. But it's cleaner. 
--- src/reader_model00p_pc.py | 397 +++++++++++++++----------------------- 1 file changed, 159 insertions(+), 238 deletions(-) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index c4e8e05..b8349a4 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -597,6 +597,138 @@ def _read_weight_set(self, f): weight_set.node_weights = [unpack('f', f)[0] for _ in range(node_count)] return weight_set + + def _read_flag(self, is_location, current_track, data_length): + # Location data (Not Compressed and Compressed) + if data_length == 0xC: + return { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } + elif data_length == 0x6: + return { 'type': 'location', 'track': current_track, 'process': ANIM_Compression } + # Rotation data (Compressed) + elif data_length == 0x8: + return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Compression } + # Carry overs + elif data_length == 0xFFFF and is_location: + return { 'type': 'location', 'track': current_track, 'process': ANIM_Carry_Over } + elif data_length == 0xFFFF and not is_location: + return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Carry_Over } + + # Fallback in-case data is out of line! + raise Exception("Invalid data length (%d) for current track (%d). Is location flag (%d)" % (data_length, current_track, is_location)) + # End Def + + def _read_animation_schema(self, f): + # Basically a map for how we'll read a particular animation + # We return this data at the end of this function + compression_schema = [] + + # Data counters + total_data_read = 0 + track_data_read = [0, 0] + + # Generally the data flip flops between Location/Rotation/etc... + is_location = True + + # If we're basically done this flag will signify that we need to quit + # There may still be 0xFFFF data at the end of a schema though... 
+ wrap_up = False + + track_1_size = self._unpack('H', f)[0] + track_2_size = self._unpack('H', f)[0] + + total_track_size = track_1_size + track_2_size + + # Safety, this shouldn't happen! + assert(total_track_size != 0) + + # By default start on track 1 + current_track = 1 + + # Special case, in case the first flag is 0xFFFF + if track_2_size > 0 and track_1_size == 0: + current_track = 2 + + while total_data_read < total_track_size: + debug_ftell = f.tell() + + # Read the next flag + flag = self._unpack('H', f)[0] + + if flag == 0xFFFF: + compression_schema.append(self._read_flag(is_location, current_track, flag)) + is_location = not is_location + continue + elif wrap_up == True: + # We need to go back a bit now... + f.seek(-2, 1) + break + + # + bytes_written = flag + + # So if we're at or above 0x8000, we're on track 1 + # To get the real bytes written, we need to remove the 0x8000 bit... + if bytes_written >= 0x8000: + current_track = 1 + bytes_written -= 0x8000 + else: + current_track = 2 + + # We're reading ahead, so we're okay to skip 0's + if bytes_written == 0: + is_location = not is_location + continue + + # Get the size of the data + data_length = bytes_written - track_data_read[ current_track - 1 ] + + compression_schema.append(self._read_flag(is_location, current_track, data_length)) + + is_location = not is_location + + total_data_read += data_length + track_data_read[ current_track - 1 ] += data_length + + # Okay now we need to guess if we're at the end + # + final_is_location = True + final_data_length = 0x6 + + if final_data_length + total_data_read != total_track_size: + final_data_length = 0xC + + if final_data_length + total_data_read != total_track_size: + final_is_location = False + final_data_length = 0x8 + + if final_data_length + total_data_read == total_track_size: + print("Found final data (%d), wrapping up!" 
% final_data_length) + compression_schema.append(self._read_flag(final_is_location, current_track, final_data_length)) + is_location = not final_is_location + wrap_up = True + # End While + + ## + # DEBUG + + location_count = 0 + rotation_count = 0 + + for schema in compression_schema: + if schema['type'] == 'location': + location_count += 1 + else: + rotation_count += 1 + # End For + + print("Testing out count: %d == %d ?" % (location_count, rotation_count)) + assert(location_count == rotation_count) + + ## + + return compression_schema + # End Def + def from_file(self, path): model = Model() model.name = os.path.splitext(os.path.basename(path))[0] @@ -630,7 +762,7 @@ def from_file(self, path): self.lod_count = self._unpack('I', f)[0] socket_count = self._unpack('I', f)[0] animation_weight_count = self._unpack('I', f)[0] - animation_header_count = self._unpack('I', f)[0] + animation_schema_count = self._unpack('I', f)[0] string_data_length = self._unpack('I', f)[0] physics_weight_count = self._unpack('I', f)[0] physics_shape_count = self._unpack('I', f)[0] @@ -665,141 +797,10 @@ def from_file(self, path): # RLE # Process lists, TRUE if the value is there, FALSE if it's assumed data. # Dictionary per animation, Location/Rotation - process_section = [] - animation_data_lengths = [] - - for i in range(animation_header_count): - # Total - current_data_read = 0 - # Track based - track_data_read = [0, 0] - process_flags = [] - - track_1_size = self._unpack('H', f)[0] - track_2_size = self._unpack('H', f)[0] - - combined_track_size = track_1_size + track_2_size - - print("Reading animation %d at %d" % (i, f.tell())) - - assert(combined_track_size != 0) - - animation_data_lengths.append({ 'track1': track_1_size, 'track2': track_2_size, 'total': combined_track_size }) - - # Data is formatted like... - # ((Location, Rotation) * NodeCount) * KeyframeCount - is_location = True - - # There can be 0xFFFF after all the data has technically been read... 
- # So this will allow us to check that too - wrap_up = False - - ######################################################################### - # New pass - - def read_location_flag(current_track, data_length): - if data_length == 0xC: - return { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } - elif data_length == 0x6: - return { 'type': 'location', 'track': current_track, 'process': ANIM_Compression } - elif data_length == 0xFFFF: - return { 'type': 'location', 'track': current_track, 'process': ANIM_Carry_Over } - - raise Exception("Location data read of %d !!!" % data_length) - - def read_rotation_flag(current_track, data_length): - if data_length == 0xFFFF: - return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Carry_Over } - # Rotation is always compressed?? - return { 'type': 'rotation', 'track': current_track, 'process': ANIM_Compression } - - - def read_flag(is_location, current_track, data_length): - if is_location: - return read_location_flag(current_track, data_length) - - return read_rotation_flag(current_track, data_length) - - # By default start on track 1 - current_track = 1 - - # Special case, in case the first flag is 0xFFFF - if track_2_size > 0 and track_1_size == 0: - current_track = 2 - - - # Works on everything but interlaced Track1/2 data. - while current_data_read < combined_track_size: - debug_ftell = f.tell() - - flag = self._unpack('H', f)[0] - - # Carry over data from previous frame - if flag == 0xFFFF: - process_flags.append(read_flag(is_location, current_track, flag)) - # Seems safe to flip flop here... - is_location = not is_location - continue - elif wrap_up: - # We need to go back a bit now... - f.seek(-2, 1) - break - - # Check which track the flag is on - data_read = flag - if flag >= 0x8000: - current_track = 1 - data_read -= 0x8000 - else: - current_track = 2 - - # Just skip the first entry, we want to read ahead! 
- if data_read == 0: - continue - - # Get the size of the data - data_length = data_read - track_data_read[ current_track - 1 ] - - if data_length == 0xC or data_length == 0x6: - is_location = True - elif data_length == 0x8: - is_location = False - else: - raise Exception("Invalid data length %d" % data_length) - - process_flags.append(read_flag(is_location, current_track, data_length)) - - current_data_read += data_length - track_data_read[ current_track - 1 ] += data_length - - # Special end conditioning - # We need to guess what finishes this count off. - is_location = True - final_data_length = 0xC - - # Is it compressed? - if final_data_length + current_data_read != combined_track_size: - final_data_length = 0x6 - - # Is it actually rotation?? - if final_data_length + current_data_read != combined_track_size: - is_location = False - final_data_length = 0x8 - - # Cool? - if final_data_length + current_data_read == combined_track_size: - process_flags.append(read_flag(is_location, current_track, final_data_length)) - is_location = not is_location - wrap_up = True + animation_schemas = [] - # End While - - process_section.append(process_flags) - - # End For - - # End pass - ######################################################################### + for _ in range(animation_schema_count): + animation_schemas.append(self._read_animation_schema(f)) # Okay save the current position, and read ahead to the keyframe data animation_position = f.tell() @@ -844,7 +845,11 @@ def decompres_quat(compresed_quat): if quat > largest_number: largest_number = quat - return Quaternion( ( compresed_quat[0] / largest_number, compresed_quat[1] / largest_number, compresed_quat[2] / largest_number, compresed_quat[3] / largest_number ) ) + for i in range(len(compresed_quat)): + if compresed_quat[i] != 0: + compresed_quat[i] / largest_number + + return Quaternion( compresed_quat ) # Small helper function def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, 
node_index): @@ -864,13 +869,18 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # For ... { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } keyframe_transforms = [] - section = process_section[anim_info.binding.animation_header_index] - - for node_index in range(self.node_count): - + section = animation_schemas[anim_info.binding.animation_header_index] + + for keyframe_index in range(anim_info.animation.keyframe_count): section_index = 0 - for keyframe_index in range(anim_info.animation.keyframe_count): + for node_index in range(self.node_count): + # Make sure we have space here... + try: + anim_info.animation.node_keyframe_transforms[node_index] + except: + anim_info.animation.node_keyframe_transforms.append([]) + transform = Animation.Keyframe.Transform() # Flags are per keyframe @@ -887,17 +897,19 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n elif process == ANIM_Compression: transform.location = decompress_vec(self._read_short_vector(f)) elif process == ANIM_Carry_Over: - transform.location = handle_carry_over( flag['type'], keyframe_transforms, default_locations, keyframe_index, node_index ) + transform.location = handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_locations, keyframe_index, node_index ) else: if process == ANIM_Compression: transform.rotation = decompres_quat(self._read_short_quaternion(f)) elif process == ANIM_Carry_Over: - transform.rotation = handle_carry_over( flag['type'], keyframe_transforms, default_rotations, keyframe_index, node_index ) + transform.rotation = handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_rotations, keyframe_index, node_index ) + # End For (Flag) - keyframe_transforms.append(transform) - # End For - anim_info.animation.node_keyframe_transforms.append(keyframe_transforms) - # End For + # Insert the transform + 
anim_info.animation.node_keyframe_transforms[node_index].append(transform) + + # End For (Node) + # End For (Keyframe) model.animations.append(anim_info.animation) @@ -908,97 +920,6 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n return model - - # Special case read - locations = [] - rotations = [] - - default_locations = [] - default_rotations = [] - - # Note: Defaults should be the node transform values, not Vector(0,0,0) for example. - - for node in model.nodes: - default_locations.append(node.location) - default_rotations.append(node.rotation) - - # Not really it, but a starting point! - def decompres_quat(compresed_quat): - # Find highest number, assume that's 1.0 - largest_number = -1 - for quat in compresed_quat: - if quat > largest_number: - largest_number = quat - - return Quaternion( ( compresed_quat[0] / largest_number, compresed_quat[1] / largest_number, compresed_quat[2] / largest_number, compresed_quat[3] / largest_number ) ) - - - - # Location and Rotation intermixed - if track_1_size == 0: - print("Reading animation data in MODE A") - for i in range(len(process_location)): - read_location = process_location[i] - read_rotation = process_rotation[i] - - if read_location: - locations.append(self._read_vector(f)) - else: - if i == 0: - locations.append( default_locations[i] ) - else: - locations.append( locations[i - 1] ) - - if read_rotation: - rotations.append( decompres_quat(self._read_short_quaternion(f)) ) - else: - if i == 0: - rotations.append( default_rotations[i] ) - else: - rotations.append( rotations[i - 1] ) - # Rotation only -- Not correct!!! 
- elif track_2_size == 0: - print("Reading animation data in MODE B") - for i in range(len(process_location)): - read_rotation = process_rotation[i] - - if read_rotation: - rotations.append( decompres_quat(self._read_short_quaternion(f)) ) - else: - if i == 0: - rotations.append( default_rotations[i] ) - else: - rotations.append( rotations[i - 1] ) - # Rotation data then Location data - else: - print("Reading animation data in MODE C") - for i in range(len(process_location)): - read_rotation = process_rotation[i] - - if read_rotation: - rotations.append( decompres_quat(self._read_short_quaternion(f)) ) - else: - if i == 0: - rotations.append( default_rotations[i] ) - else: - rotations.append( rotations[i - 1] ) - - for i in range(len(process_location)): - read_location = process_location[i] - read_rotation = process_rotation[i] - - if read_location: - locations.append(self._read_vector(f)) - else: - if i == 0: - locations.append( default_locations[i] ) - else: - locations.append( locations[i - 1] ) - - - - return model - #old # # # # HEADER From e4bb9caf9186ada19589017f5d9d49cca3f39801 Mon Sep 17 00:00:00 2001 From: Jake Date: Mon, 1 Feb 2021 21:41:20 -0800 Subject: [PATCH 08/24] More re-works --- src/importer.py | 2 +- src/reader_model00p_pc.py | 95 +++++++++++++++++++++++---------------- 2 files changed, 58 insertions(+), 39 deletions(-) diff --git a/src/importer.py b/src/importer.py index 80e218c..9c07671 100644 --- a/src/importer.py +++ b/src/importer.py @@ -355,7 +355,7 @@ def recursively_apply_transform(nodes, node_index, pose_bones, parent_matrix): matrix = parent_matrix @ matrix pose_bone.matrix = matrix - + for _ in range(0, node.child_count): node_index = node_index + 1 node_index = recursively_apply_transform(nodes, node_index, pose_bones, pose_bone.matrix) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index b8349a4..1c7094c 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -629,10 +629,6 @@ def 
_read_animation_schema(self, f): # Generally the data flip flops between Location/Rotation/etc... is_location = True - # If we're basically done this flag will signify that we need to quit - # There may still be 0xFFFF data at the end of a schema though... - wrap_up = False - track_1_size = self._unpack('H', f)[0] track_2_size = self._unpack('H', f)[0] @@ -648,7 +644,48 @@ def _read_animation_schema(self, f): if track_2_size > 0 and track_1_size == 0: current_track = 2 - while total_data_read < total_track_size: + # This almost works. It turns out everything is location/rotation + # But we just need to determine if the file is compressed... + flag_position = f.tell() + is_compressed = False + + # Hacky way to figure out if we're dealing with compressed location data + # Some special cases up front, then a quick peak at the data to determine if compression is used! + if total_track_size == 0x6: + # If we're simply one location entry, and it's 0x6..then we're compressed! + is_compressed = True + elif total_track_size == 0x8: + # We don't actually care about location here, as it's not used! + is_compressed = False + else: + _total = 0 + # Run ahead a bit and check + while True: + flag = self._unpack('H', f)[0] + + if flag == 0xFFFF: + continue + + if flag >= 0x8000: + flag -= 0x8000 + + flag -= _total + + if flag == 0x6: + is_compressed = True + break + elif flag == 0xC: + is_compressed = False + break + + _total += flag + # End While + + # Move back to the flag position + f.seek(flag_position, 0) + + # Use is_compressed to determine what we're stepping up by, and swap location = not location! + while True: #not wrap_up and total_data_read < total_track_size: debug_ftell = f.tell() # Read the next flag @@ -658,29 +695,28 @@ def _read_animation_schema(self, f): compression_schema.append(self._read_flag(is_location, current_track, flag)) is_location = not is_location continue - elif wrap_up == True: - # We need to go back a bit now... 
+ # End condition + elif total_data_read == total_track_size: + # Okay no more flags? Then move back one flag's worth of bytes, and quit f.seek(-2, 1) break - # - bytes_written = flag - # So if we're at or above 0x8000, we're on track 1 # To get the real bytes written, we need to remove the 0x8000 bit... - if bytes_written >= 0x8000: + if flag >= 0x8000: current_track = 1 - bytes_written -= 0x8000 else: current_track = 2 - # We're reading ahead, so we're okay to skip 0's - if bytes_written == 0: - is_location = not is_location - continue - # Get the size of the data - data_length = bytes_written - track_data_read[ current_track - 1 ] + # If rotation + data_length = 0x8 + + if is_location: + if is_compressed: + data_length = 0x6 + else: + data_length = 0xC compression_schema.append(self._read_flag(is_location, current_track, data_length)) @@ -688,24 +724,6 @@ def _read_animation_schema(self, f): total_data_read += data_length track_data_read[ current_track - 1 ] += data_length - - # Okay now we need to guess if we're at the end - # - final_is_location = True - final_data_length = 0x6 - - if final_data_length + total_data_read != total_track_size: - final_data_length = 0xC - - if final_data_length + total_data_read != total_track_size: - final_is_location = False - final_data_length = 0x8 - - if final_data_length + total_data_read == total_track_size: - print("Found final data (%d), wrapping up!" % final_data_length) - compression_schema.append(self._read_flag(final_is_location, current_track, final_data_length)) - is_location = not final_is_location - wrap_up = True # End While ## @@ -847,7 +865,7 @@ def decompres_quat(compresed_quat): for i in range(len(compresed_quat)): if compresed_quat[i] != 0: - compresed_quat[i] / largest_number + compresed_quat[i] /= largest_number return Quaternion( compresed_quat ) @@ -868,7 +886,6 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n for anim_info in anim_infos: # For ... 
{ 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } - keyframe_transforms = [] section = animation_schemas[anim_info.binding.animation_header_index] for keyframe_index in range(anim_info.animation.keyframe_count): @@ -889,6 +906,8 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # Let's assume that it's always Location/Rotation for flag in flags: + debug_ftell = f.tell() + process = flag['process'] if flag['type'] == 'location': From f28a6853812fbbd0be2dbe71883f9e19e5c6b303 Mon Sep 17 00:00:00 2001 From: Jake Date: Wed, 3 Feb 2021 14:43:12 -0800 Subject: [PATCH 09/24] Update the model decompression python script. Supports drag and drop. --- research/gci_mdl_decompress.py | 19 ------------------- research/ltjex_mdl_decompress.py | 27 +++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 19 deletions(-) delete mode 100644 research/gci_mdl_decompress.py create mode 100644 research/ltjex_mdl_decompress.py diff --git a/research/gci_mdl_decompress.py b/research/gci_mdl_decompress.py deleted file mode 100644 index b21287f..0000000 --- a/research/gci_mdl_decompress.py +++ /dev/null @@ -1,19 +0,0 @@ -import zlib, sys - -# -# Gotham City Imposters (and probably others) model file seems to be zlib compressed. -# Here's a quick decompress script, it'll skip past some header bytes. -# - -# Insert model name here, this expects the file in the current directory -# The file will be saved as out.model00p -fp = open('bat.mdl', 'rb') - -# -# Don't touch below here!! -# -fp.seek(8, 1) -file_data = zlib.decompress(fp.read()) -f = open('out.model00p', 'wb') -f.write(file_data) -f.close() \ No newline at end of file diff --git a/research/ltjex_mdl_decompress.py b/research/ltjex_mdl_decompress.py new file mode 100644 index 0000000..ba653b8 --- /dev/null +++ b/research/ltjex_mdl_decompress.py @@ -0,0 +1,27 @@ +# +# Gotham City Imposters (and probably others) model file seems to be zlib compressed. 
+# Here's a quick decompress script, it'll skip past some header bytes. +# +# Just drag and drop files here! +# +import zlib, sys, os + +if not os.path.isdir('./out'): + os.makedirs('./out') + +file_paths = sys.argv[1:] +for path in file_paths: + if not os.path.isfile(path): + continue + + file_name = os.path.basename(path) + + fp = open(path, 'rb') + fp.seek(8, 1) + out_file_data = zlib.decompress(fp.read()) + f = open('./out/%s' % file_name, 'wb') + f.write(out_file_data) + f.close() + + print("Decompressed file to ./out/%s" % file_name) + From f2a4746433290658444051b03f15b76fb86ed7f1 Mon Sep 17 00:00:00 2001 From: Jake Date: Wed, 3 Feb 2021 14:43:38 -0800 Subject: [PATCH 10/24] Some initial research on .mdl files. (For funsies.) --- src/importer.py | 16 +++++++++++++++- src/reader_model00p_pc.py | 26 ++++++++++++++++++++------ 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/importer.py b/src/importer.py index 9c07671..913b74c 100644 --- a/src/importer.py +++ b/src/importer.py @@ -718,9 +718,23 @@ class ImportOperatorModel00p(bpy.types.Operator, bpy_extras.io_utils.ImportHelpe maxlen=255, # Max internal buffer length, longer would be clamped. 
) + should_import_animations: BoolProperty( + name="Import Animations", + description="When checked, animations will be imported as actions.", + default=True, + ) + def draw(self, context): layout = self.layout + + box = layout.box() + box.label(text='Animations') + box.row().prop(self, 'should_import_animations') + + + + def execute(self, context): @@ -738,7 +752,7 @@ def execute(self, context): image = None options = ModelImportOptions() - options.should_import_animations = True + options.should_import_animations = self.should_import_animations options.image = image #try: # import_model(model, options) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 1c7094c..2002ad1 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -426,7 +426,10 @@ def _read_node(self, f): node = Node() name_offset = self._unpack('I', f)[0] node.name = self._get_string_from_table(name_offset) - node.index = self._unpack('H', f)[0] + + if self.version != 42: + node.index = self._unpack('H', f)[0] + node.flags = self._unpack('b', f)[0] node.location = self._read_vector(f) @@ -765,22 +768,27 @@ def from_file(self, path): self.version = self._unpack('I', f)[0] - # Fear and Condemned - if self.version not in [33, 34]: + # FEAR, Condemned, FEAR 2 + if self.version not in [33, 34, 42]: raise Exception('Unsupported File Version! 
Importer currently only supports v33/v34.') # End If model.version = self.version keyframe_count = self._unpack('I', f)[0] + animation_count = self._unpack('I', f)[0] + self.node_count = self._unpack('I', f)[0] piece_count = self._unpack('I', f)[0] child_model_count = self._unpack('I', f)[0] self.lod_count = self._unpack('I', f)[0] socket_count = self._unpack('I', f)[0] - animation_weight_count = self._unpack('I', f)[0] - animation_schema_count = self._unpack('I', f)[0] + + if self.version != 42: + animation_weight_count = self._unpack('I', f)[0] + animation_schema_count = self._unpack('I', f)[0] + string_data_length = self._unpack('I', f)[0] physics_weight_count = self._unpack('I', f)[0] physics_shape_count = self._unpack('I', f)[0] @@ -793,8 +801,11 @@ def from_file(self, path): ragdoll_constraint_count = self._unpack('I', f)[0] wheel_constraint_count = self._unpack('I', f)[0] prismatic_constraint_count = self._unpack('I', f)[0] + # End - animation_data_length = self._unpack('I', f)[0] + if self.version != 42: + animation_data_length = self._unpack('I', f)[0] + self.string_table = self._read_fixed_string(string_data_length, f) # @@ -803,6 +814,9 @@ def from_file(self, path): model.nodes = [self._read_node(f) for _ in range(self.node_count)] build_undirected_tree(model.nodes) + if self.version == 42: + return model + # # Animations # From a7b2515dfaaad91e7013d982aacfa9c72271eb7d Mon Sep 17 00:00:00 2001 From: Jake Date: Fri, 5 Feb 2021 20:31:13 -0800 Subject: [PATCH 11/24] Added binary template for 010 editor (incomplete) --- research/pc_model00p.bt | 421 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 421 insertions(+) create mode 100644 research/pc_model00p.bt diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt new file mode 100644 index 0000000..8e889ba --- /dev/null +++ b/research/pc_model00p.bt @@ -0,0 +1,421 @@ +//------------------------------------------------ +//--- 010 Editor v11.0 Binary Template +// +// File: PC Model00p Template 
+// Authors: HeyThereCoffeee +// Version: +// Purpose: +// Category: +// File Mask: +// ID Bytes: +// History: +//------------------------------------------------ + +struct LTString { + ushort Length; + char Value[Length]; +}; + +struct LTUV { + float u,v; +}; + +// Divide by 64 +struct LTCompressedVector { + short x,y,z; +}; + +struct LTVector { + float x,y,z; +}; + +// Divide by 0x7FFF +struct LTCompressedQuat { + ushort x,y,z,w; +}; + +struct LTQuat { + float x,y,z,w; +}; + +struct LTMatrix { + LTQuat m[4]; +}; + +struct Header { + char Format[4]; + int Version; + int KeyframeCount; + int AnimationCount; + int NodeCount; + int PieceCount; + int ChildModelCount; // For some reason this always includes the current model, so it's always +1 more! + int LODCount; + int SocketCount; + int AnimationWeightCount; + int AnimationHeaderCount; // Use to be AnimationBindingCount... + int StringDataLength; // Length in bytes of the text section + int PhysicsWeightCount; + int PhysicsShapeCount; + int Unk12; + int Unk13; + int StiffSpringConstraintCount; + int HingeConstraintCount; + int LimitedHingeConstraintCount; + int RagdollConstraintCount; + int WheelConstraintCount; + int PrismaticConstraintCount; + int AnimationDataLength;// Maybe Mesh or Animation Data? 
+ char StringList[StringDataLength]; +}; + +/* + FUN_008a6b6b(&DAT_00cdb341,&PTR_LAB_00c9f4c0,FUN_008a6b6b, + (uint)(in_NT & 1) * 0x4000 | (uint)(in_OF & 1) * 0x800 | (uint)(in_IF & 1) * 0x200 | + (uint)(in_TF & 1) * 0x100 | (uint)(in_SF & 1) * 0x80 | (uint)(in_ZF & 1) * 0x40 | + (uint)(in_AF & 1) * 0x10 | (uint)(in_PF & 1) * 4 | (uint)(in_CF & 1) | + (uint)(in_ID & 1) * 0x200000 | (uint)(in_VIP & 1) * 0x100000 | + (uint)(in_VIF & 1) * 0x80000 | (uint)(in_AC & 1) * 0x40000); + +*/ +struct AnimationHeaderOld { + short UnkShort; + short what; + char what2; + int Unk[19]; + int Unk2[9]; + int AnimationDataLength; + ushort Flags[4]; +}; + +struct Node { + int NameOffset; + short Index; // Node Index + char Flag; // Node Flags + LTVector Location; + LTQuat Rotation; + int ChildCount; +}; + +struct Transform { + LTVector Position; + LTCompressedQuat Rotation; +}; + +struct TransformNoRotation { + LTVector Position; +}; + +struct TransformNoPosition { + LTCompressedQuat Rotation; +}; + + + +// Forward +struct AnimationHeader; + +struct Animation (int NodeCount, AnimationHeader &animHdr) { +/* + if (animHdr.Type3 & 0x8000) { + Transform FirstTransform;//[NodeCount]; + } else { + TransformNoPosition FirstTransform; + } + + + if (animHdr.Type5 & 0x8000) { + Transform Transforms[NodeCount-1]; + } else { + LTCompressedQuat Transforms[NodeCount-1]; + } +*/ + + +/* + if (animHdr.Type == 12) { + TransformNoRotation Transforms[NodeCount]; + } + else if (animHdr.Type == 16) { + TransformNoPosition Transforms[NodeCount]; + } else if (animHdr.Type == 40) { + Transform Transforms[NodeCount]; + } + + if (animHdr.Type2 == 12) { + TransformNoRotation Transforms2[NodeCount]; + } + else if (animHdr.Type2 == 16) { + TransformNoPosition Transforms2[NodeCount]; + } else if (animHdr.Type2 == 40) { + Transform Transforms2[NodeCount]; + } +*/ + + // 0xFFFF = no data + // 0x8000 = data! + // If 0x8000, then a running count in bytes is OR'd onto 0x8000. 
+ // Ex/ First instance is 0x8000, a vector sized to 12 bytes occurs, the next 0x8000 instance will be 0x800C. + +/* +It starts off with the total data length, and then 0. Next two things can happen. (For reference: 65535 = 0xFFFF, and 32768 = 0x8000) + +It can be 0xFFFF which means no data/carry over the previous data (RLE), or it can be a value OR'd by 0x8000. If the value is 0x8000 it's OR'd by a running total of bytes. + +So to read in the flags you need to do something like this: +var total = read(2) +skip(2) // skip the 0 padding + +var running_total = 0 +var is_data_pos = True +while running_total < total: + var current_flag = read(2) + // Data seems to be stored as Pos, Rot, Pos, Rot... + // So flip the flag + is_data_pos = !is_data_pos + + // Skip this value! + if current_flag == 0xFFFF: + continue + + // Maybe not needed?? + // Get the amount that's been read so far + //var read_so_far = current_value - 0x8000 + + if is_data_pos: + running_total += 12 + else: + running_total += 8 // 4 shorts + +Although the RLE part seems a bit tricky, as it will only create another flag section if the data has changed. So I think I need to also track the total number of frames per this animation to determine which frames have data vs what is carried over. + +*/ +/* + // Example is based off 1x1 square + if (animHdr.Type3 != 65535) {//& 0x8000 == 0) { + LTVector Position; + } + if (animHdr.Type4 != 65535) { + LTCompressedQuat Rotation; + } + if (animHdr.Type5 != 65535) { + LTVector Position2; + } + if (animHdr.Type6 != 65535) { + LTCompressedQuat Rotation2; + } + */ + +}; + +// This is RLE boo! +// Type1 seems to be the size of the actual animation data, but it's sometimes not? +// There's then a lot of 0xFFFFFFF and some 0x8000! + +// Maybe?? +// Data length? +// Always ends with 0 section +// 0xFFFF signifies nothing here mate +// 0x8000 signifies repeat +// VALUE | 0x8000 signifies repeating this value +// Anything | 0x8000 eventually adds up to the data length? 
(Not in all cases) +struct AnimationHeader { + int Unk; + + ushort Track1; + ushort Track2; + + // Divide by 2 (without having to take into account 0!) + local int Track1Count = Track1 >> 1; + local int Track2Count = Track2 >> 1; + + //ushort Flags[Track1Count + Track2Count + 4000]; +}; + +struct AnimationKeyframe { + int Time; + int StringOffset; +}; + +struct AnimationInfo { + LTVector Dims; + float Radius; + int NameOffset; + int InterpTime; + int SchemaIndex; + int DataPosition; + int IsCompressed; + int KeyframeCount; + AnimationKeyframe Keyframes[KeyframeCount]; +}; + +struct Piece { + int Unk[18]; + char Unk; + char TextureIndex; + char Translucent; + char CastsShadow; +}; + +struct Test { + int Here; +}; + +struct AnimSchema { + char Data; +}; + +struct AnimData { + char Data; +}; + +struct ChildModel { + LTString Name; +}; + +struct ChildModelHeader { + int ChildModelCount; + ChildModel ChildModels[ChildModelCount - 1] ; +}; + +struct PhysicsWeightSet { + int NameOffset; +}; + +struct PhysicsHeader { + int VisNode; + float VisRadius; + int Unk1; + if (Unk1 > 0) { + char UnkFlag; + LTVector Offset; + } + int Unk2; + int WeightSetCount; + PhysicsWeightSet WeightSets[WeightSetCount] ; +}; + +struct AnimationWeightSet (int NodeCount) { + // Actually NameOffset + int NameOffset; + int Unk; // ? + float Weights[NodeCount]; +}; + +struct AnimationWeightSetHeader (int NodeCount) { + int Count; + AnimationWeightSet WeightSets(NodeCount)[Count] ; +}; + +struct LODGroup { + float LodDistance; + char TextureIndex; + char Translucent; + char CastShadow; + int Unk1; + int Unk2; +}; + +struct SubLODGroup { + float LodDistance; + char TextureIndex; + char Translucent; + char CastShadow; + int Unk1; +}; + +struct LODGroupHeader { + int HasLODs; + if (HasLODs > 0) { + int NameOffset; + int LODCount; + LODGroup LODGroups; + SubLODGroup SubLODs[ LODCount - 1 ] ; + } +}; + +struct Unk2 { + int Unk3; // LOD Related? + int PieceCount; + int Unk5; // LOD Related? 
+ int Unk6; + int Unk7; + if (PieceCount == 0) { + LTString MeshFile; + } +}; + +struct Mesh { + LTVector Vertex; + LTVector Normals; + LTUV UVs; + LTVector Unk1; + LTVector Unk4; + uchar WeightInfo[4]; + int Unk3; +}; + +struct MeshHeader { + int TextureCount; + + int DataLength; + // DataCount / 2 = tri-fs count + int DataCount; + Mesh MeshData[DataLength/64]; +}; + +struct ContainsGeometry { + char Flag; +}; + +struct Socket { + int NodeIndex; + int NameOffset; + LTQuat Rotation; + LTVector Position; + float Scale; +}; + +struct SocketHeader { + int SocketCount; + Socket Sockets[SocketCount] ; +}; + +Header hdr; + +// String database, we need to skip it for now +//FSeek(FTell() + hdr.StringDataLength); + +Node Nodes[hdr.NodeCount]; +AnimationHeader animHdr; + +if (hdr.AnimationDataLength > 0) { + // CUBE + //AnimSchema skip[8]; // Replace this with the schema length + // DELTA + //AnimSchema skip[652]; // delta 652 + // ROPE + AnimSchema skip[32]; + // Condemned Default00p + //AnimSchema skip[8]; +} else { + AnimSchema skip[4]; +} +AnimData skip2[hdr.AnimationDataLength]; +//Animation Anim(hdr.NodeCount, animHdr)[hdr.AnimationCount] ; +AnimationInfo animInfo[hdr.AnimationCount] ; + +AnimationWeightSetHeader animWeightSetHeader(hdr.NodeCount); + +SocketHeader socketHeader; + +ChildModelHeader childModels; + +ContainsGeometry hasGeometry; +// Geometry related +PhysicsHeader PhysicsInfo; + + +LODGroupHeader lodGroupHeader; +Unk2 unk2; +MeshHeader meshHeader; \ No newline at end of file From ffc8b6842367af6575ce535b37613a664d092fdf Mon Sep 17 00:00:00 2001 From: Jake Date: Fri, 5 Feb 2021 22:06:33 -0800 Subject: [PATCH 12/24] Added some super basic mesh reading. 
--- research/pc_model00p.bt | 12 +- src/abc.py | 28 ++ src/importer.py | 2 +- src/reader_model00p_pc.py | 567 +++++++++++++++----------------------- 4 files changed, 247 insertions(+), 362 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 8e889ba..266429e 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -69,15 +69,6 @@ struct Header { char StringList[StringDataLength]; }; -/* - FUN_008a6b6b(&DAT_00cdb341,&PTR_LAB_00c9f4c0,FUN_008a6b6b, - (uint)(in_NT & 1) * 0x4000 | (uint)(in_OF & 1) * 0x800 | (uint)(in_IF & 1) * 0x200 | - (uint)(in_TF & 1) * 0x100 | (uint)(in_SF & 1) * 0x80 | (uint)(in_ZF & 1) * 0x40 | - (uint)(in_AF & 1) * 0x10 | (uint)(in_PF & 1) * 4 | (uint)(in_CF & 1) | - (uint)(in_ID & 1) * 0x200000 | (uint)(in_VIP & 1) * 0x100000 | - (uint)(in_VIF & 1) * 0x80000 | (uint)(in_AC & 1) * 0x40000); - -*/ struct AnimationHeaderOld { short UnkShort; short what; @@ -357,7 +348,8 @@ struct Mesh { struct MeshHeader { int TextureCount; - + + // DataLength / 64 = MeshData int DataLength; // DataCount / 2 = tri-fs count int DataCount; diff --git a/src/abc.py b/src/abc.py index eb0e271..4ceae95 100644 --- a/src/abc.py +++ b/src/abc.py @@ -93,6 +93,14 @@ def __init__(self): self.s = Vector() self.t = Vector() + # Model00p specific + self.distance = 0.0 + self.texture_index = 0 + self.translucent = 0 + self.cast_shadow = 0 + self.unk_1 = 0 + self.unk_2 = 0 + def get_face_vertices(self, face_index): return [self.vertices[vertex.vertex_index] for vertex in self.faces[face_index].vertices] @@ -240,7 +248,24 @@ def __init__(self): self.name = '' self.build_number = 0 self.transforms = [] +# +# Model00p+ specific +# +class Physics(object): + def __init__(self): + self.vis_node_index = 0 + self.vis_radius = 0.0 + + # Unk + self.unk_1 = 0 + # If Unk1 > 0 ? 
+ self.unk_flag = 0 + self.unk_offset = Vector() + # End If + self.unk_2 = 0 + self.weight_set_count = 0 + self.weight_sets = [] # Array of name offsets class Model(object): def __init__(self): @@ -269,6 +294,9 @@ def __init__(self): # LTB specific + # Model00p specific + self.physics = None + @property def keyframe_count(self): diff --git a/src/importer.py b/src/importer.py index 913b74c..d116c34 100644 --- a/src/importer.py +++ b/src/importer.py @@ -721,7 +721,7 @@ class ImportOperatorModel00p(bpy.types.Operator, bpy_extras.io_utils.ImportHelpe should_import_animations: BoolProperty( name="Import Animations", description="When checked, animations will be imported as actions.", - default=True, + default=False, ) def draw(self, context): diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 2002ad1..67a6090 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -44,6 +44,9 @@ def __init__(self): self.lod_count = 0 self.string_table = "" + # Temp until I can figure out how animations work! + self._read_animations = False + # # Wrapper around .io.unpack that can eventually handle big-endian reads. # @@ -130,295 +133,95 @@ def _read_null_mesh(self, lod, f): f.seek(4, 1) return lod - def _read_rigid_mesh(self, lod, f): - data_type = unpack('4I', f) - bone = unpack('I', f)[0] - - # We need face vertex data alongside vertices! 
- face_vertex_list = [] - - for mask in data_type: - for _ in range(lod.vert_count): - vertex = Vertex() - face_vertex = FaceVertex() + def _read_mesh_data(self, lod, f): - # Dirty flags - is_vertex_used = False - is_face_vertex_used = False - - if mask & VTX_Position: - vertex.location = self._read_vector(f) - - # One bone per vertex - weight = Weight() - weight.node_index = bone - weight.bias = 1.0 - - vertex.weights.append(weight) - - is_vertex_used = True - if mask & VTX_Normal: - vertex.normal = self._read_vector(f) - is_vertex_used = True - if mask & VTX_Colour: - vertex.colour = unpack('i', f)[0] - is_vertex_used = True - if mask & VTX_UV_Sets_1: - face_vertex.texcoord.xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_2: - face_vertex.extra_texcoords[0].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_3: - face_vertex.extra_texcoords[1].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_4: - face_vertex.extra_texcoords[2].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_BasisVector: - vertex.s = self._read_vector(f) - vertex.t = self._read_vector(f) - is_vertex_used = True - # End If - - if is_vertex_used: - lod.vertices.append(vertex) - - if is_face_vertex_used: - face_vertex_list.append(face_vertex) - - # End For - # End For - - # Make sure our stuff is good!! - print ("Vert Count Check: %d/%d" % (lod.vert_count, len(lod.vertices))) - assert(lod.vert_count == len(lod.vertices)) - - # We need a "global" face, we'll fill it and re-use it. + vertex = Vertex() face = Face() - for _ in range(lod.face_count * 3): - vertex_index = unpack('H', f)[0] - - face_vertex = face_vertex_list[vertex_index] - face_vertex.vertex_index = vertex_index - - # If we have room, append! - if len(face.vertices) < 3: - face.vertices.append(face_vertex) - # End If - - # If we're now over, then flush! 
- if len(face.vertices) >= 3: - lod.faces.append(face) - # Make a new face, and append our face vertex - face = Face() - # End If - # End For - - # Make sure our stuff is good!! - print ("Face Count Check: %d/%d" % (lod.face_count, len(lod.faces))) - assert(lod.face_count == len(lod.faces)) - - return lod - - def _read_skeletal_mesh(self, lod, f): - reindexed_bone = unpack('B', f)[0] - data_type = unpack('4I', f) - - matrix_palette = unpack('B', f)[0] - - print("Matrix Palette? %d" % matrix_palette) - - # We need face vertex data alongside vertices! - face_vertex_list = [] - - for mask in data_type: - for _ in range(lod.vert_count): - vertex = Vertex() - face_vertex = FaceVertex() - - # Dirty flags - is_vertex_used = False - is_face_vertex_used = False - - if mask & VTX_Position: - vertex.location = self._read_vector(f) - is_vertex_used = True - - weights = [] - - weight = Weight() - weight.bias = 1.0 - - for i in range(lod.max_bones_per_face): - # Skip the first one - if i == 0: - continue - # End If - - # There's 3 additional blends, - # If ... 
max_bones_per_face >= 2,3,4 - if lod.max_bones_per_face >= (i+1): - blend = unpack('f', f)[0] - weight.bias -= blend - - blend_weight = Weight() - blend_weight.bias = blend - weights.append(blend_weight) - # End If - # End For - - weights.append(weight) - - vertex.weights = weights - if mask & VTX_Normal: - vertex.normal = self._read_vector(f) - is_vertex_used = True - if mask & VTX_Colour: - vertex.colour = unpack('i', f)[0] - is_vertex_used = True - if mask & VTX_UV_Sets_1: - face_vertex.texcoord.xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_2: - face_vertex.extra_texcoords[0].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_3: - face_vertex.extra_texcoords[1].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_UV_Sets_4: - face_vertex.extra_texcoords[2].xy = unpack('2f', f) - is_face_vertex_used = True - if mask & VTX_BasisVector: - vertex.s = self._read_vector(f) - vertex.t = self._read_vector(f) - is_vertex_used = True - # End If - - if is_vertex_used: - lod.vertices.append(vertex) - - if is_face_vertex_used: - face_vertex_list.append(face_vertex) - - # End For - # End For + face_vertex = FaceVertex() - # Make sure our stuff is good!! - print ("Vert Count Check: %d/%d" % (lod.vert_count, len(lod.vertices))) - assert(lod.vert_count == len(lod.vertices)) + vertex.location = self._read_vector(f) + vertex.normal = self._read_vector(f) - # We need a "global" face, we'll fill it and re-use it. - face = Face() - for _ in range(lod.face_count * 3): - vertex_index = unpack('H', f)[0] + face_vertex.texcoord.xy = self._unpack('2f', f) - face_vertex = face_vertex_list[vertex_index] - face_vertex.vertex_index = vertex_index + lod.vertices.append(vertex) + #lod.faces.append(face_vertex) - # If we have room, append! - if len(face.vertices) < 3: - face.vertices.append(face_vertex) - # End If + # Test to see what these unk vectors could be! 
+ # vertex.location = self._read_vector(f) + # lod.vertices.append(vertex) + # vertex.location = self._read_vector(f) + # lod.vertices.append(vertex) + unk_vec_1 = self._read_vector(f) + unk_vec_2 = self._read_vector(f) - # If we're now over, then flush! - if len(face.vertices) >= 3: - lod.faces.append(face) - # Make a new face, and append our face vertex - face = Face() - # End If - # End For - # Make sure our stuff is good!! - print ("Face Count Check: %d/%d" % (lod.face_count, len(lod.faces))) - assert(lod.face_count == len(lod.faces)) + - bone_set_count = unpack('I', f)[0] + weight_info = self._unpack('4b', f) + unk_1 = self._unpack('I', f)[0] - for _ in range(bone_set_count): - index_start = unpack('H', f)[0] - index_count = unpack('H', f)[0] + return lod - bone_list = unpack('4B', f) + def _read_sub_lod(self, f): + lod = LOD() - # ??? - index_buffer_index = unpack('I', f)[0] + lod.distance = unpack('f', f)[0] + lod.texture_index = unpack('b', f)[0] + lod.translucent = unpack('b', f)[0] + lod.cast_shadow = unpack('b', f)[0] + lod.unk_1 = unpack('I', f)[0] + + return lod - # Okay, now we can fill up our node indexes! - for vertex_index in range(index_start, index_start + index_count): - vertex = lod.vertices[vertex_index] + def _read_lod(self, f): + lod = self._read_sub_lod(f) + lod.unk_2 = unpack('I', f)[0] - # We need to re-build the weight list for our vertex - weights = [] + return lod - for (index, bone_index) in enumerate(bone_list): - # If we've got an invalid bone (255) then ignore it - if bone_index == Invalid_Bone: - continue - # End If + def _read_piece(self, f): + piece = Piece() - vertex.weights[index].node_index = bone_index - # Keep this one! - weights.append(vertex.weights[index]) - # End For + debug_ftell = f.tell() - total = 0.0 - for weight in weights: - total += weight.bias + has_lods = unpack('I', f)[0] - assert(total != 0.0) + # Not quite sure this is right... 
+ if has_lods == 0: + return piece - vertex.weights = weights - #End For - # End For + name_offset = unpack('I', f)[0] + piece.name = self._get_string_from_table(name_offset) + lod_count = unpack('I', f)[0] - return lod + piece.lods = [ self._read_lod(f) ] + + for _ in range(lod_count - 1): + piece.lods.append(self._read_sub_lod(f)) - def _read_lod(self, f): - lod = LOD() + # Unknown values! - lod.texture_count = unpack('I', f)[0] - lod.textures = unpack('4I', f) - lod.render_style = unpack('I', f)[0] - lod.render_priority = unpack('b', f)[0] + unk_1 = unpack('I', f)[0] + unk_2 = unpack('I', f)[0] + unk_3 = unpack('I', f)[0] - lod.type = unpack('I', f)[0] + unk_4 = unpack('I', f)[0] + unk_5 = unpack('I', f)[0] - # Check if it's a null mesh, it skips a lot of the data... - if lod.type == LTB_Type_Null_Mesh: - # Early return here, because there's no more data... - lod = self._read_null_mesh(lod, f) - else: - # Some common data - obj_size = unpack('I', f)[0] - lod.vert_count = unpack('I', f)[0] - lod.face_count = unpack('I', f)[0] - lod.max_bones_per_face = unpack('I', f)[0] - lod.max_bones_per_vert = unpack('I', f)[0] - - if lod.type == LTB_Type_Rigid_Mesh: - lod = self._read_rigid_mesh(lod, f) - elif lod.type == LTB_Type_Skeletal_Mesh: - lod = self._read_skeletal_mesh(lod, f) + # End unknown values - nodes_used_count = unpack('B', f)[0] - nodes_used = [unpack('B', f)[0] for _ in range(nodes_used_count)] + piece.material_index = unpack('I', f)[0] - return lod + debug_ftell = f.tell() - def _read_piece(self, f): - piece = Piece() + data_length = unpack('I', f)[0] + index_times_two = unpack('I', f)[0] - piece.name = self._read_string(f) - lod_count = unpack('I', f)[0] - piece.lod_distances = [unpack('f', f)[0] for _ in range(lod_count)] - piece.lod_min = unpack('I', f)[0] - piece.lod_max = unpack('I', f)[0] - piece.lods = [self._read_lod(f) for _ in range(lod_count)] + print("Face Count ", data_length / 64) - # Just use the first LODs first texture - if lod_count > 0: 
- piece.material_index = piece.lods[0].textures[0] + for _ in range( int(data_length / 64) ): + piece.lods[0] = self._read_mesh_data(piece.lods[0], f) return piece @@ -554,10 +357,16 @@ def _read_animation(self, f): def _read_socket(self, f): socket = Socket() socket.node_index = unpack('I', f)[0] - socket.name = self._read_string(f) + + name_offset = self._unpack('I', f)[0] + socket.name = self._get_string_from_table(name_offset) + socket.rotation = self._read_quaternion(f) socket.location = self._read_vector(f) - socket.scale = self._read_vector(f) + + # Only one float! + socket.scale = self._unpack('f', f)[0] + return socket def _read_anim_binding(self, f): @@ -595,11 +404,30 @@ def _read_anim_info(self, f): def _read_weight_set(self, f): weight_set = WeightSet() - weight_set.name = self._read_string(f) - node_count = unpack('I', f)[0] - weight_set.node_weights = [unpack('f', f)[0] for _ in range(node_count)] + name_offset = unpack('I', f)[0] + weight_set.name = self._get_string_from_table(name_offset) + + unk_1 = unpack('I', f)[0] + weight_set.node_weights = [unpack('f', f)[0] for _ in range(self.node_count)] + return weight_set + def _read_physics(self, f): + physics = Physics() + physics.vis_node_index = self._unpack('I', f)[0] + physics.vis_radius = self._unpack('f', f)[0] + physics.unk_1 = self._unpack('I', f)[0] + + if physics.unk_1 > 0: + physics.unk_flag = self._unpack('b', f)[0] + physics.unk_offset = self._read_vector(f) + # End If + + physics.unk_2 = self._unpack('I', f)[0] + physics.weight_set_count = self._unpack('I', f)[0] + physics.weight_sets = [self._unpack('I', f)[0] for _ in range(physics.weight_set_count)] + + return physics def _read_flag(self, is_location, current_track, data_length): # Location data (Not Compressed and Compressed) @@ -843,114 +671,151 @@ def from_file(self, path): #model.anim_bindings = [self._read_anim_binding(f) for _ in range(animation_binding_count)] anim_infos = [self._read_anim_info(f) for _ in 
range(animation_count)] - animation_binding_position = f.tell() - f.seek(animation_position, 0) + weight_set_count = unpack('I', f)[0] + model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] + + # Only read animations we want toooo! + # This is disabled for test use! + if self._read_animations: + animation_binding_position = f.tell() + f.seek(animation_position, 0) - ######################################################################### - # Animation Pass + ######################################################################### + # Animation Pass - # Special case read - locations = [] - rotations = [] + # Special case read + locations = [] + rotations = [] - default_locations = [] - default_rotations = [] + default_locations = [] + default_rotations = [] - # Note: Defaults should be the node transform values, not Vector(0,0,0) for example. + # Note: Defaults should be the node transform values, not Vector(0,0,0) for example. - for node in model.nodes: - default_locations.append(node.location) - default_rotations.append(node.rotation) + for node in model.nodes: + default_locations.append(node.location) + default_rotations.append(node.rotation) - def decompress_vec(compressed_vec): - for i in range(len(compressed_vec)): - if compressed_vec[i] != 0: - compressed_vec[i] /= 64.0 + def decompress_vec(compressed_vec): + for i in range(len(compressed_vec)): + if compressed_vec[i] != 0: + compressed_vec[i] /= 64.0 - return Vector( compressed_vec ) + return Vector( compressed_vec ) - # Not really it, but a starting point! - def decompres_quat(compresed_quat): - # Find highest number, assume that's 1.0 - largest_number = -1 - for quat in compresed_quat: - if quat > largest_number: - largest_number = quat + # Not really it, but a starting point! 
+ def decompres_quat(compresed_quat): + # Find highest number, assume that's 1.0 + largest_number = -1 + for quat in compresed_quat: + if quat > largest_number: + largest_number = quat - for i in range(len(compresed_quat)): - if compresed_quat[i] != 0: - compresed_quat[i] /= largest_number + for i in range(len(compresed_quat)): + if compresed_quat[i] != 0: + compresed_quat[i] /= largest_number - return Quaternion( compresed_quat ) + return Quaternion( compresed_quat ) - # Small helper function - def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, node_index): - if keyframe_index == 0: - return defaults_list[node_index] + # Small helper function + def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, node_index): + if keyframe_index == 0: + return defaults_list[node_index] - transform = keyframe_list[ keyframe_index - 1 ] + transform = keyframe_list[ keyframe_index - 1 ] - if flag_type == 'location': - return transform.location - - return transform.rotation - + if flag_type == 'location': + return transform.location - # Should match up with animation count... - for anim_info in anim_infos: - # For ... { 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } - - section = animation_schemas[anim_info.binding.animation_header_index] - - for keyframe_index in range(anim_info.animation.keyframe_count): - section_index = 0 - for node_index in range(self.node_count): - - # Make sure we have space here... - try: - anim_info.animation.node_keyframe_transforms[node_index] - except: - anim_info.animation.node_keyframe_transforms.append([]) - - transform = Animation.Keyframe.Transform() + return transform.rotation + - # Flags are per keyframe - flags = [ section[ section_index ], section[ section_index + 1 ] ] - section_index += 2 + # Should match up with animation count... + for anim_info in anim_infos: + # For ... 
{ 'type': 'location', 'track': current_track, 'process': ANIM_No_Compression } - # Let's assume that it's always Location/Rotation - for flag in flags: - debug_ftell = f.tell() + section = animation_schemas[anim_info.binding.animation_header_index] + + for keyframe_index in range(anim_info.animation.keyframe_count): + section_index = 0 + for node_index in range(self.node_count): + + # Make sure we have space here... + try: + anim_info.animation.node_keyframe_transforms[node_index] + except: + anim_info.animation.node_keyframe_transforms.append([]) - process = flag['process'] - - if flag['type'] == 'location': - if process == ANIM_No_Compression: - transform.location = self._read_vector(f) - elif process == ANIM_Compression: - transform.location = decompress_vec(self._read_short_vector(f)) - elif process == ANIM_Carry_Over: - transform.location = handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_locations, keyframe_index, node_index ) - else: - if process == ANIM_Compression: - transform.rotation = decompres_quat(self._read_short_quaternion(f)) - elif process == ANIM_Carry_Over: - transform.rotation = handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_rotations, keyframe_index, node_index ) - # End For (Flag) - - # Insert the transform - anim_info.animation.node_keyframe_transforms[node_index].append(transform) + transform = Animation.Keyframe.Transform() + + # Flags are per keyframe + flags = [ section[ section_index ], section[ section_index + 1 ] ] + section_index += 2 + + # Let's assume that it's always Location/Rotation + for flag in flags: + debug_ftell = f.tell() + + process = flag['process'] + + if flag['type'] == 'location': + if process == ANIM_No_Compression: + transform.location = self._read_vector(f) + elif process == ANIM_Compression: + transform.location = decompress_vec(self._read_short_vector(f)) + elif process == ANIM_Carry_Over: + transform.location = 
handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_locations, keyframe_index, node_index ) + else: + if process == ANIM_Compression: + transform.rotation = decompres_quat(self._read_short_quaternion(f)) + elif process == ANIM_Carry_Over: + transform.rotation = handle_carry_over( flag['type'], anim_info.animation.node_keyframe_transforms[node_index], default_rotations, keyframe_index, node_index ) + # End For (Flag) + + # Insert the transform + anim_info.animation.node_keyframe_transforms[node_index].append(transform) + + # End For (Node) + # End For (Keyframe) - # End For (Node) - # End For (Keyframe) + model.animations.append(anim_info.animation) + - model.animations.append(anim_info.animation) - + # End Pass + ######################################################################### + # End If - # End Pass - ######################################################################### + # + # Sockets + # + socket_count = unpack('I', f)[0] + model.sockets = [self._read_socket(f) for _ in range(socket_count)] + # + # Child Models + # + child_model_count = unpack('I', f)[0] + model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] + + debug_ftell = f.tell() + + # Small flag determines if we excluded geometry on compile! + has_geometry = unpack('b', f)[0] + + # No geomtry? Then let's exit! + if has_geometry == 0: + return model + + # + # Physics + # + model.physics = self._read_physics(f) + + # + # Pieces + # + model.pieces = [self._read_piece(f) for _ in range(piece_count)] return model #old From dec9462031365b676b5fdef093b6dcbc5762b94e Mon Sep 17 00:00:00 2001 From: Jake Date: Mon, 8 Feb 2021 19:59:02 -0800 Subject: [PATCH 13/24] Mesh data now reads for the most part. No weight information yet, and only really works on rigid meshes. 
--- research/pc_model00p.bt | 7 ++- src/reader_model00p_pc.py | 115 +++++++++++++++++++++++++++++--------- 2 files changed, 93 insertions(+), 29 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 266429e..482e383 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -352,8 +352,9 @@ struct MeshHeader { // DataLength / 64 = MeshData int DataLength; // DataCount / 2 = tri-fs count - int DataCount; + int IndexListLength; Mesh MeshData[DataLength/64]; + short IndexList[IndexListLength/2]; // DataCount in bytes }; struct ContainsGeometry { @@ -383,11 +384,11 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - //AnimSchema skip[8]; // Replace this with the schema length + AnimSchema skip[8]; // Replace this with the schema length // DELTA //AnimSchema skip[652]; // delta 652 // ROPE - AnimSchema skip[32]; + //AnimSchema skip[32]; // Condemned Default00p //AnimSchema skip[8]; } else { diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 67a6090..47b8c63 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -47,6 +47,29 @@ def __init__(self): # Temp until I can figure out how animations work! self._read_animations = False + # Helper class for reading in mesh data + # TODO: Need to clean up and move into a model class whenever I get to that. + class MeshData(object): + def __init__(self): + self.vertex = Vector() + self.normal = Vector() + self.uvs = Vector() + self.unk_1 = Vector() + self.unk_2 = Vector() + self.weight_info = [] + self.unk_3 = -1 + + def read(self, reader, f): + self.vertex = reader._read_vector(f) + self.normal = reader._read_vector(f) + self.uvs.xy = reader._unpack('2f', f) + self.unk_1 = reader._read_vector(f) + self.unk_2 = reader._read_vector(f) + self.weight_info = reader._unpack('4b', f) + self.unk_3 = reader._unpack('I', f)[0] + + return self + # # Wrapper around .io.unpack that can eventually handle big-endian reads. 
# @@ -133,33 +156,74 @@ def _read_null_mesh(self, lod, f): f.seek(4, 1) return lod - def _read_mesh_data(self, lod, f): + # def _read_mesh_data(self, lod, f): - vertex = Vertex() - face = Face() - face_vertex = FaceVertex() + # vertex = Vertex() + # face = Face() + # face_vertex = FaceVertex() - vertex.location = self._read_vector(f) - vertex.normal = self._read_vector(f) + # vertex.location = self._read_vector(f) + # vertex.normal = self._read_vector(f) - face_vertex.texcoord.xy = self._unpack('2f', f) + # face_vertex.texcoord.xy = self._unpack('2f', f) - lod.vertices.append(vertex) - #lod.faces.append(face_vertex) + # lod.vertices.append(vertex) + # #lod.faces.append(face_vertex) - # Test to see what these unk vectors could be! - # vertex.location = self._read_vector(f) - # lod.vertices.append(vertex) - # vertex.location = self._read_vector(f) - # lod.vertices.append(vertex) - unk_vec_1 = self._read_vector(f) - unk_vec_2 = self._read_vector(f) + # # Test to see what these unk vectors could be! 
+ # # vertex.location = self._read_vector(f) + # # lod.vertices.append(vertex) + # # vertex.location = self._read_vector(f) + # # lod.vertices.append(vertex) + # unk_vec_1 = self._read_vector(f) + # unk_vec_2 = self._read_vector(f) - weight_info = self._unpack('4b', f) - unk_1 = self._unpack('I', f)[0] + # weight_info = self._unpack('4b', f) + # unk_1 = self._unpack('I', f)[0] + + # return lod + + + + def _read_mesh_data(self, f): + data_length = self._unpack('I', f)[0] + index_list_length = self._unpack('I', f)[0] + + print("Face Count ", data_length / 64) + + # Data Length / Structure Size + mesh_data_list = [ self.MeshData().read(self, f) for _ in range( int(data_length / 64) ) ] + index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] + + # Holds 3 face vertices + face = Face() + lod = LOD() + + real_index = 0 + + for index in index_list: + mesh_data = mesh_data_list[index] + + vertex = Vertex() + vertex.location = mesh_data.vertex + vertex.normal = mesh_data.normal + lod.vertices.append(vertex) + + face_vertex = FaceVertex() + face_vertex.texcoord = mesh_data.uvs + face_vertex.vertex_index = real_index + + face.vertices.append(face_vertex) + + if len(face.vertices) == 3: + lod.faces.append(face) + face = Face() + + real_index += 1 + # End For return lod @@ -215,13 +279,15 @@ def _read_piece(self, f): debug_ftell = f.tell() - data_length = unpack('I', f)[0] - index_times_two = unpack('I', f)[0] + #data_length = unpack('I', f)[0] + #index_list_length = unpack('I', f)[0] - print("Face Count ", data_length / 64) + #print("Face Count ", data_length / 64) + + #for _ in range( int(data_length / 64) ): + # piece.lods[0] = self._read_mesh_data(piece.lods[0], f) - for _ in range( int(data_length / 64) ): - piece.lods[0] = self._read_mesh_data(piece.lods[0], f) + piece.lods[0] = self._read_mesh_data(f) return piece @@ -465,9 +531,6 @@ def _read_animation_schema(self, f): total_track_size = track_1_size + track_2_size - # Safety, this 
shouldn't happen! - assert(total_track_size != 0) - # By default start on track 1 current_track = 1 From e68a3d9ee1249ecdf42254128e3a7e519d09363f Mon Sep 17 00:00:00 2001 From: Jake Date: Mon, 8 Feb 2021 22:03:49 -0800 Subject: [PATCH 14/24] Bunch more research on figuring out LOD and physics info --- research/pc_model00p.bt | 99 ++++++++++++++++++++++++++++++--------- src/abc.py | 34 +++++++++++--- src/reader_model00p_pc.py | 44 +++++++++++++---- 3 files changed, 140 insertions(+), 37 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 482e383..9352d3c 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -269,21 +269,69 @@ struct ChildModelHeader { ChildModel ChildModels[ChildModelCount - 1] ; }; -struct PhysicsWeightSet { +struct NodeWeight { + char Physics; + // Differences from 1.0/0.0? + float VelocityGain; + float HierachyGain; +}; + +struct PhysicsWeightSet (int Count) { int NameOffset; + NodeWeight NodeWeights[Count]; +}; + +struct PhysicsShape { + char ShapeIndex; + LTVector Offset; + LTQuat Orientation; + + float Cor; + float Friction; + + int CollisionGroup; // Maybe CollisionGroup + int NodeIndex; + + float Mass; + float Density; // This can be scaled per shape! + float Radius; + + // If Orientation isn't 0,0,0,1 then it's not a sphere! + // Probably a capsule, haven't ran into anyother shapes yet tho. 
+ if (Orientation.w != 1.0) { + int Unk1; + float LengthPt1; + float Unk2[2]; + float LengthPt2; + int Unk3; + } +}; + +struct PhysicsConstraintData { + int Type; + int ShapeIndex; + int VertCount; + //int Shape2Index; + if (Type == 4) { // Ragdoll constraint + float Data[24]; + float Friction; + } else if (Type == 3) { // Limited Hinge Constraint + float Data[18]; + float Friction; + float Unk1; + float Unk2; + } }; struct PhysicsHeader { int VisNode; float VisRadius; - int Unk1; - if (Unk1 > 0) { - char UnkFlag; - LTVector Offset; - } - int Unk2; + int ShapeCount; + PhysicsShape Shapes[ShapeCount] ; + int ConstraintCount; + PhysicsConstraintData ConstraintData[ConstraintCount] ; int WeightSetCount; - PhysicsWeightSet WeightSets[WeightSetCount] ; + PhysicsWeightSet WeightSets(ShapeCount)[WeightSetCount] ; }; struct AnimationWeightSet (int NodeCount) { @@ -298,16 +346,16 @@ struct AnimationWeightSetHeader (int NodeCount) { AnimationWeightSet WeightSets(NodeCount)[Count] ; }; -struct LODGroup { +struct LODInfo { float LodDistance; char TextureIndex; char Translucent; char CastShadow; - int Unk1; - int Unk2; + int PieceCount; + int PieceIndexes[PieceCount]; }; -struct SubLODGroup { +struct SubLODInfo { float LodDistance; char TextureIndex; char Translucent; @@ -315,14 +363,15 @@ struct SubLODGroup { int Unk1; }; +struct LODGroup { + int NameOffset; + int LODCount; + LODInfo LODInfoData[LODCount] ; +}; + struct LODGroupHeader { - int HasLODs; - if (HasLODs > 0) { - int NameOffset; - int LODCount; - LODGroup LODGroups; - SubLODGroup SubLODs[ LODCount - 1 ] ; - } + int LODGroupCount; + LODGroup Groups[LODGroupCount] ; }; struct Unk2 { @@ -374,6 +423,10 @@ struct SocketHeader { Socket Sockets[SocketCount] ; }; +struct CondemnedUnk { + int Unk; +}; + Header hdr; // String database, we need to skip it for now @@ -384,9 +437,9 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - AnimSchema skip[8]; // Replace this with the schema length + //AnimSchema 
skip[8]; // Replace this with the schema length // DELTA - //AnimSchema skip[652]; // delta 652 + AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p @@ -402,6 +455,10 @@ AnimationWeightSetHeader animWeightSetHeader(hdr.NodeCount); SocketHeader socketHeader; +if (hdr.Version == 34) { + CondemnedUnk unk; +} + ChildModelHeader childModels; ContainsGeometry hasGeometry; diff --git a/src/abc.py b/src/abc.py index 4ceae95..412da2d 100644 --- a/src/abc.py +++ b/src/abc.py @@ -251,17 +251,37 @@ def __init__(self): # # Model00p+ specific # +class PhysicsShape(object): + def __init__(self): + self.index = 0 + self.offset = Vector() + self.orientation = Quaternion() + self.cor = 0.0 + self.friction = 0.0 + self.collision_group = 0 + self.node_index = 0 + self.mass = 0.0 + self.density = 0.0 # Scaled + self.radius = 0.0 + + # Capsule specific + # If Orientation.w != 0.0 + self.unk_1 = 0 + self.length_pt1 = 0.0 + self.unk_2 = 0 + self.unk_3 = 0 + self.length_pt2 = 0.0 + self.unk_4 = 0 + # End If + class Physics(object): def __init__(self): self.vis_node_index = 0 self.vis_radius = 0.0 - # Unk - self.unk_1 = 0 - # If Unk1 > 0 ? 
- self.unk_flag = 0 - self.unk_offset = Vector() - # End If + # Physics Shapes + self.shape_count = 0 + self.shapes = [] self.unk_2 = 0 self.weight_set_count = 0 @@ -295,7 +315,7 @@ def __init__(self): # LTB specific # Model00p specific - self.physics = None + self.physics = Physics() @property diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 47b8c63..7d09ccc 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -478,16 +478,42 @@ def _read_weight_set(self, f): return weight_set + def _read_physics_shape(self, f): + shape = PhysicsShape() + + shape.index = self._unpack('b', f)[0] + shape.offset = self._read_vector(f) + shape.orientation = self._read_quaternion(f) + shape.cor = self._unpack('f', f)[0] + shape.friction = self._unpack('f', f)[0] + shape.collision_group = self._unpack('I', f)[0] + shape.node_index = self._unpack('I', f)[0] + shape.mass = self._unpack('f', f)[0] + shape.density = self._unpack('f', f)[0] + shape.radius = self._unpack('f', f)[0] + + # Capsule specific + # Since sphere doesn't have orientation data, this works? 
+ if shape.orientation.w != 1.0: + shape.unk_1 = self._unpack('I', f)[0] + shape.length_pt1 = self._unpack('f', f)[0] + shape.unk_2 = self._unpack('I', f)[0] + shape.unk_2 = self._unpack('I', f)[0] + shape.length_pt1 = self._unpack('f', f)[0] + shape.unk_2 = self._unpack('I', f)[0] + # End If + + return shape + def _read_physics(self, f): physics = Physics() physics.vis_node_index = self._unpack('I', f)[0] physics.vis_radius = self._unpack('f', f)[0] - physics.unk_1 = self._unpack('I', f)[0] + physics.shape_count = self._unpack('I', f)[0] - if physics.unk_1 > 0: - physics.unk_flag = self._unpack('b', f)[0] - physics.unk_offset = self._read_vector(f) - # End If + physics.shapes = [ self._read_physics_shape(f) for _ in range(physics.shape_count) ] + + return physics physics.unk_2 = self._unpack('I', f)[0] physics.weight_set_count = self._unpack('I', f)[0] @@ -734,7 +760,7 @@ def from_file(self, path): #model.anim_bindings = [self._read_anim_binding(f) for _ in range(animation_binding_count)] anim_infos = [self._read_anim_info(f) for _ in range(animation_count)] - weight_set_count = unpack('I', f)[0] + weight_set_count = self._unpack('I', f)[0] model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] # Only read animations we want toooo! @@ -852,19 +878,19 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # # Sockets # - socket_count = unpack('I', f)[0] + socket_count = self._unpack('I', f)[0] model.sockets = [self._read_socket(f) for _ in range(socket_count)] # # Child Models # - child_model_count = unpack('I', f)[0] + child_model_count = self._unpack('I', f)[0] model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] debug_ftell = f.tell() # Small flag determines if we excluded geometry on compile! - has_geometry = unpack('b', f)[0] + has_geometry = self._unpack('b', f)[0] # No geomtry? Then let's exit! 
if has_geometry == 0: From 048045ab902772488bfa717327fa2ebc2d10d85b Mon Sep 17 00:00:00 2001 From: Jake Date: Mon, 8 Feb 2021 22:34:38 -0800 Subject: [PATCH 15/24] Mesh data is now reading properly for some character meshes. Although it reads everything at once. There must be a "Piece Starts Here" int somewhere.. --- research/pc_model00p.bt | 2 +- src/abc.py | 39 +++++++++++++-- src/reader_model00p_pc.py | 99 +++++++++++++++++++++++++++------------ 3 files changed, 103 insertions(+), 37 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 9352d3c..a4f6dd8 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -392,7 +392,7 @@ struct Mesh { LTVector Unk1; LTVector Unk4; uchar WeightInfo[4]; - int Unk3; + uchar NodeIndexes[4]; }; struct MeshHeader { diff --git a/src/abc.py b/src/abc.py index 412da2d..d23b256 100644 --- a/src/abc.py +++ b/src/abc.py @@ -98,8 +98,8 @@ def __init__(self): self.texture_index = 0 self.translucent = 0 self.cast_shadow = 0 - self.unk_1 = 0 - self.unk_2 = 0 + self.piece_count = 0 + self.piece_index_list = [] def get_face_vertices(self, face_index): return [self.vertices[vertex.vertex_index] for vertex in self.faces[face_index].vertices] @@ -274,6 +274,33 @@ def __init__(self): self.unk_4 = 0 # End If +class PhysicsConstraint(object): + TYPE_LIMITED_HINGE = 3 + TYPE_RAGDOLL = 4 + + def __init__(self): + self.type = 0 + self.shape_index = 0 + self.unk_1 = 0 + self.data = [] # Length: Type 3 == 18, Type 4 == 24 + self.friction = 0.0 + # If Type == 3 + self.unk_2 = 0.0 + self.unk_3 = 0.0 + # End If + +class PhysicsNodeWeights(object): + def __init__(self): + self.physics = 0 + self.velocity_gain = 1.0 + self.hiearchy_gain = 0.0 + +class PhysicsWeightSet(object): + def __init__(self): + self.name = "" + self.node_weights = [] #...PhysicsNodeWeights + + class Physics(object): def __init__(self): self.vis_node_index = 0 @@ -281,11 +308,13 @@ def __init__(self): # Physics Shapes self.shape_count = 0 - 
self.shapes = [] + self.shapes = [] #...PhysicsShape + + self.constraint_count = 0 + self.contraints = [] #...PhysicsConstraint - self.unk_2 = 0 self.weight_set_count = 0 - self.weight_sets = [] # Array of name offsets + self.weight_sets = [] # ...PhysicsWeightSet class Model(object): def __init__(self): diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 7d09ccc..09451ee 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -227,45 +227,42 @@ def _read_mesh_data(self, f): return lod - def _read_sub_lod(self, f): + def _read_lod(self, f): lod = LOD() - lod.distance = unpack('f', f)[0] - lod.texture_index = unpack('b', f)[0] - lod.translucent = unpack('b', f)[0] - lod.cast_shadow = unpack('b', f)[0] - lod.unk_1 = unpack('I', f)[0] + lod.distance = self._unpack('f', f)[0] + lod.texture_index = self._unpack('b', f)[0] + lod.translucent = self._unpack('b', f)[0] + lod.cast_shadow = self._unpack('b', f)[0] + lod.piece_count = self._unpack('I', f)[0] + + lod.piece_index_list = [ self._unpack('I', f)[0] for _ in range(lod.piece_count) ] return lod - def _read_lod(self, f): - lod = self._read_sub_lod(f) - lod.unk_2 = unpack('I', f)[0] + def _read_lod_group(self, f): + piece = Piece() - return lod + name_offset = unpack('I', f)[0] + piece.name = self._get_string_from_table(name_offset) + lod_count = unpack('I', f)[0] - def _read_piece(self, f): - piece = Piece() + piece.lods = [ self._read_lod(f) for _ in range(lod_count) ] + return piece + + def _read_pieces(self, f): debug_ftell = f.tell() - has_lods = unpack('I', f)[0] + lod_group_count = unpack('I', f)[0] # Not quite sure this is right... 
- if has_lods == 0: - return piece - - name_offset = unpack('I', f)[0] - piece.name = self._get_string_from_table(name_offset) - lod_count = unpack('I', f)[0] + if lod_group_count == 0: + return [] - piece.lods = [ self._read_lod(f) ] - - for _ in range(lod_count - 1): - piece.lods.append(self._read_sub_lod(f)) + pieces = [ self._read_lod_group(f) for _ in range(lod_group_count) ] # Unknown values! - unk_1 = unpack('I', f)[0] unk_2 = unpack('I', f)[0] unk_3 = unpack('I', f)[0] @@ -275,7 +272,7 @@ def _read_piece(self, f): # End unknown values - piece.material_index = unpack('I', f)[0] + pieces[0].material_index = unpack('I', f)[0] debug_ftell = f.tell() @@ -287,9 +284,9 @@ def _read_piece(self, f): #for _ in range( int(data_length / 64) ): # piece.lods[0] = self._read_mesh_data(piece.lods[0], f) - piece.lods[0] = self._read_mesh_data(f) + pieces[0].lods[0] = self._read_mesh_data(f) - return piece + return pieces def _read_node(self, f): node = Node() @@ -505,6 +502,46 @@ def _read_physics_shape(self, f): return shape + def _read_physics_constraint(self, f): + constraint = PhysicsConstraint() + + constraint.type = self._unpack('I', f)[0] + constraint.shape_index = self._unpack('I', f)[0] + constraint.unk_1 = self._unpack('I', f)[0] + + data_length = 24 + + if constraint.type == 3: + data_length = 18 + + constraint.data = [ self._unpack('f', f)[0] for _ in range(data_length) ] + + constraint.friction = self._unpack('I', f)[0] + + if constraint.type == 3: + constraint.unk_2 = self._unpack('I', f)[0] + constraint.unk_3 = self._unpack('I', f)[0] + + return constraint + + def _read_physics_node_weight(self, f): + node_set = PhysicsNodeWeights() + node_set.physics = self._unpack('b', f)[0] + node_set.velocity_gain = self._unpack('f', f)[0] + node_set.hiearchy_gain = self._unpack('f', f)[0] + return node_set + + + def _read_physics_weights(self, shape_count, f): + weight_set = PhysicsWeightSet() + + name_offset = self._unpack('I', f)[0] + weight_set.name = 
self._get_string_from_table(name_offset) + + weight_set.node_weights = [ self._read_physics_node_weight(f) for _ in range(shape_count) ] + + return weight_set + def _read_physics(self, f): physics = Physics() physics.vis_node_index = self._unpack('I', f)[0] @@ -513,11 +550,11 @@ def _read_physics(self, f): physics.shapes = [ self._read_physics_shape(f) for _ in range(physics.shape_count) ] - return physics + physics.constraint_count = self._unpack('I', f)[0] + physics.contraints = [ self._read_physics_constraint(f) for _ in range(physics.constraint_count) ] - physics.unk_2 = self._unpack('I', f)[0] physics.weight_set_count = self._unpack('I', f)[0] - physics.weight_sets = [self._unpack('I', f)[0] for _ in range(physics.weight_set_count)] + physics.weight_sets = [ self._read_physics_weights(physics.shape_count, f) for _ in range(physics.weight_set_count)] return physics @@ -904,7 +941,7 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # # Pieces # - model.pieces = [self._read_piece(f) for _ in range(piece_count)] + model.pieces = self._read_pieces(f) #[self._read_pieces(f) for _ in range(piece_count)] return model #old From b9eefe94545b19cc8c16fad3b341f0361fc1edbf Mon Sep 17 00:00:00 2001 From: Jake Date: Mon, 8 Feb 2021 23:02:14 -0800 Subject: [PATCH 16/24] Clean up --- research/pc_model00p.bt | 7 +++++-- src/reader_model00p_pc.py | 16 +++++----------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index a4f6dd8..5de1777 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -379,7 +379,7 @@ struct Unk2 { int PieceCount; int Unk5; // LOD Related? 
int Unk6; - int Unk7; + int LODCount; if (PieceCount == 0) { LTString MeshFile; } @@ -468,4 +468,7 @@ PhysicsHeader PhysicsInfo; LODGroupHeader lodGroupHeader; Unk2 unk2; -MeshHeader meshHeader; \ No newline at end of file +MeshHeader meshHeader; +FSkip(420); // DeltaForce +MeshHeader meshHeader2; +// LOD info at the bottom, and then mesh data continues per piece \ No newline at end of file diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 09451ee..c54f9fc 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -273,19 +273,13 @@ def _read_pieces(self, f): # End unknown values pieces[0].material_index = unpack('I', f)[0] - - debug_ftell = f.tell() - - #data_length = unpack('I', f)[0] - #index_list_length = unpack('I', f)[0] - - #print("Face Count ", data_length / 64) - - #for _ in range( int(data_length / 64) ): - # piece.lods[0] = self._read_mesh_data(piece.lods[0], f) - pieces[0].lods[0] = self._read_mesh_data(f) + # DeltaForce has extra data below! 
+ # f.seek(420, 1) + # pieces[1].material_index = unpack('I', f)[0] + # pieces[1].lods[0] = self._read_mesh_data(f) + return pieces def _read_node(self, f): From fa9c746c280fdd4af3896322b060f61917fc13a5 Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 9 Feb 2021 19:28:28 -0800 Subject: [PATCH 17/24] Determine mesh info, and clean up --- research/pc_model00p.bt | 30 ++++++++-- src/reader_model00p_pc.py | 116 +------------------------------------- 2 files changed, 29 insertions(+), 117 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 5de1777..be7a22a 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -427,6 +427,27 @@ struct CondemnedUnk { int Unk; }; +// Related to Deformers +struct MeshInfo { + int IndexListStart; + int IndexListCount; + int Unk[3]; + int TriangleCount; + int MaterialIndex; + int InfluenceCount; + int Unk; + char NodeIndexes[InfluenceCount]; +}; + +struct AfterMesh { + int Unk1[4]; + short ShortData[24]; + int Unk3[2]; + // Mesh Info? 
+ int MeshInfoCount; + MeshInfo Info[MeshInfoCount] ; +}; + Header hdr; // String database, we need to skip it for now @@ -437,9 +458,9 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - //AnimSchema skip[8]; // Replace this with the schema length + AnimSchema skip[12]; // Replace this with the schema length // DELTA - AnimSchema skip[652]; // delta 652 + //AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p @@ -469,6 +490,7 @@ PhysicsHeader PhysicsInfo; LODGroupHeader lodGroupHeader; Unk2 unk2; MeshHeader meshHeader; -FSkip(420); // DeltaForce -MeshHeader meshHeader2; +AfterMesh Debug; +//FSkip(420); // DeltaForce +//MeshHeader meshHeader2; // LOD info at the bottom, and then mesh data continues per piece \ No newline at end of file diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index c54f9fc..0f3791e 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -935,116 +935,6 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # # Pieces # - model.pieces = self._read_pieces(f) #[self._read_pieces(f) for _ in range(piece_count)] - - return model -#old - # # - # # HEADER - # # - # file_format = unpack('H', f)[0] - # file_version = unpack('H', f)[0] - - # if file_type is not 1: - # raise Exception('Unsupported File Type! Only mesh LTB files are supported.') - # # End If - - # if file_version is not 9: - # raise Exception('Unsupported File Version! 
Importer currently only supports v9.') - # # End If - - # # Skip 4 ints - # f.seek(4 * 4, 1) - - # self.version = unpack('i', f)[0] - - # if self.version not in [23, 24, 25]: - # raise Exception('Unsupported file version ({}).'.format(self.version)) - # # End If - - # model.version = self.version - - # keyframe_count = unpack('i', f)[0] - # animation_count = unpack('i', f)[0] - # self.node_count = unpack('i', f)[0] - # piece_count = unpack('i', f)[0] - # child_model_count = unpack('i', f)[0] - # face_count = unpack('i', f)[0] - # vertex_count = unpack('i', f)[0] - # vertex_weight_count = unpack('i', f)[0] - # lod_count = unpack('i', f)[0] - # socket_count = unpack('i', f)[0] - # weight_set_count = unpack('i', f)[0] - # string_count = unpack('i', f)[0] - # string_length = unpack('i', f)[0] - # vertex_animation_data_size = unpack('i', f)[0] - # animation_data_size = unpack('i', f)[0] - - # model.command_string = self._read_string(f) - - # model.internal_radius = unpack('f', f)[0] - - # # - # # OBB Information - # # - # obb_count = unpack('i', f)[0] - - # obb_size = 64 - - # if self.version > 23: - # obb_size += 4 - - # # OBB information is a matrix per each node - # # We don't use it anywhere, so just skip it. - # f.seek(obb_size * obb_count, 1) - - # # - # # Pieces - # # - - # # Yep again! 
- # piece_count = unpack('i', f)[0] - # model.pieces = [self._read_piece(f) for _ in range(piece_count)] - - # # - # # Nodes - # # - # model.nodes = [self._read_node(f) for _ in range(self.node_count)] - # build_undirected_tree(model.nodes) - # weight_set_count = unpack('I', f)[0] - # model.weight_sets = [self._read_weight_set(f) for _ in range(weight_set_count)] - - # # - # # Child Models - # # - # child_model_count = unpack('I', f)[0] - # model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] - - # # - # # Animations - # # - # animation_count = unpack('I', f)[0] - # model.animations = [self._read_animation(f) for _ in range(animation_count)] - - # # - # # Sockets - # # - # socket_count = unpack('I', f)[0] - # model.sockets = [self._read_socket(f) for _ in range(socket_count)] - - # # - # # Animation Bindings - # # - # anim_binding_count = unpack('I', f)[0] - - # #model.anim_bindings = [self._read_anim_binding(f) for _ in range(anim_binding_count)] - - # for _ in range(anim_binding_count): - # # Some LTB animation binding information can be incorrect... - # # Almost like the mesh was accidentally cut off, very odd! - # try: - # model.anim_bindings.append(self._read_anim_binding(f)) - # except Exception: - # pass - - # return model + model.pieces = self._read_pieces(f) + + return model \ No newline at end of file From 425bfbed9981b9399a108f7d97a9476d4013fccd Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 9 Feb 2021 22:15:21 -0800 Subject: [PATCH 18/24] Importer now properly reads mesh data, and also bone weights are now processed. 
--- research/pc_model00p.bt | 8 +- src/reader_model00p_pc.py | 154 +++++++++++++++++++++++++++++++++++--- 2 files changed, 148 insertions(+), 14 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index be7a22a..d1c5295 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -391,8 +391,10 @@ struct Mesh { LTUV UVs; LTVector Unk1; LTVector Unk4; - uchar WeightInfo[4]; - uchar NodeIndexes[4]; + uchar WeightInfo[3]; + uchar Padding; + uchar NodeIndexes[3]; + uchar Padding2; }; struct MeshHeader { @@ -458,7 +460,7 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - AnimSchema skip[12]; // Replace this with the schema length + AnimSchema skip[16]; // Replace this with the schema length // DELTA //AnimSchema skip[652]; // delta 652 // ROPE diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 0f3791e..093180f 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -57,7 +57,7 @@ def __init__(self): self.unk_1 = Vector() self.unk_2 = Vector() self.weight_info = [] - self.unk_3 = -1 + self.node_indexes = [] def read(self, reader, f): self.vertex = reader._read_vector(f) @@ -65,8 +65,43 @@ def read(self, reader, f): self.uvs.xy = reader._unpack('2f', f) self.unk_1 = reader._read_vector(f) self.unk_2 = reader._read_vector(f) - self.weight_info = reader._unpack('4b', f) + self.weight_info = reader._unpack('3B', f) + padding = reader._unpack('B', f)[0] + self.node_indexes = reader._unpack('3B', f) + padding = reader._unpack('B', f)[0] + + # Reverse the weight info, I'm not sure why it's flipped... + #self.weight_info.reverse() + # It's a tuple actually... + self.weight_info = tuple(reversed(self.weight_info)) + + return self + + # Another helper class, this should be shoved into whatever I refactor into FEAR's model. 
+ class MeshInfo(object): + def __init__(self): + self.index_list_start = 0 + self.index_list_count = 0 + self.unk_1 = 0 + self.unk_2 = 0 + self.unk_3 = 0 + self.triangle_count = 0 + self.material_index = 0 + self.influence_count = 0 + self.unk_4 = 0 + self.influence_node_indexes = [] + + def read(self, reader, f): + self.index_list_start = reader._unpack('I', f)[0] + self.index_list_count = reader._unpack('I', f)[0] + self.unk_1 = reader._unpack('I', f)[0] + self.unk_2 = reader._unpack('I', f)[0] self.unk_3 = reader._unpack('I', f)[0] + self.triangle_count = reader._unpack('I', f)[0] + self.material_index = reader._unpack('I', f)[0] + self.influence_count = reader._unpack('I', f)[0] + self.unk_4 = reader._unpack('I', f)[0] + self.influence_node_indexes = [ reader._unpack('b', f)[0] for _ in range(self.influence_count) ] return self @@ -188,7 +223,11 @@ def _read_null_mesh(self, lod, f): - def _read_mesh_data(self, f): + def _read_mesh_data(self, pieces, f): + debug_ftell = f.tell() + + texture_count = self._unpack('I', f)[0] + data_length = self._unpack('I', f)[0] index_list_length = self._unpack('I', f)[0] @@ -198,6 +237,105 @@ def _read_mesh_data(self, f): mesh_data_list = [ self.MeshData().read(self, f) for _ in range( int(data_length / 64) ) ] index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] + debug_ftell = f.tell() + + # Unknown after mesh data + # These seem to be the same, so asserts are here for debug. + unk_1 = self._unpack('I', f)[0] + assert(unk_1 == 1) + unk_2 = self._unpack('I', f)[0] + assert(unk_2 == 64) + unk_3 = self._unpack('I', f)[0] + assert(unk_3 == 0) + unk_4 = self._unpack('I', f)[0] + assert(unk_4 == 2) + + # 24 shorts, might have something to do with unk_2? + unk_short_list = self._unpack('24H', f) + + # More unknown data that seems to be the same + unk_5 = self._unpack('I', f)[0] + assert(unk_5 == 255) + unk_6 = self._unpack('I', f)[0] + assert(unk_6 == 17) + + # Okay here's the mesh info! 
+ mesh_info_count = self._unpack('I', f)[0] + mesh_info = [ self.MeshInfo().read(self, f) for _ in range(mesh_info_count) ] + # End + + + # Some running totals + running_lod_index = 0 + running_index_list_index = 0 + for index in range(len(pieces)): + piece = pieces[index] + + for lod_index in range(len(piece.lods)): + lod = piece.lods[lod_index] + info = mesh_info[running_lod_index] + + + + + + # Set the material index (for the main lod only!) + if lod_index == 0: + piece.material_index = info.material_index + + for vertex_index in range(info.index_list_start, info.index_list_start + info.index_list_count): + mesh_data = mesh_data_list[ vertex_index ] + vertex = Vertex() + vertex.location = mesh_data.vertex + vertex.normal = mesh_data.normal + + # Weight info kinda sucks, if there's only one weight it's on position 3... + # Why? I don't know, I'm hoping I'm reading this data slightly wrong. + #if mesh_data.weight_info[2] == 255: + # weight = Weight() + index = 0 + for bias in mesh_data.weight_info: + if bias == 0: + continue + + weight = Weight() + weight.bias = bias / 255 + weight.node_index = info.influence_node_indexes[ mesh_data.node_indexes[index] ] + + vertex.weights.append(weight) + + index += 1 + + lod.vertices.append(vertex) + + # Holds 3 face vertices + face = Face() + + # Tri count * 3, because we're looping through the index list for the length of tri count. + # This may not entirely be correct, as IndexList / 3 sometimes does not equal triangle counts! 
+ for index in range(info.triangle_count * 3): + face_vertex = FaceVertex() + face_vertex.texcoord = mesh_data.uvs + face_vertex.vertex_index = index_list[running_index_list_index] + + face_vertex.vertex_index -= info.index_list_start + + face.vertices.append(face_vertex) + + if len(face.vertices) == 3: + lod.faces.append(face) + face = Face() + + running_index_list_index += 1 + + + piece.lods[lod_index] = lod + running_lod_index += 1 + + return pieces + + # OLD + # Holds 3 face vertices face = Face() lod = LOD() @@ -272,13 +410,8 @@ def _read_pieces(self, f): # End unknown values - pieces[0].material_index = unpack('I', f)[0] - pieces[0].lods[0] = self._read_mesh_data(f) - - # DeltaForce has extra data below! - # f.seek(420, 1) - # pieces[1].material_index = unpack('I', f)[0] - # pieces[1].lods[0] = self._read_mesh_data(f) + # Read the mesh data, and process the pieces + pieces = self._read_mesh_data(pieces, f) return pieces @@ -788,7 +921,6 @@ def from_file(self, path): # Skip ahead to keyframes! 
f.seek(animation_data_length , 1) - #model.anim_bindings = [self._read_anim_binding(f) for _ in range(animation_binding_count)] anim_infos = [self._read_anim_info(f) for _ in range(animation_count)] weight_set_count = self._unpack('I', f)[0] From 9f2debd6faa4846531fbd208fd81ffefe4a1e313 Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 9 Feb 2021 22:46:44 -0800 Subject: [PATCH 19/24] Start researching Condemned's changes --- research/pc_model00p.bt | 50 +++++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index d1c5295..219c7a5 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -397,14 +397,33 @@ struct Mesh { uchar Padding2; }; -struct MeshHeader { +struct Meshv34 { + LTVector Vertex; + LTVector Normals; + LTUV UVs; + LTVector Unk1; + LTVector Unk4; + uchar Colour[4]; + uchar WeightInfo[3]; + uchar Padding; + uchar NodeIndexes[3]; + uchar Padding2; +}; + +struct MeshHeader (int Version) { int TextureCount; // DataLength / 64 = MeshData int DataLength; // DataCount / 2 = tri-fs count int IndexListLength; - Mesh MeshData[DataLength/64]; + + if (Version == 34) { + Meshv34 MeshData[DataLength/68]; + } else { + Mesh MeshData[DataLength/64]; + } + short IndexList[IndexListLength/2]; // DataCount in bytes }; @@ -436,15 +455,17 @@ struct MeshInfo { int Unk[3]; int TriangleCount; int MaterialIndex; - int InfluenceCount; + short InfluenceCount; int Unk; char NodeIndexes[InfluenceCount]; }; struct AfterMesh { - int Unk1[4]; - short ShortData[24]; - int Unk3[2]; + int Unk; + int ShortDataCount; + int Unk3; + int DataSize; // Maybe? + short ShortData[ShortDataCount / 2]; // Mesh Info? 
int MeshInfoCount; MeshInfo Info[MeshInfoCount] ; @@ -460,15 +481,17 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - AnimSchema skip[16]; // Replace this with the schema length + //AnimSchema skip[16]; // Replace this with the schema length // DELTA //AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p - //AnimSchema skip[8]; + AnimSchema skip[8]; } else { - AnimSchema skip[4]; + //AnimSchema skip[4]; + // Condemned, empty is 8? + AnimSchema skip[8]; } AnimData skip2[hdr.AnimationDataLength]; //Animation Anim(hdr.NodeCount, animHdr)[hdr.AnimationCount] ; @@ -479,11 +502,10 @@ AnimationWeightSetHeader animWeightSetHeader(hdr.NodeCount); SocketHeader socketHeader; if (hdr.Version == 34) { - CondemnedUnk unk; -} - + CondemnedUnk Unk; +} else { ChildModelHeader childModels; - +} ContainsGeometry hasGeometry; // Geometry related PhysicsHeader PhysicsInfo; @@ -491,7 +513,7 @@ PhysicsHeader PhysicsInfo; LODGroupHeader lodGroupHeader; Unk2 unk2; -MeshHeader meshHeader; +MeshHeader meshHeader(hdr.Version); AfterMesh Debug; //FSkip(420); // DeltaForce //MeshHeader meshHeader2; From 9b377a08a515bcf1cd731f6ef4fdb948fc508103 Mon Sep 17 00:00:00 2001 From: Jake Date: Tue, 9 Feb 2021 22:54:54 -0800 Subject: [PATCH 20/24] Fix the UV map --- src/reader_model00p_pc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index 093180f..aed4e89 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -315,9 +315,11 @@ def _read_mesh_data(self, pieces, f): # This may not entirely be correct, as IndexList / 3 sometimes does not equal triangle counts! 
for index in range(info.triangle_count * 3): face_vertex = FaceVertex() - face_vertex.texcoord = mesh_data.uvs + face_vertex.vertex_index = index_list[running_index_list_index] + face_vertex.texcoord = mesh_data_list[face_vertex.vertex_index].uvs + face_vertex.vertex_index -= info.index_list_start face.vertices.append(face_vertex) From 69e585a46522c355cbd381d15026a8d892ffb239 Mon Sep 17 00:00:00 2001 From: Jake Date: Wed, 10 Feb 2021 21:36:35 -0800 Subject: [PATCH 21/24] Get ~some~ Condemned models loading --- research/pc_model00p.bt | 20 +++---- src/reader_model00p_pc.py | 113 +++++++------------------------------- 2 files changed, 31 insertions(+), 102 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 219c7a5..3f33d7e 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -269,6 +269,12 @@ struct ChildModelHeader { ChildModel ChildModels[ChildModelCount - 1] ; }; +struct ChildModelHeaderv34 { + int ChildModelCount; + // Can't find a v34 model with child models, so this is just a guess! + ChildModel ChildModels[ChildModelCount] ; +}; + struct NodeWeight { char Physics; // Differences from 1.0/0.0? @@ -444,10 +450,6 @@ struct SocketHeader { Socket Sockets[SocketCount] ; }; -struct CondemnedUnk { - int Unk; -}; - // Related to Deformers struct MeshInfo { int IndexListStart; @@ -463,8 +465,6 @@ struct MeshInfo { struct AfterMesh { int Unk; int ShortDataCount; - int Unk3; - int DataSize; // Maybe? short ShortData[ShortDataCount / 2]; // Mesh Info? int MeshInfoCount; @@ -481,13 +481,13 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - //AnimSchema skip[16]; // Replace this with the schema length + AnimSchema skip[16]; // Replace this with the schema length // DELTA //AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p - AnimSchema skip[8]; + //AnimSchema skip[8]; } else { //AnimSchema skip[4]; // Condemned, empty is 8? 
@@ -502,9 +502,9 @@ AnimationWeightSetHeader animWeightSetHeader(hdr.NodeCount); SocketHeader socketHeader; if (hdr.Version == 34) { - CondemnedUnk Unk; + ChildModelHeaderv34 childModels; } else { -ChildModelHeader childModels; + ChildModelHeader childModels; } ContainsGeometry hasGeometry; // Geometry related diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index aed4e89..cc1fc93 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -58,6 +58,7 @@ def __init__(self): self.unk_2 = Vector() self.weight_info = [] self.node_indexes = [] + self.colour = [] def read(self, reader, f): self.vertex = reader._read_vector(f) @@ -65,14 +66,16 @@ def read(self, reader, f): self.uvs.xy = reader._unpack('2f', f) self.unk_1 = reader._read_vector(f) self.unk_2 = reader._read_vector(f) + + if reader.version == 34: + self.colour = reader._unpack('4B', f) + self.weight_info = reader._unpack('3B', f) padding = reader._unpack('B', f)[0] self.node_indexes = reader._unpack('3B', f) padding = reader._unpack('B', f)[0] # Reverse the weight info, I'm not sure why it's flipped... - #self.weight_info.reverse() - # It's a tuple actually... self.weight_info = tuple(reversed(self.weight_info)) return self @@ -186,43 +189,6 @@ def _read_face(self, f): face.vertices = [self._read_face_vertex(f) for _ in range(3)] return face - def _read_null_mesh(self, lod, f): - # No data here but a filler int! - f.seek(4, 1) - return lod - - # def _read_mesh_data(self, lod, f): - - # vertex = Vertex() - # face = Face() - # face_vertex = FaceVertex() - - # vertex.location = self._read_vector(f) - # vertex.normal = self._read_vector(f) - - # face_vertex.texcoord.xy = self._unpack('2f', f) - - # lod.vertices.append(vertex) - # #lod.faces.append(face_vertex) - - # # Test to see what these unk vectors could be! 
- # # vertex.location = self._read_vector(f) - # # lod.vertices.append(vertex) - # # vertex.location = self._read_vector(f) - # # lod.vertices.append(vertex) - # unk_vec_1 = self._read_vector(f) - # unk_vec_2 = self._read_vector(f) - - - - - # weight_info = self._unpack('4b', f) - # unk_1 = self._unpack('I', f)[0] - - # return lod - - - def _read_mesh_data(self, pieces, f): debug_ftell = f.tell() @@ -231,10 +197,15 @@ def _read_mesh_data(self, pieces, f): data_length = self._unpack('I', f)[0] index_list_length = self._unpack('I', f)[0] - print("Face Count ", data_length / 64) + print("Mesh Data Triangle Count: ", data_length / 64) + + # Total size of the MeshData structure + mesh_data_size = 64 + if self.version == 34: + mesh_data_size = 68 # Data Length / Structure Size - mesh_data_list = [ self.MeshData().read(self, f) for _ in range( int(data_length / 64) ) ] + mesh_data_list = [ self.MeshData().read(self, f) for _ in range( int(data_length / mesh_data_size) ) ] index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] debug_ftell = f.tell() @@ -243,28 +214,16 @@ def _read_mesh_data(self, pieces, f): # These seem to be the same, so asserts are here for debug. unk_1 = self._unpack('I', f)[0] assert(unk_1 == 1) - unk_2 = self._unpack('I', f)[0] - assert(unk_2 == 64) - unk_3 = self._unpack('I', f)[0] - assert(unk_3 == 0) - unk_4 = self._unpack('I', f)[0] - assert(unk_4 == 2) - - # 24 shorts, might have something to do with unk_2? - unk_short_list = self._unpack('24H', f) - - # More unknown data that seems to be the same - unk_5 = self._unpack('I', f)[0] - assert(unk_5 == 255) - unk_6 = self._unpack('I', f)[0] - assert(unk_6 == 17) + short_count = self._unpack('I', f)[0] + + # Not sure what this is, but it I can safely ignore it for now. + unk_short_list = [ self._unpack('H', f)[0] for _ in range( int(short_count / 2) ) ] # Okay here's the mesh info! 
mesh_info_count = self._unpack('I', f)[0] mesh_info = [ self.MeshInfo().read(self, f) for _ in range(mesh_info_count) ] # End - # Some running totals running_lod_index = 0 running_index_list_index = 0 @@ -275,10 +234,6 @@ def _read_mesh_data(self, pieces, f): lod = piece.lods[lod_index] info = mesh_info[running_lod_index] - - - - # Set the material index (for the main lod only!) if lod_index == 0: piece.material_index = info.material_index @@ -336,37 +291,6 @@ def _read_mesh_data(self, pieces, f): return pieces - # OLD - - # Holds 3 face vertices - face = Face() - lod = LOD() - - real_index = 0 - - for index in index_list: - mesh_data = mesh_data_list[index] - - vertex = Vertex() - vertex.location = mesh_data.vertex - vertex.normal = mesh_data.normal - lod.vertices.append(vertex) - - face_vertex = FaceVertex() - face_vertex.texcoord = mesh_data.uvs - face_vertex.vertex_index = real_index - - face.vertices.append(face_vertex) - - if len(face.vertices) == 3: - lod.faces.append(face) - face = Face() - - real_index += 1 - # End For - - return lod - def _read_lod(self, f): lod = LOD() @@ -1050,6 +974,11 @@ def handle_carry_over(flag_type, keyframe_list, defaults_list, keyframe_index, n # Child Models # child_model_count = self._unpack('I', f)[0] + + # In v34 they reduced the count by 1. (Before child model count use to include itself!) + if self.version == 34: + child_model_count += 1 + model.child_models = [self._read_child_model(f) for _ in range(child_model_count - 1)] debug_ftell = f.tell() From fff1c0a858e1e77bf6d02395be2237ddeabab8b9 Mon Sep 17 00:00:00 2001 From: Jake Date: Wed, 10 Feb 2021 22:42:52 -0800 Subject: [PATCH 22/24] Fix big-endian mode, and start reading the dang mesh data properly. 
--- research/pc_model00p.bt | 28 +++++++++---- src/reader_model00p_pc.py | 86 ++++++++++++++++++++++++--------------- 2 files changed, 73 insertions(+), 41 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index 3f33d7e..ad2862f 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -44,6 +44,11 @@ struct LTMatrix { struct Header { char Format[4]; + + if (Format == "LDOM") { + BigEndian(); + } + int Version; int KeyframeCount; int AnimationCount; @@ -428,6 +433,7 @@ struct MeshHeader (int Version) { Meshv34 MeshData[DataLength/68]; } else { Mesh MeshData[DataLength/64]; + //Meshv34 MeshData[DataLength/68]; } short IndexList[IndexListLength/2]; // DataCount in bytes @@ -452,20 +458,26 @@ struct SocketHeader { // Related to Deformers struct MeshInfo { - int IndexListStart; - int IndexListCount; - int Unk[3]; + int MeshDataStart; + int MeshDataCount; + //int IndexListStart; + //int IndexListCount; + int MeshDataSize; + int IndexListPosition; + //int MeshDataPosition; // Maybe? + int Unk; int TriangleCount; int MaterialIndex; - short InfluenceCount; + int InfluenceCount; int Unk; char NodeIndexes[InfluenceCount]; }; struct AfterMesh { int Unk; - int ShortDataCount; - short ShortData[ShortDataCount / 2]; + int ByteDataCount; + //short ShortData[ShortDataCount / 2]; + uchar ByteList[ByteDataCount]; // Mesh Info? 
int MeshInfoCount; MeshInfo Info[MeshInfoCount] ; @@ -481,9 +493,9 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - AnimSchema skip[16]; // Replace this with the schema length + //AnimSchema skip[16]; // Replace this with the schema length // DELTA - //AnimSchema skip[652]; // delta 652 + AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index cc1fc93..c73f5dc 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -50,7 +50,7 @@ def __init__(self): # Helper class for reading in mesh data # TODO: Need to clean up and move into a model class whenever I get to that. class MeshData(object): - def __init__(self): + def __init__(self, data_size): self.vertex = Vector() self.normal = Vector() self.uvs = Vector() @@ -60,6 +60,10 @@ def __init__(self): self.node_indexes = [] self.colour = [] + # FEAR uses 64, which is mesh data WITHOUT colour info + # Condemned uses 68, which includes colour info. 
+ self.data_size = data_size + def read(self, reader, f): self.vertex = reader._read_vector(f) self.normal = reader._read_vector(f) @@ -67,7 +71,7 @@ def read(self, reader, f): self.unk_1 = reader._read_vector(f) self.unk_2 = reader._read_vector(f) - if reader.version == 34: + if self.data_size == 68: self.colour = reader._unpack('4B', f) self.weight_info = reader._unpack('3B', f) @@ -85,7 +89,7 @@ class MeshInfo(object): def __init__(self): self.index_list_start = 0 self.index_list_count = 0 - self.unk_1 = 0 + self.mesh_data_size = 0 self.unk_2 = 0 @@ -97,7 +101,7 @@ def __init__(self): def read(self, reader, f): self.index_list_start = reader._unpack('I', f)[0] self.index_list_count = reader._unpack('I', f)[0] - self.unk_1 = reader._unpack('I', f)[0] + self.mesh_data_size = reader._unpack('I', f)[0] self.unk_2 = reader._unpack('I', f)[0] self.unk_3 = reader._unpack('I', f)[0] self.triangle_count = reader._unpack('I', f)[0] @@ -199,29 +203,45 @@ def _read_mesh_data(self, pieces, f): print("Mesh Data Triangle Count: ", data_length / 64) - # Total size of the MeshData structure - mesh_data_size = 64 - if self.version == 34: - mesh_data_size = 68 + # We need to read the MeshInfo data that's located AFTER the actual mesh data + # So skip the mesh data for now... - # Data Length / Structure Size - mesh_data_list = [ self.MeshData().read(self, f) for _ in range( int(data_length / mesh_data_size) ) ] - index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] + mesh_data_position = f.tell() - debug_ftell = f.tell() + f.seek(data_length, 1) + f.seek(index_list_length, 1) # Unknown after mesh data # These seem to be the same, so asserts are here for debug. unk_1 = self._unpack('I', f)[0] assert(unk_1 == 1) - short_count = self._unpack('I', f)[0] + + byte_list_count = self._unpack('I', f)[0] # Not sure what this is, but I can safely ignore it for now. 
- unk_short_list = [ self._unpack('H', f)[0] for _ in range( int(short_count / 2) ) ] + unk_byte_list = [ self._unpack('B', f)[0] for _ in range(byte_list_count) ] # Okay here's the mesh info! mesh_info_count = self._unpack('I', f)[0] mesh_info = [ self.MeshInfo().read(self, f) for _ in range(mesh_info_count) ] + + mesh_info_position = f.tell() + + # FIXME: I now need to read mesh data per piece instead of all at once now... + mesh_data_size = mesh_info[0].mesh_data_size + + # Hop back to the mesh data + f.seek(mesh_data_position, 0) + + # Data Length / Structure Size + mesh_data_list = [ self.MeshData(mesh_data_size).read(self, f) for _ in range( int(data_length / mesh_data_size) ) ] + index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] + + debug_ftell = f.tell() + + # Annnnd hop back + f.seek(mesh_info_position, 0) + # End # Some running totals @@ -307,9 +327,9 @@ def _read_lod(self, f): def _read_lod_group(self, f): piece = Piece() - name_offset = unpack('I', f)[0] + name_offset = self._unpack('I', f)[0] piece.name = self._get_string_from_table(name_offset) - lod_count = unpack('I', f)[0] + lod_count = self._unpack('I', f)[0] piece.lods = [ self._read_lod(f) for _ in range(lod_count) ] @@ -318,7 +338,7 @@ def _read_lod_group(self, f): def _read_pieces(self, f): debug_ftell = f.tell() - lod_group_count = unpack('I', f)[0] + lod_group_count = self._unpack('I', f)[0] # Not quite sure this is right... if lod_group_count == 0: @@ -327,12 +347,12 @@ def _read_pieces(self, f): pieces = [ self._read_lod_group(f) for _ in range(lod_group_count) ] # Unknown values! 
- unk_1 = unpack('I', f)[0] - unk_2 = unpack('I', f)[0] - unk_3 = unpack('I', f)[0] + unk_1 = self._unpack('I', f)[0] + unk_2 = self._unpack('I', f)[0] + unk_3 = self._unpack('I', f)[0] - unk_4 = unpack('I', f)[0] - unk_5 = unpack('I', f)[0] + unk_4 = self._unpack('I', f)[0] + unk_5 = self._unpack('I', f)[0] # End unknown values @@ -383,7 +403,7 @@ def _read_compressed_transform(self, compression_type, keyframe_count, f): for _ in range(self.node_count): # RLE! - key_position_count = unpack('I', f)[0] + key_position_count = self._unpack('I', f)[0] compressed_positions = [] if compression_type == CMP_Relevant or compression_type == CMP_Relevant_Rot16: @@ -392,7 +412,7 @@ def _read_compressed_transform(self, compression_type, keyframe_count, f): compressed_positions = [self._process_compressed_vector(unpack('3h', f)) for _ in range(key_position_count)] # End If - key_rotation_count = unpack('I', f)[0] + key_rotation_count = self._unpack('I', f)[0] compressed_rotations = [] if compression_type == CMP_Relevant: @@ -439,8 +459,8 @@ def _read_child_model(self, f): def _read_keyframe(self, f): keyframe = Animation.Keyframe() - keyframe.time = unpack('I', f)[0] - string_offset = unpack('I', f)[0] + keyframe.time = self._unpack('I', f)[0] + string_offset = self._unpack('I', f)[0] keyframe.string = self._get_string_from_table(string_offset) return keyframe @@ -448,15 +468,15 @@ def _read_animation(self, f): animation = Animation() animation.extents = self._read_vector(f) animation.name = self._read_string(f) - animation.compression_type = unpack('i', f)[0] - animation.interpolation_time = unpack('I', f)[0] - animation.keyframe_count = unpack('I', f)[0] + animation.compression_type = self._unpack('i', f)[0] + animation.interpolation_time = self._unpack('I', f)[0] + animation.keyframe_count = self._unpack('I', f)[0] animation.keyframes = [self._read_keyframe(f) for _ in range(animation.keyframe_count)] animation.node_keyframe_transforms = [] if animation.compression_type == 
CMP_None: for _ in range(self.node_count): - animation.is_vertex_animation = unpack('b', f)[0] + animation.is_vertex_animation = self._unpack('b', f)[0] # We don't support vertex animations yet, so alert if we accidentally load some! assert(animation.is_vertex_animation == 0) @@ -472,7 +492,7 @@ def _read_animation(self, f): def _read_socket(self, f): socket = Socket() - socket.node_index = unpack('I', f)[0] + socket.node_index = self._unpack('I', f)[0] name_offset = self._unpack('I', f)[0] socket.name = self._get_string_from_table(name_offset) @@ -520,10 +540,10 @@ def _read_anim_info(self, f): def _read_weight_set(self, f): weight_set = WeightSet() - name_offset = unpack('I', f)[0] + name_offset = self._unpack('I', f)[0] weight_set.name = self._get_string_from_table(name_offset) - unk_1 = unpack('I', f)[0] + unk_1 = self._unpack('I', f)[0] weight_set.node_weights = [unpack('f', f)[0] for _ in range(self.node_count)] return weight_set From bf6d78c3bff7cdaeab0f5f1ea9c80f86b5049dd4 Mon Sep 17 00:00:00 2001 From: Jake Date: Wed, 10 Feb 2021 22:44:49 -0800 Subject: [PATCH 23/24] Refactor some variable names to match their actual use. --- src/reader_model00p_pc.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index c73f5dc..fb52431 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -87,10 +87,10 @@ def read(self, reader, f): # Another helper class, this should be shoved into whatever I refactor into FEAR's model. 
class MeshInfo(object): def __init__(self): - self.index_list_start = 0 - self.index_list_count = 0 + self.mesh_data_start = 0 + self.mesh_data_count = 0 self.mesh_data_size = 0 - self.unk_2 = 0 + self.index_list_position = 0 self.unk_3 = 0 self.triangle_count = 0 self.material_index = 0 @@ -99,10 +99,10 @@ def __init__(self): self.influence_node_indexes = [] def read(self, reader, f): - self.index_list_start = reader._unpack('I', f)[0] - self.index_list_count = reader._unpack('I', f)[0] + self.mesh_data_start = reader._unpack('I', f)[0] + self.mesh_data_count = reader._unpack('I', f)[0] self.mesh_data_size = reader._unpack('I', f)[0] - self.unk_2 = reader._unpack('I', f)[0] + self.index_list_position = reader._unpack('I', f)[0] self.unk_3 = reader._unpack('I', f)[0] self.triangle_count = reader._unpack('I', f)[0] self.material_index = reader._unpack('I', f)[0] @@ -258,7 +258,7 @@ def _read_mesh_data(self, pieces, f): if lod_index == 0: piece.material_index = info.material_index - for vertex_index in range(info.index_list_start, info.index_list_start + info.index_list_count): + for vertex_index in range(info.mesh_data_start, info.mesh_data_start + info.mesh_data_count): mesh_data = mesh_data_list[ vertex_index ] vertex = Vertex() vertex.location = mesh_data.vertex @@ -295,7 +295,7 @@ def _read_mesh_data(self, pieces, f): face_vertex.texcoord = mesh_data_list[face_vertex.vertex_index].uvs - face_vertex.vertex_index -= info.index_list_start + face_vertex.vertex_index -= info.mesh_data_start face.vertices.append(face_vertex) From 5438eaeddb5c750c34b50b47bf6442d14d664d25 Mon Sep 17 00:00:00 2001 From: Jake Date: Sat, 13 Feb 2021 12:35:30 -0800 Subject: [PATCH 24/24] Read the data a bit more properly. Also account for weird material indexing. 
--- research/pc_model00p.bt | 10 +++++++--- src/reader_model00p_pc.py | 40 +++++++++++++++++++++++++++++++++------ 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/research/pc_model00p.bt b/research/pc_model00p.bt index ad2862f..70d90e4 100644 --- a/research/pc_model00p.bt +++ b/research/pc_model00p.bt @@ -63,7 +63,7 @@ struct Header { int PhysicsWeightCount; int PhysicsShapeCount; int Unk12; - int Unk13; + int BallAndSocketContraintCount; // Not confirmed int StiffSpringConstraintCount; int HingeConstraintCount; int LimitedHingeConstraintCount; @@ -493,13 +493,17 @@ AnimationHeader animHdr; if (hdr.AnimationDataLength > 0) { // CUBE - //AnimSchema skip[16]; // Replace this with the schema length + AnimSchema skip[16]; // Replace this with the schema length // DELTA - AnimSchema skip[652]; // delta 652 + //AnimSchema skip[652]; // delta 652 // ROPE //AnimSchema skip[32]; // Condemned Default00p //AnimSchema skip[8]; + // Prototype + //AnimSchema skip[1212]; + // Player NewHand + //AnimSchema skip[272]; } else { //AnimSchema skip[4]; // Condemned, empty is 8? diff --git a/src/reader_model00p_pc.py b/src/reader_model00p_pc.py index fb52431..6410a6d 100644 --- a/src/reader_model00p_pc.py +++ b/src/reader_model00p_pc.py @@ -47,6 +47,9 @@ def __init__(self): # Temp until I can figure out how animations work! self._read_animations = False + # Re-map our material indexes + self.material_index_list = {} + # Helper class for reading in mesh data # TODO: Need to clean up and move into a model class whenever I get to that. class MeshData(object): @@ -64,6 +67,9 @@ def __init__(self, data_size): # Condemned uses 68, which includes colour info. self.data_size = data_size + if data_size not in [64, 68]: + print("WARNING: Non-standard MeshData size. 
Size is %d" % data_size) + def read(self, reader, f): self.vertex = reader._read_vector(f) self.normal = reader._read_vector(f) @@ -227,14 +233,26 @@ def _read_mesh_data(self, pieces, f): mesh_info_position = f.tell() - # FIXME: I now need to read mesh data per piece instead of all at once now... - mesh_data_size = mesh_info[0].mesh_data_size - # Hop back to the mesh data f.seek(mesh_data_position, 0) - # Data Length / Structure Size - mesh_data_list = [ self.MeshData(mesh_data_size).read(self, f) for _ in range( int(data_length / mesh_data_size) ) ] + # NEW + + # TODO: Maybe sort info by data start? + + mesh_data_list = [] + for index in range(mesh_info_count): + info = mesh_info[index] + + length = info.mesh_data_count + for _ in range( length ): + data = self.MeshData(info.mesh_data_size) + mesh_data_list.append( data.read(self, f) ) + + # END NEW + + print("Data List Length -> ", len(mesh_data_list)) + index_list = [ self._unpack('H', f)[0] for _ in range( int(index_list_length / 2) ) ] debug_ftell = f.tell() @@ -253,10 +271,18 @@ def _read_mesh_data(self, pieces, f): for lod_index in range(len(piece.lods)): lod = piece.lods[lod_index] info = mesh_info[running_lod_index] + running_index_list_index = info.index_list_position # Set the material index (for the main lod only!) 
if lod_index == 0: - piece.material_index = info.material_index + + if info.material_index in self.material_index_list: + piece.material_index = self.material_index_list[info.material_index] + else: + length = len(self.material_index_list) + self.material_index_list[info.material_index] = piece.material_index = length + + #piece.material_index = info.material_index for vertex_index in range(info.mesh_data_start, info.mesh_data_start + info.mesh_data_count): mesh_data = mesh_data_list[ vertex_index ] @@ -621,6 +647,8 @@ def _read_physics(self, f): physics.vis_radius = self._unpack('f', f)[0] physics.shape_count = self._unpack('I', f)[0] + # TODO: Read each physics-based count in the order they're placed in the header + physics.shapes = [ self._read_physics_shape(f) for _ in range(physics.shape_count) ] physics.constraint_count = self._unpack('I', f)[0]