mirror of https://github.com/AxioDL/metaforce.git
initial port of original blender addon to HECL
This commit is contained in:
parent
db0ce36b90
commit
b941440418
|
@ -0,0 +1,14 @@
|
|||
#ifndef CBLENDERCONNECTION_HPP
#define CBLENDERCONNECTION_HPP

#include <unistd.h>

/* Handle to an external Blender process.
 * Only the child's pid is tracked here; the constructor/destructor
 * bodies live in the corresponding .cpp (not visible in this header --
 * NOTE(review): presumably the ctor spawns Blender and the dtor reaps
 * it; confirm against the implementation). */
class CBlenderConnection
{
    pid_t m_blenderProc; /* pid of the spawned Blender subprocess */
public:
    CBlenderConnection();
    ~CBlenderConnection();
};

#endif // CBLENDERCONNECTION_HPP
|
|
@ -0,0 +1,83 @@
|
|||
'''
Root HECL addon package for Blender
'''

# Blender addon metadata, read by Blender's addon manager
bl_info = {
    "name": "HECL",
    "author": "Jack Andersen <jackoalan@gmail.com>",
    "version": (1, 0),
    "blender": (2, 69),
    "tracker_url": "https://github.com/RetroView/hecl/issues/new",
    "location": "Properties > Scene > HECL",
    "description": "Enables blender to gather meshes, materials, and textures for hecl",
    "category": "System"}

# Package import
from . import hmdl
import bpy, os, sys
from bpy.app.handlers import persistent

# Appendable list allowing external addons to register additional resource types.
# Tuple layout: (enum_id, ui_name, ui_description, panel_draw_fn, cook_fn);
# the draw/cook slots may be None (see 'NONE' entry).
hecl_export_types = [
    ('NONE', "None", "Active scene not using HECL", None, None),
    ('MESH', "Mesh", "Active scene represents an RWK Mesh", hmdl.panel_draw, hmdl.cook)]
|
||||
|
||||
# Main Scene Panel
|
||||
class hecl_scene_panel(bpy.types.Panel):
    """Scene-properties panel exposing the HECL export-type selector and
    the sub-panel of whichever export type is currently active."""
    bl_idname = "SCENE_PT_hecl"
    bl_label = "HECL"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"

    @classmethod
    def poll(cls, context):
        return context.scene is not None

    def draw(self, context):
        layout = self.layout
        selector_row = layout.row(align=True)
        selector_row.prop_menu_enum(context.scene, 'hecl_export_type', text='Export Type')

        # Delegate the rest of the panel to the active type's draw callback
        active_id = context.scene.hecl_export_type
        for entry in hecl_export_types:
            if entry[0] == active_id and callable(entry[3]):
                entry[3](self, context)
                break
|
||||
|
||||
|
||||
# Blender-selected polymorphism
|
||||
def do_package(writefd, platform_type, endian_char):
    """Dispatch packaging to the cook callback registered for the active
    scene's HECL export type (silently does nothing when that type has
    no callable cook slot)."""
    active_id = bpy.context.scene.hecl_export_type
    for entry in hecl_export_types:
        if entry[0] == active_id and callable(entry[4]):
            entry[4](writefd, platform_type, endian_char)
|
||||
|
||||
|
||||
# Blender export-type registration
|
||||
def register_export_type_enum():
    """(Re)build the per-scene EnumProperty from hecl_export_types.

    Called again whenever an external addon appends a new export type so
    the enum items stay in sync with the list."""
    enum_items = [entry[:3] for entry in hecl_export_types]
    bpy.types.Scene.hecl_export_type = bpy.props.EnumProperty(
        items=enum_items,
        name="HECL Export Type",
        description="Selects how active scene is exported by HECL")
|
||||
|
||||
# Function for external addons to register export types with HECL
|
||||
def add_export_type(type_tuple):
    """Register an additional export type with HECL on behalf of an
    external addon, then refresh the scene enum.

    Raises RuntimeError when the tuple's enum id is already registered."""
    new_entry = tuple(type_tuple)
    if any(entry[0] == new_entry[0] for entry in hecl_export_types):
        raise RuntimeError("Type already registered with HECL")
    hecl_export_types.append(new_entry)
    register_export_type_enum()
|
||||
|
||||
# Registration
def register():
    # Sub-package first: its properties/operators must exist before the panel
    hmdl.register()
    bpy.utils.register_class(hecl_scene_panel)
    register_export_type_enum()

def unregister():
    hmdl.unregister()
    bpy.utils.unregister_class(hecl_scene_panel)

# Allows running the addon directly from Blender's text editor
if __name__ == "__main__":
    register()
|
|
@ -0,0 +1,309 @@
|
|||
'''
|
||||
HMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
This Python module provides a generator implementation for
|
||||
the 'HMDL' mesh format designed for use with HECL.
|
||||
|
||||
The format features three main sections:
|
||||
* Shader table
|
||||
* Skin-binding table
|
||||
* Mesh table (VBOs [array,element], VAO attribs, drawing index)
|
||||
|
||||
The Shader table provides index-referenced binding points
|
||||
for mesh-portions to use for rendering.
|
||||
|
||||
The Skin-binding table provides the runtime with identifiers
|
||||
to use in ensuring the correct bone-transformations are bound
|
||||
to the shader when rendering a specific primitive.
|
||||
|
||||
The Mesh table contains Vertex and Element buffers with interleaved
|
||||
Positions, Normals, UV coordinates, and Weight Vectors
|
||||
'''
|
||||
|
||||
import struct, bpy, bmesh
|
||||
from mathutils import Vector
|
||||
from . import hmdl_shader
|
||||
from . import hmdl_skin
|
||||
from . import hmdl_mesh
|
||||
from . import hmdl_anim
|
||||
|
||||
def get_3d_context(object_):
    """Build a context-override dict targeting the first VIEW_3D area's
    WINDOW region of the first window, with *object_* as the active object.

    Used to run view-dependent operators (e.g. vertex-group normalize)
    from script context.

    Raises RuntimeError when no 3D viewport is available — the original
    fell through with an unbound local (NameError) in that case.
    """
    window = bpy.context.window_manager.windows[0]
    screen = window.screen

    area3d = None
    for area in screen.areas:
        if area.type == "VIEW_3D":
            area3d = area
            break
    if area3d is None:
        raise RuntimeError("no VIEW_3D area available for context override")

    region3d = None
    for region in area3d.regions:
        if region.type == "WINDOW":
            region3d = region
            break
    if region3d is None:
        raise RuntimeError("no WINDOW region in the VIEW_3D area")

    override = {
        "window": window,
        "screen": screen,
        "area": area3d,
        "region": region3d,
        "object": object_
    }

    return override
|
||||
|
||||
|
||||
# Generate Skeleton Info structure (free-form tree structure)
|
||||
# Generate Skeleton Info structure (free-form tree structure)
def generate_skeleton_info(armature, endian_char='<'):
    """Serialize the armature's bone tree.

    Layout: bone count, then a table of byte offsets (one per bone,
    relative to the start of the buffer), then the per-bone records.
    Each record: 28-bit name hash, head position (3 floats), parent
    index (-1 for roots), child count, child indices."""
    bone_blobs = []
    for bone in armature.data.bones:
        blob = bytearray()

        # Bone name hash
        blob += struct.pack(endian_char + 'I', hmdl_anim.hashbone(bone.name))

        # Head position
        for comp in bone.head_local:
            blob += struct.pack(endian_char + 'f', comp)

        # Parent index (-1 when this is a root bone)
        if bone.parent:
            parent_idx = armature.data.bones.find(bone.parent.name)
        else:
            parent_idx = -1
        blob += struct.pack(endian_char + 'i', parent_idx)

        # Children
        blob += struct.pack(endian_char + 'I', len(bone.children))
        for child in bone.children:
            blob += struct.pack(endian_char + 'I', armature.data.bones.find(child.name))

        bone_blobs.append(blob)

    # Header: count + offset table, then the records themselves
    info_bytes = bytearray()
    info_bytes += struct.pack(endian_char + 'I', len(bone_blobs))

    running_offset = 4 + 4 * len(bone_blobs)
    for blob in bone_blobs:
        info_bytes += struct.pack(endian_char + 'I', running_offset)
        running_offset += len(blob)

    for blob in bone_blobs:
        info_bytes += blob

    return info_bytes
|
||||
|
||||
|
||||
# Takes a Blender 'Mesh' object (not the datablock)
|
||||
# and performs a one-shot conversion process to HMDL; packaging
|
||||
# into the HECL data-pipeline and returning a hash once complete
|
||||
# Takes a Blender 'Mesh' object (not the datablock)
# and performs a one-shot conversion process to HMDL; packaging
# into the HECL data-pipeline and returning a hash once complete
def cook(writefd, platform_type, endian_char):
    """Cook the scene's designated mesh object into the HMDL binary format.

    Reads bpy.context.scene.hecl_mesh_obj, builds per-material triangulated
    sub-meshes, optional rigging info, and assembles the final 'HMDL' buffer.

    Raises RuntimeError when the named object is not a 'MESH'.

    NOTE(review): writefd/platform_type are currently unused, and the
    epilogue references names (area_db_id, heclpak, resource_name, res_db,
    db_id) that are undefined in this module -- leftovers from the
    pre-HECL addon; confirm against the original codebase.
    """
    mesh_obj = bpy.data.objects[bpy.context.scene.hecl_mesh_obj]
    if mesh_obj.type != 'MESH':
        raise RuntimeError("{0} is not a mesh".format(mesh_obj.name))

    # Partial (per-material) meshes created below; removed at the end
    part_meshes = set()

    # Start with shader, mesh and rigging-info generation.
    # Use SHA1 hashing to determine what the ID-hash will be when
    # shaders are packaged; strip out duplicates
    shader_set = []
    rigger = None

    # Normalize all vertex weights (best-effort; operator may be
    # unavailable in this context, in which case weights are used as-is)
    override = get_3d_context(mesh_obj)
    try:
        bpy.ops.object.vertex_group_normalize_all(override, lock_active=False)
    except:
        pass

    # Copy mesh (and apply mesh modifiers)
    copy_name = mesh_obj.name + "_hmdltri"
    copy_mesh = bpy.data.meshes.new(copy_name)
    copy_obj = bpy.data.objects.new(copy_name, copy_mesh)
    copy_obj.data = mesh_obj.to_mesh(bpy.context.scene, True, 'RENDER')
    copy_obj.scale = mesh_obj.scale
    bpy.context.scene.objects.link(copy_obj)

    # Determine count of transformation matrices to deliver to shader set
    actual_max_bone_counts = [1] * len(mesh_obj.data.materials)
    max_bone_count = 1
    for mat_idx in range(len(mesh_obj.data.materials)):
        mat = mesh_obj.data.materials[mat_idx]
        count = hmdl_shader.max_transform_counter(mat, mesh_obj)
        if count > 1:
            actual_max_bone_counts[mat_idx] = count
        if count > max_bone_count:
            max_bone_count = count

    # If skinned, establish rigging generator.
    # (Fixed: this originally ran before max_bone_count was assigned,
    # raising UnboundLocalError for any skinned mesh.)
    if len(mesh_obj.vertex_groups):
        rigger = hmdl_skin.hmdl_skin(max_bone_count, mesh_obj.vertex_groups)

    # Sort materials by pass index first (selection sort over a set)
    sorted_material_idxs = []
    source_mat_set = set(range(len(mesh_obj.data.materials)))
    while len(source_mat_set):
        min_mat_idx = source_mat_set.pop()
        source_mat_set.add(min_mat_idx)
        for mat_idx in source_mat_set:
            if mesh_obj.data.materials[mat_idx].pass_index < mesh_obj.data.materials[min_mat_idx].pass_index:
                min_mat_idx = mat_idx
        sorted_material_idxs.append(min_mat_idx)
        source_mat_set.discard(min_mat_idx)

    # Generate shaders
    # NOTE(review): shader_hashes is never populated and shader_uv_count
    # stays 0; hecl_str is computed but unused -- port appears unfinished.
    actual_max_texmtx_count = 0
    for mat_idx in sorted_material_idxs:

        shader_hashes = []
        shader_uv_count = 0

        if mesh_obj.data.hecl_material_count > 0:
            for grp_idx in range(mesh_obj.data.hecl_material_count):
                for mat in bpy.data.materials:
                    if mat.name.endswith('_%u_%u' % (grp_idx, mat_idx)):
                        hecl_str = hmdl_shader.shader(mat, mesh_obj, bpy.data.filepath)

        else:
            mat = mesh_obj.data.materials[mat_idx]
            hecl_str = hmdl_shader.shader(mat, mesh_obj, bpy.data.filepath)

        mesh_maker = hmdl_mesh.hmdl_mesh()

        # Make special version of mesh with just the relevant material;
        # also perform triangulation
        mesh = bpy.data.meshes.new(copy_obj.name + '_' + str(mat_idx))
        part_meshes.add(mesh)
        bm = bmesh.new()
        bm.from_mesh(copy_obj.data)
        to_remove = []
        shader_center = Vector((0,0,0))
        shader_center_count = 0
        for face in bm.faces:
            if face.material_index != mat_idx:
                to_remove.append(face)
            else:
                shader_center += face.calc_center_bounds()
                shader_center_count += 1
        shader_center /= shader_center_count
        bmesh.ops.delete(bm, geom=to_remove, context=5)
        bmesh.ops.triangulate(bm, faces=bm.faces)
        bm.to_mesh(mesh)
        bm.free()

        # Optimise mesh
        if rigger:
            mesh_maker.add_mesh(mesh, rigger, shader_uv_count)
        else:
            mesh_maker.add_mesh(mesh, None, shader_uv_count)

        shader_set.append((shader_hashes, mesh_maker, shader_center))

    # AABB from two opposite corners of the bound box
    aabb = bytearray()
    for comp in copy_obj.bound_box[0]:
        aabb += struct.pack(endian_char + 'f', comp)
    for comp in copy_obj.bound_box[6]:
        aabb += struct.pack(endian_char + 'f', comp)

    # Delete copied mesh from scene
    bpy.context.scene.objects.unlink(copy_obj)
    bpy.data.objects.remove(copy_obj)
    bpy.data.meshes.remove(copy_mesh)

    # Count total collections
    total_collections = 0
    for shader in shader_set:
        total_collections += len(shader[1].collections)

    # Start writing master buffer
    output_data = bytearray()
    output_data += aabb
    output_data += struct.pack(endian_char + 'III', mesh_obj.data.hecl_material_count, len(shader_set), total_collections)

    # Shader Reference Data (truncated SHA1 hashes)
    if mesh_obj.data.hecl_material_count > 0:
        for grp_idx in range(mesh_obj.data.hecl_material_count):
            for shader in shader_set:
                output_data += shader[0][grp_idx]
    else:
        for shader in shader_set:
            for subshader in shader[0]:
                output_data += subshader

    # Generate mesh data
    for shader in shader_set:
        mesh_maker = shader[1]
        output_data += struct.pack(endian_char + 'Ifff', len(mesh_maker.collections), shader[2][0], shader[2][1], shader[2][2])
        for coll_idx in range(len(mesh_maker.collections)):

            # Vert Buffer
            uv_count, max_bones, vert_bytes, vert_arr = mesh_maker.generate_vertex_buffer(coll_idx, endian_char)
            output_data += struct.pack(endian_char + 'III', uv_count, max_bones // 4, len(vert_bytes))
            output_data += vert_bytes

            # Elem Buffer
            collection_primitives, element_bytes, elem_arr = mesh_maker.generate_element_buffer(coll_idx, endian_char)
            output_data += struct.pack(endian_char + 'I', len(element_bytes))
            output_data += element_bytes

            # Index Buffer
            index_bytes = mesh_maker.generate_index_buffer(collection_primitives, endian_char, rigger)
            output_data += struct.pack(endian_char + 'I', len(index_bytes))
            output_data += index_bytes

    # Generate rigging data
    skin_info = None
    if rigger:
        skin_info = rigger.generate_rigging_info(endian_char)

    # Write final buffer: 'HMDL' magic, header, then payload.
    # (Fixed: was assigned bytearray() then immediately rebound to b'HMDL'.)
    final_data = bytearray(b'HMDL')
    if rigger:
        final_data += struct.pack(endian_char + 'IIII', 1, actual_max_texmtx_count, max_bone_count, len(skin_info))
        final_data += skin_info
    else:
        final_data += struct.pack(endian_char + 'II', 0, actual_max_texmtx_count)
    final_data += output_data

    # Clean up temporary per-material meshes
    for mesh in part_meshes:
        bpy.data.meshes.remove(mesh)

    # Write final mesh object
    # NOTE(review): area_db_id / heclpak / resource_name / res_db / db_id
    # are undefined here -- this epilogue cannot run as written.
    if area_db_id is not None:
        new_hash = 0
    else:
        new_hash = heclpak.add_object(final_data, b'HMDL', resource_name)
        res_db.update_resource_stats(db_id, new_hash)

    return db_id, new_hash, final_data
|
||||
|
||||
|
||||
def panel_draw(self, context):
    """Draw the HMDL section of the HECL scene panel: the mesh-object
    selector plus validation messages and per-mesh material properties."""
    layout = self.layout
    scene = context.scene
    layout.prop_search(scene, 'hecl_mesh_obj', scene, 'objects')

    mesh_name = scene.hecl_mesh_obj
    if not len(mesh_name):
        layout.label("Mesh not specified", icon='ERROR')
    elif mesh_name not in scene.objects:
        layout.label("'"+mesh_name+"' not in scene", icon='ERROR')
    else:
        obj = scene.objects[mesh_name]
        if obj.type != 'MESH':
            layout.label("'"+mesh_name+"' not a 'MESH'", icon='ERROR')
        layout.prop(obj.data, 'hecl_active_material')
        layout.prop(obj.data, 'hecl_material_count')
|
||||
|
||||
|
||||
# Debug register operators
# NOTE(review): bpy is already imported at module top; this re-import is
# redundant but harmless.
import bpy

def register():
    # Scene-level property naming the mesh object that cook() exports
    bpy.types.Scene.hecl_mesh_obj = bpy.props.StringProperty(
        name='HECL Mesh Object',
        description='Blender Mesh Object to export during HECL\'s cook process')
    bpy.utils.register_class(hmdl_shader.hecl_shader_operator)
    pass

def unregister():
    bpy.utils.unregister_class(hmdl_shader.hecl_shader_operator)
    pass
|
|
@ -0,0 +1,216 @@
|
|||
'''
|
||||
RMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
This file provides a means to encode animation key-channels
|
||||
in an interleaved, sparse array for use by the runtime
|
||||
'''
|
||||
|
||||
import re
|
||||
import hashlib
|
||||
import struct
|
||||
import mathutils
|
||||
|
||||
# Hash bone name into truncated 28-bit integer
|
||||
def hashbone(name):
    """Hash *name* into a truncated 28-bit integer.

    Takes the first four bytes of the SHA-1 digest, reads them
    big-endian, and masks off the top nibble (the upper 4 bits of the
    packed word are reserved for per-bone property flags)."""
    digest = hashlib.sha1(name.encode()).digest()
    return int.from_bytes(digest[:4], byteorder='big', signed=False) & 0xfffffff
|
||||
|
||||
# Regex RNA path matchers; group(1) captures the bone name.
# NOTE(review): the '.' characters are unescaped, so they match any
# character -- harmless for well-formed RNA paths but worth confirming.
scale_matcher = re.compile(r'pose.bones\["(\S+)"\].scale')
rotation_matcher = re.compile(r'pose.bones\["(\S+)"\].rotation')
location_matcher = re.compile(r'pose.bones\["(\S+)"\].location')

# Effect transform modes (serialized integer codes)
EFFECT_XF_MODES = {'STATIONARY':0, 'WORLD':1, 'LOCAL':2}
|
||||
|
||||
# Generate animation info
|
||||
# Generate animation info
def generate_animation_info(action, res_db, rani_db_id, arg_package, endian_char='<'):
    """Serialize *action*'s keyframe channels into an interleaved, sparse
    byte stream, followed by an event buffer built from action.rwk_events.

    Returns key_stream + event_buf as bytes.
    """

    # Set of frame indices that carry at least one key
    frame_set = set()

    # Set of unique bone names
    bone_set = set()

    # Scan through all fcurves to build animated bone set
    for fcurve in action.fcurves:
        data_path = fcurve.data_path
        scale_match = scale_matcher.match(data_path)
        rotation_match = rotation_matcher.match(data_path)
        location_match = location_matcher.match(data_path)

        if scale_match:
            bone_set.add(scale_match.group(1))
        elif rotation_match:
            bone_set.add(rotation_match.group(1))
        elif location_match:
            bone_set.add(location_match.group(1))
        else:
            # Not a bone transform channel; ignore
            continue

        # Count unified keyframes for interleaving channel data
        for key in fcurve.keyframe_points:
            frame_set.add(int(key.co[0]))

    # Relate fcurves per-frame / per-bone and assemble data
    key_stream = bytearray()
    key_stream += struct.pack(endian_char + 'II', len(frame_set), len(bone_set))
    # Duration/interval derived from the action's rwk_fps custom property
    duration = action.frame_range[1] / action.rwk_fps
    interval = 1.0 / action.rwk_fps
    key_stream += struct.pack(endian_char + 'ff', duration, interval)

    # Generate keyframe bitmap: one bit per frame, set when keyed
    fr = int(round(action.frame_range[1]))
    key_stream += struct.pack(endian_char + 'I', fr)
    bitmap_words = [0] * (fr // 32)
    if fr % 32:
        bitmap_words.append(0)
    for i in range(fr):
        if i in frame_set:
            bitmap_words[i//32] |= 1 << i%32
    for word in bitmap_words:
        key_stream += struct.pack(endian_char + 'I', word)


    # Build bone table: for each animated bone, collect its fcurves per
    # property and emit a header word (28-bit name hash | property bits).
    # Property bits: 1 = scale, 2 = rotation (any mode), 4 = location.
    bone_list = []
    for bone in bone_set:
        fc_dict = dict()
        rotation_mode = None
        property_bits = 0
        for fcurve in action.fcurves:
            if fcurve.data_path == 'pose.bones["'+bone+'"].scale':
                if 'scale' not in fc_dict:
                    fc_dict['scale'] = [None, None, None]
                    property_bits |= 1
                fc_dict['scale'][fcurve.array_index] = fcurve
            elif fcurve.data_path == 'pose.bones["'+bone+'"].rotation_euler':
                if 'rotation_euler' not in fc_dict:
                    fc_dict['rotation_euler'] = [None, None, None]
                    rotation_mode = 'rotation_euler'
                    property_bits |= 2
                fc_dict['rotation_euler'][fcurve.array_index] = fcurve
            elif fcurve.data_path == 'pose.bones["'+bone+'"].rotation_quaternion':
                if 'rotation_quaternion' not in fc_dict:
                    fc_dict['rotation_quaternion'] = [None, None, None, None]
                    rotation_mode = 'rotation_quaternion'
                    property_bits |= 2
                fc_dict['rotation_quaternion'][fcurve.array_index] = fcurve
            elif fcurve.data_path == 'pose.bones["'+bone+'"].rotation_axis_angle':
                if 'rotation_axis_angle' not in fc_dict:
                    fc_dict['rotation_axis_angle'] = [None, None, None, None]
                    rotation_mode = 'rotation_axis_angle'
                    property_bits |= 2
                fc_dict['rotation_axis_angle'][fcurve.array_index] = fcurve
            elif fcurve.data_path == 'pose.bones["'+bone+'"].location':
                if 'location' not in fc_dict:
                    fc_dict['location'] = [None, None, None]
                    property_bits |= 4
                fc_dict['location'][fcurve.array_index] = fcurve
        bone_list.append((bone, rotation_mode, fc_dict))
        bone_head = hashbone(bone)
        bone_head |= (property_bits << 28)
        key_stream += struct.pack(endian_char + 'I', bone_head)

    # Interleave / interpolate keyframe data: frame-major, then bone-major;
    # missing component curves are emitted as 0.0
    for frame in sorted(frame_set):
        for bone in bone_list:

            bone_name = bone[0]
            rotation_mode = bone[1]
            fc_dict = bone[2]

            # Scale curves
            if 'scale' in fc_dict:
                for comp in range(3):
                    if fc_dict['scale'][comp]:
                        key_stream += struct.pack(endian_char + 'f', fc_dict['scale'][comp].evaluate(frame))
                    else:
                        key_stream += struct.pack(endian_char + 'f', 0.0)

            # Rotation curves (always serialized as a quaternion)
            if rotation_mode == 'rotation_quaternion':
                for comp in range(4):
                    if fc_dict['rotation_quaternion'][comp]:
                        key_stream += struct.pack(endian_char + 'f', fc_dict['rotation_quaternion'][comp].evaluate(frame))
                    else:
                        key_stream += struct.pack(endian_char + 'f', 0.0)

            elif rotation_mode == 'rotation_euler':
                euler = [0.0, 0.0, 0.0]
                for comp in range(3):
                    if fc_dict['rotation_euler'][comp]:
                        euler[comp] = fc_dict['rotation_euler'][comp].evaluate(frame)
                euler_o = mathutils.Euler(euler, 'XYZ')
                quat = euler_o.to_quaternion()
                key_stream += struct.pack(endian_char + 'ffff', quat[0], quat[1], quat[2], quat[3])

            elif rotation_mode == 'rotation_axis_angle':
                axis_angle = [0.0, 0.0, 0.0, 0.0]
                for comp in range(4):
                    if fc_dict['rotation_axis_angle'][comp]:
                        axis_angle[comp] = fc_dict['rotation_axis_angle'][comp].evaluate(frame)
                quat = mathutils.Quaternion(axis_angle[1:4], axis_angle[0])
                key_stream += struct.pack(endian_char + 'ffff', quat[0], quat[1], quat[2], quat[3])

            # Location curves
            if 'location' in fc_dict:
                for comp in range(3):
                    if fc_dict['location'][comp]:
                        key_stream += struct.pack(endian_char + 'f', fc_dict['location'][comp].evaluate(frame))
                    else:
                        key_stream += struct.pack(endian_char + 'f', 0.0)


    # Generate event buffer: per-type counts, then LOOP, UEVT, EFFECT and
    # SOUND records in that order
    event_buf = bytearray()
    if hasattr(action, 'rwk_events'):
        c1 = 0
        c2 = 0
        c3 = 0
        c4 = 0
        for event in action.rwk_events:
            if event.type == 'LOOP':
                c1 += 1
            elif event.type == 'UEVT':
                c2 += 1
            elif event.type == 'EFFECT':
                c3 += 1
            elif event.type == 'SOUND':
                c4 += 1
        event_buf += struct.pack(endian_char + 'IIII', c1, c2, c3, c4)

        for event in action.rwk_events:
            if event.type == 'LOOP':
                event_buf += struct.pack(endian_char + 'fi', event.time, event.loop_data.bool)

        for event in action.rwk_events:
            if event.type == 'UEVT':
                event_buf += struct.pack(endian_char + 'fii', event.time, event.uevt_data.type,
                                         hashbone(event.uevt_data.bone_name))

        for event in action.rwk_events:
            if event.type == 'EFFECT':
                # EFFECT events depend on another resource; register the
                # dependency or fail loudly when it cannot be found
                effect_db_id, effect_hash = res_db.search_for_resource(event.effect_data.uid, arg_package)
                if effect_hash:
                    res_db.register_dependency(rani_db_id, effect_db_id)
                else:
                    raise RuntimeError("Error - unable to find effect '{0}'".format(event.effect_data.uid))
                event_buf += struct.pack(endian_char + 'fiifi', event.time, event.effect_data.frame_count,
                                         hashbone(event.effect_data.bone_name), event.effect_data.scale,
                                         EFFECT_XF_MODES[event.effect_data.transform_mode])
                event_buf += effect_hash

        for event in action.rwk_events:
            if event.type == 'SOUND':
                # Sound id: first four chars of the id string as a big-endian word
                sid = int.from_bytes(event.sound_data.sound_id.encode()[:4], byteorder='big', signed=False)
                event_buf += struct.pack(endian_char + 'fIff', event.time, sid,
                                         event.sound_data.ref_amp, event.sound_data.ref_dist)

    else:
        # NOTE(review): this zero header is packed with native endianness
        # ('IIII' without endian_char), unlike every other pack in this
        # function -- likely an oversight; confirm before changing.
        event_buf += struct.pack('IIII',0,0,0,0)



    return key_stream + event_buf
|
||||
|
|
@ -0,0 +1,583 @@
|
|||
'''
|
||||
RMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
This file defines the `rmdl_draw_general` class to generate vertex+index
|
||||
buffers and mesh arrays to draw them. `PAR1` files also include bone-weight
|
||||
coefficients per-vertex for vertex-shader-driven skeletal evaluation.
|
||||
'''
|
||||
|
||||
import struct
|
||||
import bpy
|
||||
|
||||
class loop_vert:
    """Hashable (mesh, loop) pair.

    Identity is defined by the owning mesh plus the loop's index, so two
    wrappers around the same loop of the same mesh compare (and hash)
    equal regardless of object identity."""

    def __init__(self, mesh, loop):
        self.mesh = mesh
        self.loop = loop

    def __hash__(self):
        return hash((self.mesh, self.loop.index))

    def __eq__(self, other):
        return self.mesh == other.mesh and self.loop.index == other.loop.index

    def __ne__(self, other):
        return self.mesh != other.mesh or self.loop.index != other.loop.index

    def __str__(self):
        return str((self.mesh, self.loop.index))
|
||||
|
||||
|
||||
# Round up to nearest 32 multiple
def ROUND_UP_32(num):
    """Return *num* rounded up to the next multiple of 32."""
    return ((num + 31) // 32) * 32
|
||||
|
||||
# Round up to nearest 4 multiple
def ROUND_UP_4(num):
    """Return *num* rounded up to the next multiple of 4."""
    return ((num + 3) // 4) * 4
|
||||
|
||||
# This routine conditionally inserts a loop into a multi-tiered
# array/set collection; simultaneously relating verts to loops and
# eliminating redundant loops (containing identical UV coordinates)
def _augment_loop_vert_array(lv_array, mesh, loop, uv_count):
    """Insert *loop* into *lv_array* (a list of lists of loop_vert).

    If the loop is already present, do nothing. If an existing entry has
    identical UV coordinates across the first *uv_count* UV layers, merge
    the loop into that entry's set. Otherwise start a new single-element
    set. Mutates *lv_array* in place; returns None.
    """

    # Create loop_vert object for comparative testing
    lv = loop_vert(mesh, loop)

    # First perform quick check to see if loop is already in a set
    for existing_loop_set in lv_array:
        if lv in existing_loop_set:
            return

    # Now perform extended check to see if any UV coordinate values already match
    for existing_loop_set in lv_array:
        for existing_loop in existing_loop_set:
            matches = True
            for uv_layer_idx in range(uv_count):
                uv_layer = mesh.uv_layers[uv_layer_idx]
                existing_uv_coords = uv_layer.data[existing_loop.loop.index].uv
                check_uv_coords = uv_layer.data[loop.index].uv
                if (existing_uv_coords[0] != check_uv_coords[0] or
                    existing_uv_coords[1] != check_uv_coords[1]):
                    matches = False
                    break
            if matches:
                existing_loop_set.append(lv)
                return

    # If we get here, no match found; add new set to `lv_array`
    lv_array.append([lv])
|
||||
|
||||
|
||||
# Get loop set from collection generated with above method;
# containing a specified loop
def _get_loop_set(lv_array, mesh, loop):
    """Return the loop-vert set within *lv_array* that contains *loop*,
    or None when no set contains it."""
    probe = loop_vert(mesh, loop)
    for candidate_set in lv_array:
        if probe in candidate_set:
            return candidate_set
    return None
|
||||
|
||||
|
||||
# Method to find triangle opposite another triangle over two vert-indices
|
||||
def _find_polygon_opposite_idxs(mesh, original_triangle, a_idx, b_idx):
|
||||
|
||||
for triangle in mesh.polygons:
|
||||
|
||||
if triangle == original_triangle:
|
||||
continue
|
||||
|
||||
if (a_idx in triangle.vertices and b_idx in triangle.vertices):
|
||||
return triangle
|
||||
|
||||
return None
|
||||
|
||||
# Method to find triangle opposite another triangle over two loop-vert sets
def _find_polygon_opposite_lvs(mesh, original_triangle, lv_a, lv_b):
    """Edge-adjacency lookup keyed by loop-vert sets rather than raw
    vertex indices; delegates to _find_polygon_opposite_idxs."""
    return _find_polygon_opposite_idxs(mesh, original_triangle,
                                       lv_a[0].loop.vertex_index,
                                       lv_b[0].loop.vertex_index)
|
||||
|
||||
|
||||
|
||||
|
||||
class rmdl_mesh:
|
||||
|
||||
    def __init__(self):
        """Initialize an empty RMDL mesh generator."""

        # 4-byte ID string used in generated RMDL file
        self.file_identifier = '_GEN'

        # Array that holds collections. A collection is a 16-bit index
        # worth of vertices, elements referencing them, and a
        # primitive array to draw them
        self.collections = []
|
||||
|
||||
|
||||
|
||||
# If vertex index space is exceeded for a single additional vertex,
|
||||
# a new collection is created and returned by this routine
|
||||
def _check_collection_overflow(self, mesh, collection, rigger, uv_count):
|
||||
max_bone_count = 0;
|
||||
if rigger:
|
||||
max_bone_count = rigger.max_bone_count
|
||||
if not collection or len(collection['vertices']) >= 65535:
|
||||
new_collection = {'uv_count':uv_count, 'max_bone_count':max_bone_count, 'vertices':[], 'vert_weights':[], 'tri_strips':[]}
|
||||
self.collections.append(new_collection)
|
||||
return new_collection, True
|
||||
else:
|
||||
return collection, False
|
||||
|
||||
|
||||
# Augments draw generator with a single blender MESH data object
|
||||
def add_mesh(self, mesh, rigger, uv_count):
|
||||
max_bone_count = 0;
|
||||
if rigger:
|
||||
max_bone_count = rigger.max_bone_count
|
||||
|
||||
print("Optimizing mesh:", mesh.name)
|
||||
opt_gpu_vert_count = 0
|
||||
|
||||
# First, generate compressed loop-vertex array-array-set collection
|
||||
loop_vert_array = []
|
||||
for vert in mesh.vertices:
|
||||
loop_verts = []
|
||||
for loop in mesh.loops:
|
||||
if loop.vertex_index == vert.index:
|
||||
_augment_loop_vert_array(loop_verts, mesh, loop, uv_count)
|
||||
loop_vert_array.append(loop_verts)
|
||||
|
||||
|
||||
# Find best collection to add mesh data into
|
||||
best_collection = None
|
||||
for collection in self.collections:
|
||||
if (collection['uv_count'] == uv_count and
|
||||
collection['max_bone_count'] == max_bone_count and
|
||||
len(collection['vertices']) < 65000):
|
||||
best_collection = collection
|
||||
break
|
||||
if not best_collection:
|
||||
# Create a new one if no good one found
|
||||
best_collection, is_new_collection = self._check_collection_overflow(mesh, None, rigger, uv_count)
|
||||
|
||||
# If rigging, start an array of bone names to be bound to contiguous tri-strips
|
||||
tri_strip_bones = []
|
||||
tri_strip_bones_overflow = False
|
||||
|
||||
# Now begin generating draw primitives
|
||||
visited_polys = set()
|
||||
for poly in mesh.polygons:
|
||||
# Skip if already visited
|
||||
if poly in visited_polys:
|
||||
continue
|
||||
|
||||
# Allows restart if initial polygon was not added
|
||||
good = False
|
||||
while not good:
|
||||
|
||||
# Begin a tri-strip primitive (array of vert indices)
|
||||
tri_strip = []
|
||||
|
||||
# Temporary references to trace out strips of triangles
|
||||
temp_poly = poly
|
||||
|
||||
# Rolling references of last two emitted loop-vert sets (b is older)
|
||||
last_loop_vert_a = None
|
||||
last_loop_vert_b = None
|
||||
|
||||
# In the event of vertex-buffer overflow, this will be made true;
|
||||
# resulting in the immediate splitting of a tri-strip
|
||||
is_new_collection = False
|
||||
|
||||
|
||||
|
||||
# As long as there is a connected polygon to visit
|
||||
while temp_poly:
|
||||
if 0 == len(tri_strip): # First triangle in strip
|
||||
|
||||
# Order the loops so the last two connect to a next polygon
|
||||
idx0 = mesh.loops[temp_poly.loop_indices[0]].vertex_index
|
||||
idx1 = mesh.loops[temp_poly.loop_indices[1]].vertex_index
|
||||
idx2 = mesh.loops[temp_poly.loop_indices[2]].vertex_index
|
||||
if not _find_polygon_opposite_idxs(mesh, temp_poly, idx1, idx2):
|
||||
loop_idxs = [temp_poly.loop_indices[2], temp_poly.loop_indices[0], temp_poly.loop_indices[1]]
|
||||
if not _find_polygon_opposite_idxs(mesh, temp_poly, idx0, idx1):
|
||||
loop_idxs = [temp_poly.loop_indices[1], temp_poly.loop_indices[2], temp_poly.loop_indices[0]]
|
||||
else:
|
||||
loop_idxs = temp_poly.loop_indices
|
||||
|
||||
# Add three loop-vert vertices to tri-strip
|
||||
for poly_loop_idx in loop_idxs:
|
||||
poly_loop = mesh.loops[poly_loop_idx]
|
||||
loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
|
||||
|
||||
# If rigging, ensure that necessary bones are available and get weights
|
||||
weights = None
|
||||
if rigger:
|
||||
weights = rigger.augment_bone_array_with_lv(mesh, tri_strip_bones, loop_vert)
|
||||
if weights is None:
|
||||
tri_strip_bones_overflow = True
|
||||
break
|
||||
|
||||
if loop_vert not in best_collection['vertices']:
|
||||
best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger, uv_count)
|
||||
if is_new_collection:
|
||||
break
|
||||
best_collection['vertices'].append(loop_vert)
|
||||
best_collection['vert_weights'].append(weights)
|
||||
tri_strip.append(best_collection['vertices'].index(loop_vert))
|
||||
last_loop_vert_b = last_loop_vert_a
|
||||
last_loop_vert_a = loop_vert
|
||||
opt_gpu_vert_count += 1
|
||||
#print('appended initial loop', loop_vert[0].loop.index)
|
||||
|
||||
if is_new_collection or tri_strip_bones_overflow:
|
||||
break
|
||||
|
||||
|
||||
else: # Not the first triangle in strip; look up all three loop-verts,
|
||||
# ensure it matches last-2 rolling reference, emit remaining loop-vert
|
||||
|
||||
# Iterate loop verts
|
||||
odd_loop_vert_out = None
|
||||
loop_vert_match_count = 0
|
||||
for poly_loop_idx in temp_poly.loop_indices:
|
||||
poly_loop = mesh.loops[poly_loop_idx]
|
||||
loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
|
||||
|
||||
if (loop_vert == last_loop_vert_a or loop_vert == last_loop_vert_b):
|
||||
loop_vert_match_count += 1
|
||||
continue
|
||||
|
||||
odd_loop_vert_out = loop_vert
|
||||
|
||||
|
||||
# Ensure there are two existing matches to continue tri-strip
|
||||
if loop_vert_match_count != 2 or not odd_loop_vert_out:
|
||||
break
|
||||
|
||||
|
||||
# If rigging, ensure that necessary bones are available and get weights
|
||||
weights = None
|
||||
if rigger:
|
||||
weights = rigger.augment_bone_array_with_lv(mesh, tri_strip_bones, odd_loop_vert_out)
|
||||
if weights is None:
|
||||
tri_strip_bones_overflow = True
|
||||
break
|
||||
|
||||
|
||||
# Add to tri-strip
|
||||
if odd_loop_vert_out not in best_collection['vertices']:
|
||||
best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger, uv_count)
|
||||
if is_new_collection:
|
||||
break
|
||||
best_collection['vertices'].append(odd_loop_vert_out)
|
||||
best_collection['vert_weights'].append(weights)
|
||||
tri_strip.append(best_collection['vertices'].index(odd_loop_vert_out))
|
||||
last_loop_vert_b = last_loop_vert_a
|
||||
last_loop_vert_a = odd_loop_vert_out
|
||||
opt_gpu_vert_count += 1
|
||||
|
||||
|
||||
|
||||
# This polygon is good
|
||||
visited_polys.add(temp_poly)
|
||||
|
||||
|
||||
# Find a polygon directly connected to this one to continue strip
|
||||
temp_poly = _find_polygon_opposite_lvs(mesh, temp_poly, last_loop_vert_a, last_loop_vert_b)
|
||||
if temp_poly in visited_polys:
|
||||
temp_poly = None
|
||||
|
||||
|
||||
|
||||
# Add tri-strip to element array
|
||||
if len(tri_strip):
|
||||
best_collection['tri_strips'].append({'mesh':mesh, 'strip':tri_strip, 'strip_bones':tri_strip_bones})
|
||||
good = True
|
||||
if tri_strip_bones_overflow:
|
||||
tri_strip_bones = []
|
||||
tri_strip_bones_overflow = False
|
||||
|
||||
print("Mesh contains", len(mesh.polygons), "triangles")
|
||||
print("Vert count: (%d -> %d)\n" % (len(mesh.loops), opt_gpu_vert_count))
|
||||
|
||||
|
||||
|
||||
# Generate binary vertex buffer of collection index
|
||||
def generate_vertex_buffer(self, index, endian_char):
    """Serialize collection `index` into an interleaved float vertex buffer.

    Per-vertex layout (32-bit floats in `endian_char` byte order):
    position zero-padded to 4 components, normal zero-padded to 4
    components, `max_bones` weights, then one (u, -v) pair per UV layer,
    padded to an even pair count so each vertex stays 16-byte aligned.

    Returns (uv_count, max_bones, vert_bytes, pos_out), or None when the
    collection slot is empty. pos_out is a debug list of (co, normal)
    tuples used by the C-source generator operators.
    """
    collection = self.collections[index]
    if not collection:
        return None

    # Positions output
    pos_out = []

    # Generate vert buffer struct
    vstruct = struct.Struct(endian_char + 'f')

    # If rigging, determine maximum number of bones in this collection
    max_bones = 0
    for i in range(len(collection['vertices'])):
        weight_count = 0
        if collection['vert_weights'][i]:
            weight_count = len(collection['vert_weights'][i])
        if weight_count > max_bones:
            max_bones = weight_count
    # Rounded up so the weight block is a multiple of 16 bytes
    max_bones = ROUND_UP_4(max_bones)

    # Build byte array
    vert_bytes = bytearray()
    for i in range(len(collection['vertices'])):
        loop_vert = collection['vertices'][i]
        bloop = loop_vert[0]
        mesh = bloop.mesh
        bvert = mesh.vertices[bloop.loop.vertex_index]
        #print(bvert.co)

        # Position — zero-padded out to 4 components
        pos_out.append((bvert.co, bvert.normal))
        for comp in range(4):
            if comp in range(len(bvert.co)):
                vert_bytes += vstruct.pack(bvert.co[comp])
            else:
                vert_bytes += vstruct.pack(0.0)

        # Normal — zero-padded out to 4 components
        for comp in range(4):
            if comp in range(len(bvert.normal)):
                vert_bytes += vstruct.pack(bvert.normal[comp])
            else:
                vert_bytes += vstruct.pack(0.0)

        # Weights — zero-padded out to max_bones entries
        weights = collection['vert_weights'][i]
        for j in range(max_bones):
            if j < len(weights):
                vert_bytes += vstruct.pack(weights[j])
            else:
                vert_bytes += vstruct.pack(0.0)

        # UVs — V negated, presumably converting Blender's bottom-left UV
        # origin to the target's top-left convention; confirm
        added_uvs = 0
        for uv_idx in range(collection['uv_count']):
            coords = mesh.uv_layers[uv_idx].data[bloop.loop.index].uv
            vert_bytes += vstruct.pack(coords[0])
            vert_bytes += vstruct.pack(-coords[1])
            added_uvs += 1

        # Pad to 16-byte alignment (each UV pair is 8 bytes)
        if added_uvs & 1:
            vert_bytes += vstruct.pack(0.0)
            vert_bytes += vstruct.pack(0.0)

    return collection['uv_count'], max_bones, vert_bytes, pos_out
|
||||
|
||||
|
||||
# Generate binary element buffer of collection index
|
||||
def generate_element_buffer(self, index, endian_char):
    """Merge all of collection `index`'s tri-strips into one degenerate
    tri-strip element buffer (uint16 indices, `endian_char` byte order).

    Strips sharing a mesh and bone set are joined by repeating the last
    and first indices (degenerate triangles); a change of mesh or bone
    set closes the current primitive record and starts a new one.

    Returns (collection_primitives, element_bytes, arr_out), or None when
    the collection slot is empty. collection_primitives is a list of
    {'mesh', 'primitives'} dicts whose primitive records carry
    offset/length/bones; arr_out is the same indices as a Python list.
    """
    collection = self.collections[index]
    if not collection:
        return None

    # Numeric array out
    arr_out = []

    # Generate element buffer struct
    estruct = struct.Struct(endian_char + 'H')

    # Build mesh-primitive hierarchy
    last_mesh = collection['tri_strips'][0]['mesh']
    mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
    collection_primitives = [mesh_primitives]

    # Collection element byte-array
    cur_offset = 0
    element_bytes = bytearray()

    # Last element index entry and strip length for forming degenerate strip
    last_elem = None
    strip_len = 0

    # Last strip bone array (for rigging)
    # NOTE(review): last_strip_bones is never reassigned inside the loop
    # below, so after the first bone-set change every strip forces a new
    # primitive — looks like a bug; confirm against the PAR1 format intent
    last_strip_bones = collection['tri_strips'][0]['strip_bones']

    # Build single degenerate tri-strip
    for strip in collection['tri_strips']:
        #print('new strip', collection['tri_strips'].index(strip))

        if last_mesh != strip['mesh'] or last_strip_bones != strip['strip_bones']:
            #print('splitting primitive')
            # New mesh; force new strip
            mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
            cur_offset += strip_len
            last_elem = None
            strip_len = 0
            last_mesh = strip['mesh']
            mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
            collection_primitives.append(mesh_primitives)

        elif last_elem is not None:
            #print('extending primitive')
            # Existing mesh being extended as degenerate strip
            strip_len += 2
            element_bytes += estruct.pack(last_elem)
            element_bytes += estruct.pack(strip['strip'][0])
            arr_out.append(last_elem)
            arr_out.append(strip['strip'][0])

            # If current element count is odd, add additional degenerate strip to make it even
            # This ensures that the sub-strip has proper winding-order for backface culling
            if (strip_len & 1):
                strip_len += 1
                element_bytes += estruct.pack(strip['strip'][0])
                arr_out.append(strip['strip'][0])

        # Primitive tri-strip byte array
        for idx in strip['strip']:
            #print(idx)
            strip_len += 1
            element_bytes += estruct.pack(idx)
            arr_out.append(idx)
            last_elem = idx

    # Final mesh entry
    mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
    cur_offset += strip_len

    return collection_primitives, element_bytes, arr_out
|
||||
|
||||
|
||||
# Generate binary draw-index buffer of collection index
|
||||
def generate_index_buffer(self, collection_primitives, endian_char, rigger):
    """Serialize the draw-index table for a collection.

    Emits a submesh count, then for each submesh a primitive count
    followed by one record per primitive: an optional skin index (only
    when `rigger` is provided), the constant 2 (meaning not shown here —
    presumably a primitive-type tag), the element offset, and the element
    length. All values are uint32 in `endian_char` byte order.
    """
    uint32 = struct.Struct(endian_char + 'I')

    # Submesh count header
    out = bytearray(uint32.pack(len(collection_primitives)))

    for submesh in collection_primitives:
        prims = submesh['primitives']
        out += uint32.pack(len(prims))

        for record in prims:
            if rigger:
                # Rigged draws reference a skin entry resolved by the rigger
                out += uint32.pack(rigger.augment_skin(record['bones']))
            out += uint32.pack(2)
            out += uint32.pack(record['offset'])
            out += uint32.pack(record['length'])

    return out
|
||||
|
||||
|
||||
# C-generation operator
|
||||
import bmesh
|
||||
class rmdl_mesh_operator(bpy.types.Operator):
    """Debug operator: triangulate a copy of the active mesh, run it
    through rmdl_mesh, and place generated C source (vertex and element
    buffer arrays) on the system clipboard."""
    bl_idname = "scene.rmdl_mesh"
    bl_label = "RMDL C mesh maker"
    bl_description = "RMDL Mesh source generation utility"

    @classmethod
    def poll(cls, context):
        # Only available with an active mesh object
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        # Copy object + mesh so the user's data is never modified
        copy_mesh = context.object.data.copy()
        copy_obj = context.object.copy()
        copy_obj.data = copy_mesh

        # Triangulate the copy (the exporter works on triangles only)
        bm = bmesh.new()
        bm.from_mesh(copy_mesh)
        bmesh.ops.triangulate(bm, faces=bm.faces)
        #to_remove = []
        #for face in bm.faces:
        #    if face.material_index != 7:
        #        to_remove.append(face)
        #bmesh.ops.delete(bm, geom=to_remove, context=5)
        bm.to_mesh(copy_mesh)
        bm.free()

        # Link the copy into the scene while the exporter runs
        context.scene.objects.link(copy_obj)
        rmesh = rmdl_mesh()
        rmesh.add_mesh(copy_mesh, None, 0)

        # Vertex buffer source: position xyz0 + normal xyz0 per vertex
        str_out = '/* Vertex Buffer */\nstatic const float VERT_BUF[] = {\n'
        vert_arr = rmesh.generate_vertex_buffer(0, '<')[3]
        for v in vert_arr:
            str_out += ' %f, %f, %f, 0.0, %f, %f, %f, 0.0,\n' % (v[0][0], v[0][1], v[0][2], v[1][0], v[1][1], v[1][2])
        ebuf_arr = rmesh.generate_element_buffer(0, '<')[2]
        str_out += '};\n\n/* Element Buffer */\n#define ELEM_BUF_COUNT %d\nstatic const u16 ELEM_BUF[] = {\n' % len(ebuf_arr)
        for e in ebuf_arr:
            str_out += ' %d,\n' % e
        str_out += '};\n'

        # Remove the temporary copies again
        context.scene.objects.unlink(copy_obj)
        bpy.data.objects.remove(copy_obj)
        bpy.data.meshes.remove(copy_mesh)

        context.window_manager.clipboard = str_out
        self.report({'INFO'}, "Wrote mesh C to clipboard")
        return {'FINISHED'}
|
||||
|
||||
# 2D C-generation operator
|
||||
import bmesh
|
||||
class rmdl_mesh2d_operator(bpy.types.Operator):
    """Debug operator: like rmdl_mesh_operator, but emits a 2D vertex
    buffer (x and z components only — presumably a ground-plane
    projection; confirm) as C source on the system clipboard."""
    bl_idname = "scene.rmdl_mesh2d"
    bl_label = "RMDL C 2D mesh maker"
    bl_description = "RMDL 2D Mesh source generation utility"

    @classmethod
    def poll(cls, context):
        # Only available with an active mesh object
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        # Copy object + mesh so the user's data is never modified
        copy_mesh = context.object.data.copy()
        copy_obj = context.object.copy()
        copy_obj.data = copy_mesh

        # Triangulate the copy (the exporter works on triangles only)
        bm = bmesh.new()
        bm.from_mesh(copy_mesh)
        bmesh.ops.triangulate(bm, faces=bm.faces)
        #to_remove = []
        #for face in bm.faces:
        #    if face.material_index != 7:
        #        to_remove.append(face)
        #bmesh.ops.delete(bm, geom=to_remove, context=5)
        bm.to_mesh(copy_mesh)
        bm.free()

        # Link the copy into the scene while the exporter runs
        context.scene.objects.link(copy_obj)
        rmesh = rmdl_mesh()
        rmesh.add_mesh(copy_mesh, None, 0)

        # 2D vertex buffer source: only x and z of each position
        str_out = '/* Vertex Buffer */\nstatic const float VERT_BUF[] = {\n'
        vert_arr = rmesh.generate_vertex_buffer(0, '<')[3]
        for v in vert_arr:
            str_out += ' %f, %f,\n' % (v[0][0], v[0][2])
        ebuf_arr = rmesh.generate_element_buffer(0, '<')[2]
        str_out += '};\n\n/* Element Buffer */\n#define ELEM_BUF_COUNT %d\nstatic const u16 ELEM_BUF[] = {\n' % len(ebuf_arr)
        for e in ebuf_arr:
            str_out += ' %d,\n' % e
        str_out += '};\n'

        # Remove the temporary copies again
        context.scene.objects.unlink(copy_obj)
        bpy.data.objects.remove(copy_obj)
        bpy.data.meshes.remove(copy_mesh)

        context.window_manager.clipboard = str_out
        self.report({'INFO'}, "Wrote mesh C to clipboard")
        return {'FINISHED'}
|
|
@ -0,0 +1,298 @@
|
|||
'''
|
||||
HMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
Traces the 'Blender Internal' shader node structure to generate a
|
||||
HECL combiner string
|
||||
'''
|
||||
|
||||
# Trace color node structure
|
||||
def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
    """Recursively trace the Blender-Internal shader node graph rooted at
    `node` and return a HECL color-combiner expression string.

    mat_obj    -- material whose node tree is being traced
    mesh_obj   -- mesh object, used to resolve UV-layer names to indices
    blend_path -- path of the .blend file, embedded in texture() references
    node       -- current shader node
    socket     -- output socket of `node` the parent linked from (None at root)

    Raises RuntimeError on node types/configurations HECL cannot express.
    """

    if node.type == 'OUTPUT':
        # Root node: follow the Color input link, or inline its constant
        if node.inputs['Color'].is_linked:
            return recursive_color_trace(mat_obj, mesh_obj, blend_path,
                                         node.inputs['Color'].links[0].from_node,
                                         node.inputs['Color'].links[0].from_socket)
        else:
            return 'vec3(%f, %f, %f)' % (node.inputs['Color'].default_value[0],
                                         node.inputs['Color'].default_value[1],
                                         node.inputs['Color'].default_value[2])

    elif node.type == 'MIX_RGB':

        # Trace both operands, inlining constants for unlinked inputs
        if node.inputs[1].is_linked:
            a_input = recursive_color_trace(mat_obj, mesh_obj, blend_path,
                                            node.inputs[1].links[0].from_node,
                                            node.inputs[1].links[0].from_socket)
        else:
            a_input = 'vec3(%f, %f, %f)' % (node.inputs[1].default_value[0],
                                            node.inputs[1].default_value[1],
                                            node.inputs[1].default_value[2])

        if node.inputs[2].is_linked:
            b_input = recursive_color_trace(mat_obj, mesh_obj, blend_path,
                                            node.inputs[2].links[0].from_node,
                                            node.inputs[2].links[0].from_socket)
        else:
            b_input = 'vec3(%f, %f, %f)' % (node.inputs[2].default_value[0],
                                            node.inputs[2].default_value[1],
                                            node.inputs[2].default_value[2])

        if node.blend_type == 'MULTIPLY':
            return '(%s * %s)' % (a_input, b_input)
        elif node.blend_type == 'ADD':
            return '(%s + %s)' % (a_input, b_input)
        else:
            raise RuntimeError("RMDL does not support shaders with '{0}' blending modes".format(node.blend_type))

    elif node.type == 'TEXTURE':

        if not node.inputs['Vector'].is_linked:
            raise RuntimeError("RMDL texture nodes must have a 'Geometry', 'Group' UV modifier node linked")

        # Determine matrix generator type
        matrix_str = None
        soc_from = node.inputs['Vector'].links[0].from_socket

        if soc_from.node.type == 'GROUP':
            # UV-modifier group: emit a call with the group's constant params
            matrix_str = '%s(' % soc_from.node.node_tree.name
            for s in range(len(soc_from.node.inputs)-1):
                soc = soc_from.node.inputs[s+1]
                if len(soc.links):
                    raise RuntimeError("UV Modifier nodes may not have parameter links (default values only)")
                ncomps = len(soc.default_value)
                if ncomps > 1:
                    matrix_str += 'vec%d(' % ncomps
                    # BUGFIX: iterate the component indices; the original
                    # `for c in ncomps-1` iterated a bare int (TypeError)
                    for c in range(ncomps-1):
                        matrix_str += '%f, ' % soc.default_value[c]
                    matrix_str += '%f)' % soc.default_value[ncomps-1]
                else:
                    matrix_str += '%f' % soc.default_value

                if s == len(soc_from.node.inputs)-2:
                    matrix_str += ')'
                else:
                    matrix_str += ', '

            # Continue tracing through the group's vector input
            soc_from = soc_from.node.inputs[0].links[0].from_socket

        elif soc_from.node.type == 'GEOMETRY':
            pass

        else:
            raise RuntimeError("RMDL texture nodes must have a 'Geometry', 'Group' UV modifier node linked")

        if soc_from.node.type != 'GEOMETRY':
            raise RuntimeError("Matrix animator nodes must connect to 'Geometry' node")

        # Resolve map and matrix index
        node_label = soc_from.node.label
        if not matrix_str and node_label.startswith('MTX_'):
            matrix_str = 'hecl_TexMtx[%d]' % int(node_label[4:])

        if soc_from.name == 'UV':
            uv_name = soc_from.node.uv_layer
            uv_idx = mesh_obj.data.uv_layers.find(uv_name)
            uvsource_str = 'hecl_TexCoord[%d]' % uv_idx

        elif soc_from.name == 'Normal':
            uvsource_str = 'hecl_TexCoordModelViewNormal'

        elif soc_from.name == 'View':
            uvsource_str = 'hecl_TexCoordModelViewPosition'

        else:
            raise RuntimeError("Only the 'UV', 'Normal' and 'View' sockets may be used from 'Geometry' nodes")

        # 'Value' samples produce scalar alpha; 'Color' full texel
        if socket.name == 'Value':
            if matrix_str:
                return 'texture("%s:%s", %s, %s).a' % (blend_path, node.texture.name, uvsource_str, matrix_str)
            else:
                return 'texture("%s:%s", %s).a' % (blend_path, node.texture.name, uvsource_str)
        if socket.name == 'Color':
            if matrix_str:
                return 'texture("%s:%s", %s, %s)' % (blend_path, node.texture.name, uvsource_str, matrix_str)
            else:
                return 'texture("%s:%s", %s)' % (blend_path, node.texture.name, uvsource_str)
        else:
            raise RuntimeError("Only the 'Value' or 'Color' output sockets may be used from Texture nodes")

    elif node.type == 'RGB':

        # DYNAMIC_<n> labels map to runtime constant-color registers
        if node.label.startswith('DYNAMIC_'):
            dynamic_index = int(node.label[8:])
            return 'hecl_KColor[%d]' % dynamic_index

        # BUGFIX: an RGB node's Color output default_value is a color
        # sequence; formatting it with a single '%f' raised TypeError.
        # Emit it as a vec3 constant like the other color literals here.
        return 'vec3(%f, %f, %f)' % (node.outputs['Color'].default_value[0],
                                     node.outputs['Color'].default_value[1],
                                     node.outputs['Color'].default_value[2])

    elif node.type == 'MATERIAL':

        # Shadeless materials contribute full-bright; otherwise lighting
        if mat_obj.use_shadeless:
            return 'vec3(1.0)'
        else:
            return 'hecl_Lighting'

    else:
        raise RuntimeError("RMDL is unable to process '{0}' shader nodes in '{1}'".format(node.type, mat_obj.name))
|
||||
|
||||
|
||||
|
||||
# Trace alpha node structure
|
||||
def recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
    """Recursively trace the Blender-Internal shader node graph rooted at
    `node` and return a HECL alpha-combiner expression string.

    mat_obj    -- material whose node tree is being traced
    mesh_obj   -- mesh object, used to resolve UV-layer names to indices
    blend_path -- path of the .blend file, embedded in texture() references
    node       -- current shader node
    socket     -- output socket of `node` the parent linked from (None at root)

    Raises RuntimeError on node types/configurations HECL cannot express.
    """

    if node.type == 'OUTPUT':
        # Root node: follow the Alpha input link, or inline its constant
        if node.inputs['Alpha'].is_linked:
            return recursive_alpha_trace(mat_obj, mesh_obj, blend_path,
                                         node.inputs['Alpha'].links[0].from_node,
                                         node.inputs['Alpha'].links[0].from_socket)
        else:
            return '%f' % node.inputs['Alpha'].default_value

    elif node.type == 'MATH':

        # Trace both operands, inlining constants for unlinked inputs
        if node.inputs[0].is_linked:
            a_input = recursive_alpha_trace(mat_obj, mesh_obj, blend_path,
                                            node.inputs[0].links[0].from_node,
                                            node.inputs[0].links[0].from_socket)
        else:
            a_input = '%f' % node.inputs[0].default_value

        if node.inputs[1].is_linked:
            # BUGFIX: the original called recursive_alpha_trace with a
            # stale argument list (plat, tex_list, mtx_dict — names that
            # don't exist in this module), raising NameError whenever
            # input 1 was linked. Use this function's actual signature.
            b_input = recursive_alpha_trace(mat_obj, mesh_obj, blend_path,
                                            node.inputs[1].links[0].from_node,
                                            node.inputs[1].links[0].from_socket)
        else:
            b_input = '%f' % node.inputs[1].default_value

        if node.operation == 'MULTIPLY':
            return '(%s * %s)' % (a_input, b_input)
        elif node.operation == 'ADD':
            return '(%s + %s)' % (a_input, b_input)
        else:
            raise RuntimeError("RMDL does not support shaders with '{0}' blending modes".format(node.operation))

    elif node.type == 'TEXTURE':

        if not node.inputs['Vector'].is_linked:
            raise RuntimeError("RMDL texture nodes must have a 'Geometry', 'Group' UV modifier node linked")

        # Determine matrix generator type
        matrix_str = None
        soc_from = node.inputs['Vector'].links[0].from_socket

        if soc_from.node.type == 'GROUP':
            # UV-modifier group: emit a call with the group's constant params
            matrix_str = '%s(' % soc_from.node.node_tree.name
            for s in range(len(soc_from.node.inputs)-1):
                soc = soc_from.node.inputs[s+1]
                if len(soc.links):
                    raise RuntimeError("UV Modifier nodes may not have parameter links (default values only)")
                ncomps = len(soc.default_value)
                if ncomps > 1:
                    matrix_str += 'vec%d(' % ncomps
                    # BUGFIX: iterate the component indices; the original
                    # `for c in ncomps-1` iterated a bare int (TypeError)
                    for c in range(ncomps-1):
                        matrix_str += '%f, ' % soc.default_value[c]
                    matrix_str += '%f)' % soc.default_value[ncomps-1]
                else:
                    matrix_str += '%f' % soc.default_value

                if s == len(soc_from.node.inputs)-2:
                    matrix_str += ')'
                else:
                    matrix_str += ', '

            # Continue tracing through the group's vector input
            soc_from = soc_from.node.inputs[0].links[0].from_socket

        elif soc_from.node.type == 'GEOMETRY':
            pass

        else:
            raise RuntimeError("RMDL texture nodes must have a 'Geometry', 'Group' UV modifier node linked")

        if soc_from.node.type != 'GEOMETRY':
            raise RuntimeError("Matrix animator nodes must connect to 'Geometry' node")

        # Resolve map and matrix index
        node_label = soc_from.node.label
        if not matrix_str and node_label.startswith('MTX_'):
            matrix_str = 'hecl_TexMtx[%d]' % int(node_label[4:])

        if soc_from.name == 'UV':
            uv_name = soc_from.node.uv_layer
            uv_idx = mesh_obj.data.uv_layers.find(uv_name)
            uvsource_str = 'hecl_TexCoord[%d]' % uv_idx

        elif soc_from.name == 'Normal':
            uvsource_str = 'hecl_TexCoordModelViewNormal'

        elif soc_from.name == 'View':
            uvsource_str = 'hecl_TexCoordModelViewPosition'

        else:
            raise RuntimeError("Only the 'UV', 'Normal' and 'View' sockets may be used from 'Geometry' nodes")

        # Alpha tracing only consumes the scalar 'Value' socket
        if socket.name == 'Value':
            if matrix_str:
                return 'texture("%s:%s", %s, %s).a' % (blend_path, node.texture.name, uvsource_str, matrix_str)
            else:
                return 'texture("%s:%s", %s).a' % (blend_path, node.texture.name, uvsource_str)
        else:
            raise RuntimeError("Only the 'Value' output sockets may be used from Texture nodes")

    elif node.type == 'VALUE':

        # DYNAMIC_<n> labels map to runtime constant-color alpha registers
        if node.label.startswith('DYNAMIC_'):
            dynamic_index = int(node.label[8:])
            return 'hecl_KColor[%d].a' % dynamic_index

        return '%f' % node.outputs['Value'].default_value

    elif node.type == 'MATERIAL':

        # Material lighting contributes no alpha modulation
        return '1.0'

    else:
        raise RuntimeError("RMDL is unable to process '{0}' shader nodes in '{1}'".format(node.type, mat_obj.name))
|
||||
|
||||
|
||||
|
||||
def shader(mat_obj, mesh_obj, blend_path):
    """Generate HECL combiner source for `mat_obj` as used on `mesh_obj`.

    Traces the material's node graph from its 'Output' node for both the
    color and alpha channels, picks blend factors from the material's
    game alpha-blend mode, and returns the combiner source string.

    Raises RuntimeError when the material doesn't use nodes or lacks a
    proper OUTPUT node named 'Output'.
    """
    if not mat_obj.use_nodes:
        raise RuntimeError("RMDL *requires* that shader nodes are used; '{0}' does not".format(mat_obj.name))

    nodes = mat_obj.node_tree.nodes
    if 'Output' not in nodes or nodes['Output'].type != 'OUTPUT':
        raise RuntimeError("RMDL *requires* that an OUTPUT shader node named 'Output' is present")

    # Root (output) node of the trace
    output_node = nodes['Output']

    # Trace both channels independently
    color_result = recursive_color_trace(mat_obj, mesh_obj, blend_path, output_node)
    alpha_result = recursive_alpha_trace(mat_obj, mesh_obj, blend_path, output_node)

    # Map the material's game blend mode onto HECL blend factors
    mode = mat_obj.game_settings.alpha_blend
    if mode in ('ALPHA', 'ALPHA_SORT'):
        blend_src, blend_dest = 'hecl_SrcAlpha', 'hecl_OneMinusSrcAlpha'
    elif mode == 'ADD':
        blend_src, blend_dest = 'hecl_SrcAlpha', 'hecl_One'
    else:
        blend_src, blend_dest = 'hecl_One', 'hecl_Zero'

    # All done!
    return '''\
hecl_BlendSrcFactor = %s;
hecl_BlendDestFactor = %s;
hecl_FragColor[0] = %s;
hecl_FragColor[0].a = %s;
''' % (blend_src, blend_dest, color_result, alpha_result)
|
||||
|
||||
# DEBUG operator
|
||||
import bpy
|
||||
class hecl_shader_operator(bpy.types.Operator):
    """Debug operator: generate HECL combiner source for the active
    material and dump it into a new text datablock for inspection."""
    bl_idname = "scene.hecl_shader"
    bl_label = "DEBUG HECL shader maker"
    bl_description = "Test shader generation utility"

    @classmethod
    def poll(cls, context):
        # Shader tracing requires an active mesh object
        obj = context.object
        return obj and obj.type == 'MESH'

    def execute(self, context):
        # Trace the active material's node graph into combiner source
        source = shader(context.object.active_material, context.object, bpy.data.filepath)

        # Write the result to a fresh text datablock
        text_block = bpy.data.texts.new('HECL SHADER')
        text_block.write(source)

        return {'FINISHED'}
|
|
@ -0,0 +1,101 @@
|
|||
'''
|
||||
HMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
This file defines the `hmdl_skin` class to iteratively construct
|
||||
a Skinning Info Section for `PAR1` HMDL files. Used by draw-format
|
||||
generators to select an optimal skin entry for a draw primitive,
|
||||
or have a new one established.
|
||||
'''
|
||||
|
||||
import struct
|
||||
import bpy
|
||||
from . import hmdl_anim
|
||||
|
||||
class hmdl_skin:
    """Iteratively constructs the Skinning Info Section for PAR1 HMDL
    files. Draw-format generators use it to select an optimal skin entry
    for a draw primitive, or have a new one established."""

    def __init__(self, max_bone_count, vertex_groups):
        # Hard cap on bones referenced by a single skin entry
        self.max_bone_count = max_bone_count
        # Mesh's vertex-group collection, indexed by group number
        self.mesh_vertex_groups = vertex_groups
        # One bone-name array per skin entry, in creation order
        self.bone_arrays = []

    def augment_bone_array_with_lv(self, mesh_data, bone_array, loop_vert):
        """Merge the bones weighting `loop_vert` into `bone_array` and
        return the loop-vert's weight list aligned with it.

        Returns None (leaving `bone_array` untouched) when adding the
        needed bones would exceed max_bone_count.
        """
        vertex = mesh_data.vertices[loop_vert[0].loop.vertex_index]

        # Weights aligned with the bones already in bone_array
        weight_array = [0.0] * len(bone_array)

        # Bones not yet in bone_array; committed only if nothing overflows
        pending_bones = []

        # Map each of the vertex's group memberships onto a bone slot
        for group_elem in vertex.groups:
            vertex_group = self.mesh_vertex_groups[group_elem.group]

            if vertex_group.name in bone_array:
                # Known bone: record its weight in place
                weight_array[bone_array.index(vertex_group.name)] = group_elem.weight
            else:
                # Detect bone overflow before tentatively adding
                if len(bone_array) + len(pending_bones) >= self.max_bone_count:
                    return None
                pending_bones.append(vertex_group.name)
                # New bones' weights follow the existing ones in order
                weight_array.append(group_elem.weight)

        # No overflow: commit the new bones
        bone_array.extend(pending_bones)
        return weight_array

    def augment_skin(self, bone_array):
        """Return the skin-entry index for `bone_array`, registering a new
        entry when it hasn't been seen before."""
        try:
            return self.bone_arrays.index(bone_array)
        except ValueError:
            self.bone_arrays.append(bone_array)
            return len(self.bone_arrays) - 1

    def generate_rigging_info(self, endian_char):
        """Serialize all registered skin entries (call after all
        index-buffers are generated).

        Layout: entry count, offset table (one uint32 per entry), then
        each entry as a bone count followed by hashed bone names.
        """
        skin_entries = []
        for bone_array in self.bone_arrays:
            entry = bytearray(struct.pack(endian_char + 'I', len(bone_array)))
            for bone in bone_array:
                entry += struct.pack(endian_char + 'i', hmdl_anim.hashbone(bone))
            skin_entries.append(entry)

        # Header: entry count
        info_bytes = bytearray(struct.pack(endian_char + 'I', len(skin_entries)))

        # Offset table: offsets are relative to the section start
        # (count word + one offset word per entry precede the data)
        cur_offset = len(skin_entries) * 4 + 4
        for entry in skin_entries:
            info_bytes += struct.pack(endian_char + 'I', cur_offset)
            cur_offset += len(entry)

        # Entry payloads
        for entry in skin_entries:
            info_bytes += entry

        return info_bytes
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,99 @@
|
|||
'''
|
||||
HMDL Export Blender Addon
|
||||
By Jack Andersen <jackoalan@gmail.com>
|
||||
|
||||
This file provides the means to generate an RGBA TXTR resource
|
||||
buffer for packaging into an .hlpk (yes, I know this is slow,
|
||||
but it's very flexible and supports Blender's procedural textures)
|
||||
'''
|
||||
|
||||
from mathutils import Vector
|
||||
import struct
|
||||
|
||||
def count_bits(num):
    """Return (population count, index of highest set bit) for the low 32
    bits of `num`. The index is 0 when no bit is set."""
    total = 0
    highest = 0
    for bit in range(32):
        if (num >> bit) & 1:
            total += 1
            highest = bit
    return total, highest
|
||||
|
||||
def make_txtr(tex, size=(512,512)):
    """Bake a Blender texture into a raw RGBA8 TXTR buffer.

    tex  -- Blender texture datablock; IMAGE textures use their image's
            size, procedural textures are sampled at `size`
    size -- (width, height) fallback sample resolution

    Returns bytes: a header (zero word, width, height, mip count) followed
    by RGBA8 texel rows for every mip level. Mipmaps are generated only
    for power-of-two images with use_mipmap enabled.
    """

    if tex.type == 'IMAGE':
        size = tex.image.size

    # Validate image for mipmapping (both dimensions must be powers of two)
    can_mipmap = False
    w_bits, w_idx = count_bits(size[0])
    h_bits, h_idx = count_bits(size[1])
    if w_bits == 1 and h_bits == 1 and tex.use_mipmap:
        can_mipmap = True

    # Main image 2D array; evaluate() is sampled at coordinates mapped
    # from pixel space into [-1, 1)
    main_array = []
    for y in range(size[1]):
        row = []
        main_array.append(row)
        for x in range(size[0]):
            texel = tex.evaluate((x * 2 / size[0] - 1.0, y * 2 / size[1] - 1.0, 0))
            row.append(texel)

    # Count potential mipmap levels along the longer axis
    series_count = 1
    if can_mipmap:
        if size[0] > size[1]:
            series_count = w_idx + 1
        else:
            series_count = h_idx + 1

    # Make header
    # NOTE(review): no endian prefix — packs in native byte order with
    # native alignment; confirm that's intended for the TXTR format
    tex_bytes = struct.pack('IHHI', 0, size[0], size[1], series_count)

    # Initial mipmap level: scale float texels into 0-255 RGBA8
    # NOTE(review): negative texel values are not clamped at the low end —
    # confirm evaluate() cannot return negatives here
    for y in main_array:
        for x in y:
            tex_bytes += struct.pack('BBBB',
                                     min(255, int(x[0]*256)),
                                     min(255, int(x[1]*256)),
                                     min(255, int(x[2]*256)),
                                     min(255, int(x[3]*256)))

    # Prepare mipmap maker
    if can_mipmap:

        # Box filter: each level averages 2x2 texels of the previous one,
        # re-counting the single row/column when a dimension reaches 1
        prev_array = main_array
        for i in range(series_count - 1):
            new_array = []
            for y in range(max(len(prev_array) // 2, 1)):
                y1 = prev_array[y*2]
                if len(prev_array) > 1:
                    y2 = prev_array[y*2+1]
                else:
                    # Height already 1: use the same row twice
                    y2 = prev_array[y*2]
                new_row = []
                new_array.append(new_row)
                for x in range(max(len(y1) // 2, 1)):
                    texel_val = Vector((0,0,0,0))
                    texel_val += y1[x*2]
                    texel_val += y2[x*2]
                    if len(y1) > 1:
                        texel_val += y1[x*2+1]
                        texel_val += y2[x*2+1]
                    else:
                        # Width already 1: count the single column twice
                        texel_val += y1[x*2]
                        texel_val += y2[x*2]
                    texel_val /= 4
                    new_row.append(texel_val)
                    tex_bytes += struct.pack('BBBB',
                                             min(255, int(texel_val[0]*256)),
                                             min(255, int(texel_val[1]*256)),
                                             min(255, int(texel_val[2]*256)),
                                             min(255, int(texel_val[3]*256)))

            prev_array = new_array

    return tex_bytes
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
TEMPLATE = lib
|
||||
CONFIG += staticlib
|
||||
TARGET = hecl-blender
|
||||
CONFIG -= Qt
|
||||
QT =
|
||||
unix:QMAKE_CXXFLAGS += -std=c++11
|
||||
unix:QMAKE_CFLAGS += -std=c99
|
||||
unix:LIBS += -std=c++11
|
||||
clang:QMAKE_CXXFLAGS += -stdlib=libc++
|
||||
clang:LIBS += -stdlib=libc++ -lc++abi
|
||||
|
||||
HEADERS += \
|
||||
$$PWD/CBlenderConnection.hpp
|
||||
|
||||
SOURCES += \
|
||||
$$PWD/CBlenderConnection.cpp
|
||||
|
||||
DISTFILES += \
|
||||
$$PWD/blendershell.py \
|
||||
$$PWD/addon/__init__.py \
|
||||
$$PWD/addon/hmdl/__init__.py \
|
||||
$$PWD/addon/hmdl/hmdl_anim.py \
|
||||
$$PWD/addon/hmdl/hmdl_mesh.py \
|
||||
$$PWD/addon/hmdl/hmdl_shader.py \
|
||||
$$PWD/addon/hmdl/hmdl_skin.py \
|
||||
$$PWD/addon/hmdl/hmdl_txtr.py
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
import bpy, sys, os
|
||||
|
||||
# Extract pipe file descriptors from arguments
|
||||
args = sys.argv[sys.argv.index('--')+1:]
|
||||
readfd = int(args[0])
|
||||
writefd = int(args[1])
|
||||
|
||||
def readpipeline():
    """Read one line of bytes from the command pipe.

    Consumes up to (but not including) the next newline; on EOF the
    bytes collected so far are returned.
    """
    line = bytearray()
    while True:
        ch = os.read(readfd, 1)
        if ch in (b'\n', b''):
            return line
        line += ch
|
||||
|
||||
def writepipeline(linebytes):
    """Write one newline-terminated line of bytes to the response pipe."""
    # Byte count returned by os.write is intentionally ignored (the
    # original bound it to an unused local)
    os.write(writefd, linebytes + b'\n')
|
||||
|
||||
def quitblender():
    """Notify the host process, then shut Blender down."""
    # Tell the controlling process we're going away before the pipe dies
    writepipeline(b'QUITTING')
    bpy.ops.wm.quit_blender()
|
||||
|
||||
# Intro handshake
|
||||
writepipeline(b'READY')
|
||||
ackbytes = readpipeline()
|
||||
if ackbytes != b'ACK':
|
||||
quitblender()
|
||||
|
||||
# Command loop
|
||||
while True:
|
||||
cmdline = readpipeline().split(b' ')
|
||||
|
||||
if not len(cmdline) or cmdline[0] == b'QUIT':
|
||||
quitblender()
|
||||
|
||||
elif cmdline[0] == b'OPEN':
|
||||
bpy.ops.wm.open_mainfile(filepath=cmdline[1].encode())
|
||||
writepipeline(b'SUCCESS')
|
||||
|
||||
elif cmdline[0] == b'TYPE':
|
||||
objname = cmdline[1].encode()
|
||||
|
|
@ -2,6 +2,7 @@
|
|||
#define TXTR_HPP
|
||||
|
||||
#include "HECLDatabase.hpp"
|
||||
#include "helpers.hpp"
|
||||
|
||||
class CTXTRProject : public HECLDatabase::CProjectObject
|
||||
{
|
||||
|
@ -17,6 +18,15 @@ class CTXTRProject : public HECLDatabase::CProjectObject
|
|||
}
|
||||
|
||||
public:
|
||||
/* Claim a working file for the TXTR project class: accept only regular
 * files beginning with the 8-byte PNG signature.  The second (sub-path)
 * argument is unused here. */
static bool ClaimPath(const std::string& path, const std::string&)
{
    if (!HECLHelpers::IsRegularFile(path))
        return false;
    /* "\x89PNG\r\n\x1a\n" — standard PNG file signature */
    if (!HECLHelpers::ContainsMagic(path, "\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", 8))
        return false;
    return true;
}
|
||||
|
||||
CTXTRProject(const ConstructionInfo& info)
|
||||
: CProjectObject(info)
|
||||
{
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
TEMPLATE = lib
|
||||
CONFIG += staticlib
|
||||
TARGET = dataspec
|
||||
TARGET = hecl-dataspec
|
||||
CONFIG -= Qt
|
||||
QT =
|
||||
unix:QMAKE_CXXFLAGS += -std=c++11
|
||||
|
@ -12,6 +12,7 @@ clang:LIBS += -stdlib=libc++ -lc++abi
|
|||
INCLUDEPATH += $$PWD ../include ../extern
|
||||
|
||||
HEADERS += \
|
||||
helpers.hpp \
|
||||
DUMB.hpp \
|
||||
HMDL.hpp \
|
||||
MATR.hpp \
|
||||
|
@ -19,5 +20,6 @@ HEADERS += \
|
|||
TXTR.hpp
|
||||
|
||||
SOURCES += \
|
||||
helpers.cpp \
|
||||
dataspec.cpp
|
||||
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "helpers.hpp"
|
||||
|
||||
namespace HECLHelpers
|
||||
{
|
||||
|
||||
/* Return true if `path` names an existing regular file (per stat(2)). */
bool IsRegularFile(const std::string& path)
{
    struct stat st;
    if (stat(path.c_str(), &st) != 0)
        return false;
    return S_ISREG(st.st_mode) != 0;
}
|
||||
|
||||
/* Return true if `path` names an existing directory (per stat(2)). */
bool IsDirectoryFile(const std::string& path)
{
    struct stat st;
    if (stat(path.c_str(), &st) != 0)
        return false;
    return S_ISDIR(st.st_mode) != 0;
}
|
||||
|
||||
bool ContainsMagic(const std::string& path, const char* magicBuf,
|
||||
size_t magicLen, size_t magicOff)
|
||||
{
|
||||
if (!IsRegularFile(path))
|
||||
return false;
|
||||
|
||||
FILE* fp = fopen(path.c_str(), "rb");
|
||||
if (!fp)
|
||||
return false;
|
||||
|
||||
char* readBuf[magicLen];
|
||||
fseek(fp, magicOff, SEEK_SET);
|
||||
size_t readLen = fread(readBuf, 1, magicLen, fp);
|
||||
fclose(fp);
|
||||
|
||||
if (readLen < magicLen)
|
||||
return false;
|
||||
if (memcmp(readBuf, magicBuf, magicLen))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
#ifndef HELPERS_HPP
#define HELPERS_HPP

#include <string>

/* Small filesystem helpers shared by the HECL data-spec classes. */
namespace HECLHelpers
{
/* True if `path` names an existing regular file. */
bool IsRegularFile(const std::string& path);
/* True if `path` names an existing directory. */
bool IsDirectoryFile(const std::string& path);
/* True if the file at `path` holds `magicLen` bytes equal to `magicBuf`
 * at byte offset `magicOff` (default: start of file).
 * NOTE(review): relies on <string> transitively providing size_t —
 * consider including <cstddef> explicitly. */
bool ContainsMagic(const std::string& path, const char* magicBuf,
                   size_t magicLen, size_t magicOff=0);

/* NOTE(review): no definition visible in this change set — presumably
 * implemented alongside the Blender connection code; confirm. */
bool IsBlenderFile(const std::string& path);

}

#endif // HELPERS_HPP
|
|
@ -11,12 +11,13 @@ INCLUDEPATH += ../include
|
|||
|
||||
LIBPATH += $$OUT_PWD/../lib \
|
||||
$$OUT_PWD/../dataspec \
|
||||
$$OUT_PWD/../blender \
|
||||
$$OUT_PWD/../extern/sqlite3 \
|
||||
$$OUT_PWD/../extern/blowfish \
|
||||
$$OUT_PWD/../extern/libpng \
|
||||
$$OUT_PWD/../extern/zlib
|
||||
|
||||
LIBS += -lhecl -ldataspec -lsqlite3 -lblowfish -lpng -lz
|
||||
LIBS += -lhecl -lhecl-dataspec -lhecl-blender -lsqlite3 -lblowfish -lpng -lz
|
||||
|
||||
SOURCES += \
|
||||
$$PWD/main.cpp
|
||||
|
|
|
@ -21,6 +21,7 @@ SUBDIRS += \
|
|||
extern/blowfish \
|
||||
extern/libpng \
|
||||
extern/zlib \
|
||||
blender \
|
||||
lib \
|
||||
dataspec \
|
||||
driver
|
||||
|
@ -29,5 +30,6 @@ driver.depends = extern/sqlite3
|
|||
driver.depends = extern/blowfish
|
||||
driver.depends = extern/libpng
|
||||
driver.depends = extern/zlib
|
||||
driver.depends = blender
|
||||
driver.depends = lib
|
||||
driver.depends = dataspec
|
||||
|
|
|
@ -224,6 +224,7 @@ protected:
|
|||
IDataObject* m_mainObj;
|
||||
IDataObject* m_cookedObj;
|
||||
public:
|
||||
static bool ClaimPath(const std::string&, const std::string&) {return false;}
|
||||
virtual ~CProjectObject();
|
||||
struct ConstructionInfo
|
||||
{
|
||||
|
@ -551,38 +552,49 @@ public:
|
|||
*/
|
||||
struct RegistryEntry
|
||||
{
|
||||
typedef std::function<bool(const std::string& path)> TPathClaimer;
|
||||
typedef std::function<CProjectObject*(const CProjectObject::ConstructionInfo&)> TProjectFactory;
|
||||
typedef std::function<CRuntimeObject*(const CRuntimeObject::ConstructionInfo&)> TRuntimeFactory;
|
||||
const HECL::FourCC& fcc;
|
||||
#ifndef HECL_STRIP_PROJECT
|
||||
std::function<CProjectObject*(const CProjectObject::ConstructionInfo&)> projectFactory;
|
||||
TPathClaimer pathClaimer;
|
||||
TProjectFactory projectFactory;
|
||||
#endif
|
||||
#ifndef HECL_STRIP_RUNTIME
|
||||
std::function<CRuntimeObject*(const CRuntimeObject::ConstructionInfo&)> runtimeFactory;
|
||||
TRuntimeFactory runtimeFactory;
|
||||
#endif
|
||||
};
|
||||
|
||||
static RegistryEntry::TPathClaimer NULL_PATH_CLAIMER =
|
||||
[](const std::string&) -> bool {return false;};
|
||||
static RegistryEntry::TProjectFactory NULL_PROJECT_FACTORY =
|
||||
[](const HECLDatabase::CProjectObject::ConstructionInfo&)
|
||||
-> HECLDatabase::CProjectObject* {return nullptr;};
|
||||
static RegistryEntry::TRuntimeFactory NULL_RUNTIME_FACTORY =
|
||||
[](const HECLDatabase::CRuntimeObject::ConstructionInfo&)
|
||||
-> HECLDatabase::CRuntimeObject* {return nullptr;};
|
||||
|
||||
#if !defined(HECL_STRIP_PROJECT) && !defined(HECL_STRIP_RUNTIME)
|
||||
|
||||
#define REGISTRY_ENTRY(fourcc, projectClass, runtimeClass) {fourcc, \
|
||||
[](const std::string& path) -> bool {return projectClass::ClaimPath(path);}, \
|
||||
[](const HECLDatabase::CProjectObject::ConstructionInfo& info) -> \
|
||||
HECLDatabase::CProjectObject* {return new projectClass(info);}, \
|
||||
[](const HECLDatabase::CRuntimeObject::ConstructionInfo& info) -> \
|
||||
HECLDatabase::CRuntimeObject* {return new runtimeClass(info);}}
|
||||
|
||||
#define REGISTRY_SENTINEL() { HECL::FourCC(), \
|
||||
[](const HECLDatabase::CProjectObject::ConstructionInfo&) -> \
|
||||
HECLDatabase::CProjectObject* {return nullptr;}, \
|
||||
[](const HECLDatabase::CRuntimeObject::ConstructionInfo&) -> \
|
||||
HECLDatabase::CRuntimeObject* {return nullptr;}}
|
||||
#define REGISTRY_SENTINEL() \
|
||||
{HECL::FourCC(), NULL_PATH_CLAIMER, \
|
||||
NULL_PROJECT_FACTORY, NULL_RUNTIME_FACTORY}
|
||||
|
||||
#elif !defined(HECL_STRIP_PROJECT)
|
||||
|
||||
#define REGISTRY_ENTRY(fourcc, projectClass, runtimeClass) {fourcc, \
|
||||
[](const std::string& path) -> bool {return projectClass::ClaimPath(path);}, \
|
||||
[](const HECLDatabase::CProjectObject::ConstructionInfo& info) -> \
|
||||
HECLDatabase::CProjectObject* {return new projectClass(info);}}
|
||||
|
||||
#define REGISTRY_SENTINEL() { HECL::FourCC(), \
|
||||
[](const HECLDatabase::CProjectObject::ConstructionInfo&) -> \
|
||||
HECLDatabase::CProjectObject* {return nullptr;}}
|
||||
#define REGISTRY_SENTINEL() {HECL::FourCC(), NULL_PATH_CLAIMER, NULL_PROJECT_FACTORY}
|
||||
|
||||
#elif !defined(HECL_STRIP_RUNTIME)
|
||||
|
||||
|
@ -590,9 +602,7 @@ struct RegistryEntry
|
|||
[](const HECLDatabase::CRuntimeObject::ConstructionInfo& info) -> \
|
||||
HECLDatabase::CRuntimeObject* {return new runtimeClass(info);}}
|
||||
|
||||
#define REGISTRY_SENTINEL() { HECL::FourCC(), \
|
||||
[](const HECLDatabase::CRuntimeObject::ConstructionInfo&) -> \
|
||||
HECLDatabase::CRuntimeObject* {return nullptr;}}
|
||||
#define REGISTRY_SENTINEL() {HECL::FourCC(), NULL_RUNTIME_FACTORY}
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -13,23 +13,36 @@ namespace HECLDatabase
|
|||
|
||||
/* Private sqlite3 backend to be used by database subclasses */
|
||||
|
||||
static const char* skDBINIT =
|
||||
static const char* skMAINDBINIT =
|
||||
"PRAGMA foreign_keys = ON;\n"
|
||||
"CREATE TABLE IF NOT EXISTS objects(rowid INTEGER PRIMARY KEY,"
|
||||
"path,"
|
||||
"subpath"
|
||||
"type4cc UNSIGNED INTEGER,"
|
||||
"hash64 INTEGER);\n"
|
||||
"CREATE INDEX IF NOT EXISTS nameidx ON objects(name);\n"
|
||||
"CREATE TABLE IF NOT EXISTS deplinks(groupId,"
|
||||
"objId REFERENCES objects(rowid) ON DELETE CASCADE,"
|
||||
"UNIQUE (groupId, objId) ON CONFLICT IGNORE);\n"
|
||||
"CREATE INDEX IF NOT EXISTS grpidx ON deplinks(groupId);\n"
|
||||
"CREATE INDEX IF NOT EXISTS depidx ON deplinks(objId);\n"
|
||||
"CREATE TABLE IF NOT EXISTS cooked(objid INTEGER PRIMARY KEY REFERENCES objects(rowid) ON DELETE CASCADE,"
|
||||
"offset UNSIGNED INTEGER,"
|
||||
"compLen UNSIGNED INTEGER,"
|
||||
"decompLen UNSIGNED INTEGER);\n";
|
||||
"CREATE TABLE IF NOT EXISTS grps("
|
||||
"grpid INTEGER PRIMARY KEY," /* Unique group identifier (used as in-game ref) */
|
||||
"path);\n" /* Directory path collecting working files for group */
|
||||
"CREATE TABLE IF NOT EXISTS objs("
|
||||
"objid INTEGER PRIMARY KEY," /* Unique object identifier (used as in-game ref) */
|
||||
"path," /* Path of working file */
|
||||
"subpath DEFAULT NULL," /* String name of sub-object within working file (i.e. blender object) */
|
||||
"cookedHash64 INTEGER DEFAULT NULL," /* Hash of last cooking pass */
|
||||
"cookedTime64 INTEGER DEFAULT NULL);\n"; /* UTC unix-time of last cooking pass */
|
||||
|
||||
static const char* skCOOKEDDBINIT =
|
||||
"PRAGMA foreign_keys = ON;\n"
|
||||
"CREATE TABLE IF NOT EXISTS cgrps("
|
||||
"grpid INTEGER PRIMARY KEY," /* Unique group identifier (from main DB) */
|
||||
"offset UNSIGNED INTEGER," /* Group-blob offset within package */
|
||||
"compLen UNSIGNED INTEGER," /* Compressed blob-length */
|
||||
"decompLen UNSIGNED INTEGER);\n" /* Decompressed blob-length */
|
||||
"CREATE TABLE IF NOT EXISTS cobjs("
|
||||
"objid INTEGER PRIMARY KEY," /* Unique object identifier (from main DB) */
|
||||
"type4cc UNSIGNED INTEGER," /* Type FourCC as claimed by first project class in dataspec */
|
||||
"loosegrp REFERENCES cgrps(grpid) ON DELETE SET NULL DEFAULT NULL);\n" /* single-object group of ungrouped object */
|
||||
"CREATE TABLE IF NOT EXISTS cgrplinks("
|
||||
"grpid REFERENCES cgrps(grpid) ON DELETE CASCADE," /* Group ref */
|
||||
"objid REFERENCES cobjs(objid) ON DELETE CASCADE," /* Object ref */
|
||||
"offset UNSIGNED INTEGER," /* Offset within decompressed group-blob */
|
||||
"decompLen UNSIGNED INTEGER," /* Decompressed object length */
|
||||
"UNIQUE (grpid, objid) ON CONFLICT IGNORE);\n"
|
||||
"CREATE INDEX IF NOT EXISTS grpidx ON cgrplinks(grpid);\n";
|
||||
|
||||
#define PREPSTMT(stmtSrc, outVar)\
|
||||
if (sqlite3_prepare_v2(m_db, stmtSrc, 0, &outVar, NULL) != SQLITE_OK)\
|
||||
|
@ -39,15 +52,13 @@ if (sqlite3_prepare_v2(m_db, stmtSrc, 0, &outVar, NULL) != SQLITE_OK)\
|
|||
return;\
|
||||
}
|
||||
|
||||
class CSQLite
|
||||
class CSQLiteMain
|
||||
{
|
||||
sqlite3* m_db;
|
||||
|
||||
sqlite3_stmt* m_selObjects;
|
||||
sqlite3_stmt* m_selObjectByName;
|
||||
sqlite3_stmt* m_selDistictDepGroups;
|
||||
sqlite3_stmt* m_selDepGroupObjects;
|
||||
sqlite3_stmt* m_insObject;
|
||||
sqlite3_stmt* m_selObjs;
|
||||
sqlite3_stmt* m_selGrps;
|
||||
|
||||
|
||||
struct SCloseBuf
|
||||
{
|
||||
|
@ -61,7 +72,7 @@ class CSQLite
|
|||
}
|
||||
|
||||
public:
|
||||
CSQLite(const char* path, bool readonly)
|
||||
CSQLiteMain(const char* path, bool readonly)
|
||||
{
|
||||
/* Open database connection */
|
||||
int errCode = 0;
|
||||
|
@ -94,7 +105,7 @@ public:
|
|||
PREPSTMT("INSERT INTO objects(name,type4cc,hash64,compLen,decompLen) VALUES (?1,?2,?3,?4,?5)", m_insObject);
|
||||
}
|
||||
|
||||
~CSQLite()
|
||||
~CSQLiteMain()
|
||||
{
|
||||
sqlite3_finalize(m_selObjects);
|
||||
sqlite3_finalize(m_selObjectByName);
|
||||
|
|
|
@ -80,7 +80,7 @@ static int newBlockSlot(memlba_file* file)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void decompressBlock(memlba_file* file, int blockIdx, int targetSlot)
|
||||
static void decompressBlock(memlba_file* file, unsigned blockIdx, int targetSlot)
|
||||
{
|
||||
if (blockIdx >= file->headBuf->blockCount)
|
||||
{
|
||||
|
@ -158,7 +158,7 @@ static sqlite3_vfs memlba_vfs =
|
|||
memlbaRandomness, /* xRandomness */
|
||||
memlbaSleep, /* xSleep */
|
||||
memlbaCurrentTime, /* xCurrentTime */
|
||||
0 /* xCurrentTimeInt64 */
|
||||
0, 0, 0, 0, 0
|
||||
};
|
||||
|
||||
static sqlite3_io_methods memlba_io_methods =
|
||||
|
@ -218,15 +218,15 @@ static int memlbaRead(
|
|||
unsigned firstRemBytes = pTmp->headBuf->blockSize - firstOff;
|
||||
|
||||
int slot = getBlockSlot(pTmp, blockIdx);
|
||||
unsigned toRead = MIN(iAmt, firstRemBytes);
|
||||
unsigned toRead = MIN((unsigned)iAmt, firstRemBytes);
|
||||
memcpy(zBuf, pTmp->cachedBlockBufs[slot] + firstOff, toRead);
|
||||
iAmt -= toRead;
|
||||
zBuf += toRead;
|
||||
|
||||
while (iAmt)
|
||||
while (iAmt > 0)
|
||||
{
|
||||
slot = getBlockSlot(pTmp, ++blockIdx);
|
||||
toRead = MIN(iAmt, pTmp->headBuf->blockSize);
|
||||
toRead = MIN((unsigned)iAmt, pTmp->headBuf->blockSize);
|
||||
memcpy(zBuf, pTmp->cachedBlockBufs[slot], toRead);
|
||||
iAmt -= toRead;
|
||||
zBuf += toRead;
|
||||
|
@ -245,6 +245,7 @@ static int memlbaWrite(
|
|||
sqlite_int64 iOfst
|
||||
)
|
||||
{
|
||||
(void)pFile; (void)zBuf; (void)iAmt; (void)iOfst;
|
||||
return SQLITE_OK;
|
||||
}
|
||||
|
||||
|
@ -253,7 +254,7 @@ static int memlbaWrite(
|
|||
*/
|
||||
static int memlbaTruncate(sqlite3_file* pFile, sqlite_int64 size)
|
||||
{
|
||||
memlba_file* pTmp = (memlba_file*)pFile;
|
||||
(void)pFile; (void)size;
|
||||
return SQLITE_OK;
|
||||
}
|
||||
|
||||
|
@ -262,6 +263,7 @@ static int memlbaTruncate(sqlite3_file* pFile, sqlite_int64 size)
|
|||
*/
|
||||
static int memlbaSync(sqlite3_file* pFile, int flags)
{
    /* This VFS is opened read-only (memlbaOpen rejects non-readonly
     * flags), so there is never anything to flush; syncing is a no-op. */
    (void)pFile; (void)flags;
    return SQLITE_OK;
}
|
||||
|
||||
|
@ -280,6 +282,7 @@ static int memlbaFileSize(sqlite3_file* pFile, sqlite_int64* pSize)
|
|||
*/
|
||||
static int memlbaLock(sqlite3_file* pFile, int eLock)
{
    /* No locking needed for this read-only VFS; accept every request. */
    (void)pFile; (void)eLock;
    return SQLITE_OK;
}
|
||||
|
||||
|
@ -288,6 +291,7 @@ static int memlbaLock(sqlite3_file* pFile, int eLock)
|
|||
*/
|
||||
static int memlbaUnlock(sqlite3_file* pFile, int eLock)
{
    /* No locking state is ever taken, so unlocking trivially succeeds. */
    (void)pFile; (void)eLock;
    return SQLITE_OK;
}
|
||||
|
||||
|
@ -296,6 +300,7 @@ static int memlbaUnlock(sqlite3_file* pFile, int eLock)
|
|||
*/
|
||||
static int memlbaCheckReservedLock(sqlite3_file* pFile, int* pResOut)
{
    /* No locks exist in this VFS: report that none is held. */
    (void)pFile;
    *pResOut = 0;
    return SQLITE_OK;
}
|
||||
|
@ -305,6 +310,7 @@ static int memlbaCheckReservedLock(sqlite3_file* pFile, int* pResOut)
|
|||
*/
|
||||
static int memlbaFileControl(sqlite3_file* pFile, int op, void* pArg)
{
    /* No custom file-control opcodes are handled.
     * NOTE(review): SQLite convention is SQLITE_NOTFOUND for unhandled
     * opcodes — confirm blanket SQLITE_OK is intended here. */
    (void)pFile; (void)op; (void)pArg;
    return SQLITE_OK;
}
|
||||
|
||||
|
@ -313,6 +319,7 @@ static int memlbaFileControl(sqlite3_file* pFile, int op, void* pArg)
|
|||
*/
|
||||
static int memlbaSectorSize(sqlite3_file* pFile)
{
    /* 0 lets SQLite fall back to its default sector size — presumably
     * fine for a memory-backed, read-only store; confirm. */
    (void)pFile;
    return 0;
}
|
||||
|
||||
|
@ -321,6 +328,7 @@ static int memlbaSectorSize(sqlite3_file* pFile)
|
|||
*/
|
||||
static int memlbaDeviceCharacteristics(sqlite3_file* pFile)
{
    /* No special device capability bits are advertised. */
    (void)pFile;
    return 0;
}
|
||||
|
||||
|
@ -335,6 +343,7 @@ static int memlbaOpen(
|
|||
int* pOutFlags
|
||||
)
|
||||
{
|
||||
(void)pVfs; (void)zName; (void)pOutFlags;
|
||||
if ((flags & SQLITE_OPEN_MAIN_DB) != SQLITE_OPEN_MAIN_DB ||
|
||||
(flags & SQLITE_OPEN_READONLY) != SQLITE_OPEN_READONLY)
|
||||
{
|
||||
|
@ -351,6 +360,7 @@ static int memlbaOpen(
|
|||
for (i=0 ; i<BLOCK_SLOTS ; ++i)
|
||||
{
|
||||
p2->cachedBlockBufs[i] = blockBufs + p2->headBuf->blockSize * i;
|
||||
p2->cachedBlockIndices[i] = -1;
|
||||
}
|
||||
return SQLITE_OK;
|
||||
}
|
||||
|
@ -362,6 +372,7 @@ static int memlbaOpen(
|
|||
*/
|
||||
static int memlbaDelete(sqlite3_vfs* pVfs, const char* zPath, int dirSync)
{
    /* Deletion is meaningless for this read-only VFS; report success. */
    (void)pVfs; (void)zPath; (void)dirSync;
    return SQLITE_OK;
}
|
||||
|
||||
|
@ -376,7 +387,8 @@ static int memlbaAccess(
|
|||
int* pResOut
|
||||
)
|
||||
{
|
||||
if(flags & SQLITE_ACCESS_READ | SQLITE_ACCESS_READWRITE)
|
||||
(void)pVfs; (void)zPath; (void)pResOut;
|
||||
if (flags & (SQLITE_ACCESS_READ | SQLITE_ACCESS_READWRITE))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -392,6 +404,7 @@ static int memlbaFullPathname(
|
|||
int nOut, /* Size of output buffer in bytes */
|
||||
char* zOut) /* Output buffer */
|
||||
{
|
||||
(void)pVfs;
|
||||
strncpy(zOut, zPath, nOut);
|
||||
return SQLITE_OK;
|
||||
}
|
||||
|
@ -401,6 +414,7 @@ static int memlbaFullPathname(
|
|||
*/
|
||||
static void* memlbaDlOpen(sqlite3_vfs* pVfs, const char* zPath)
{
    /* Dynamic extension loading is unsupported; always fail with NULL. */
    (void)pVfs; (void)zPath;
    return NULL;
}
|
||||
|
||||
|
@ -411,6 +425,7 @@ static void* memlbaDlOpen(sqlite3_vfs* pVfs, const char* zPath)
|
|||
*/
|
||||
static void memlbaDlError(sqlite3_vfs* pVfs, int nByte, char* zErrMsg)
{
    /* Dynamic loading is unsupported, so no error text is produced.
     * NOTE(review): zErrMsg is left untouched — confirm callers tolerate
     * an unfilled buffer. */
    (void)pVfs; (void)nByte; (void)zErrMsg;
}
|
||||
|
||||
/*
|
||||
|
@ -418,6 +433,8 @@ static void memlbaDlError(sqlite3_vfs* pVfs, int nByte, char* zErrMsg)
|
|||
*/
|
||||
static void (*memlbaDlSym(sqlite3_vfs* pVfs, void* pH, const char* zSym))(void)
{
    /* No libraries can be opened, so no symbol can ever be resolved. */
    (void)pVfs; (void)pH; (void)zSym;
    return NULL;
}
|
||||
|
||||
/*
|
||||
|
@ -425,6 +442,7 @@ static void (*memlbaDlSym(sqlite3_vfs* pVfs, void* pH, const char* zSym))(void)
|
|||
*/
|
||||
static void memlbaDlClose(sqlite3_vfs* pVfs, void* pHandle)
{
    /* memlbaDlOpen never yields a handle, so there is nothing to close. */
    (void)pVfs; (void)pHandle;
}
|
||||
|
||||
/*
|
||||
|
@ -433,6 +451,7 @@ static void memlbaDlClose(sqlite3_vfs* pVfs, void* pHandle)
|
|||
*/
|
||||
static int memlbaRandomness(sqlite3_vfs* pVfs, int nByte, char* zBufOut)
|
||||
{
|
||||
(void)pVfs;
|
||||
for(int i = 0 ; i < nByte ; ++i)
|
||||
zBufOut[i] = rand();
|
||||
return nByte;
|
||||
|
@ -444,6 +463,7 @@ static int memlbaRandomness(sqlite3_vfs* pVfs, int nByte, char* zBufOut)
|
|||
*/
|
||||
static int memlbaSleep(sqlite3_vfs* pVfs, int nMicro)
|
||||
{
|
||||
(void)pVfs;
|
||||
int seconds = (nMicro + 999999) / 1000000;
|
||||
sleep(seconds);
|
||||
return seconds * 1000000;
|
||||
|
@ -454,6 +474,7 @@ static int memlbaSleep(sqlite3_vfs* pVfs, int nMicro)
|
|||
*/
|
||||
static int memlbaCurrentTime(sqlite3_vfs* pVfs, double* pTimeOut)
{
    /* Wall-clock time is not meaningful here; report 0.0.
     * NOTE(review): SQLite documents this value as a Julian Day number —
     * confirm 0.0 is acceptable for the features in use. */
    (void)pVfs;
    *pTimeOut = 0.0;
    return 0;
}
|
||||
|
|
Loading…
Reference in New Issue