Integration work on mesh optimization

Jack Andersen 2015-10-02 15:53:45 -10:00
parent bebeffb247
commit ab5451ea45
9 changed files with 596 additions and 930 deletions

View File

@@ -456,27 +456,22 @@ void BlenderConnection::PyOutStream::linkBlend(const std::string& target,
objName.c_str(), objName.c_str(), target.c_str(), objName.c_str());
}
BlenderConnection::DataStream::Mesh::Mesh(BlenderConnection& conn)
BlenderConnection::DataStream::Mesh::Mesh(BlenderConnection& conn, int maxSkinBanks)
{
uint32_t matCount;
conn._readBuf(&matCount, 4);
materials.reserve(matCount);
for (int i=0 ; i<matCount ; ++i)
uint32_t matSetCount;
conn._readBuf(&matSetCount, 4);
materialSets.reserve(matSetCount);
for (int i=0 ; i<matSetCount ; ++i)
{
char mat[2048];
conn._readLine(mat, 2048);
materials.push_back(mat);
materialSets.emplace_back();
std::vector<Material>& materials = materialSets.back();
uint32_t matCount;
conn._readBuf(&matCount, 4);
materials.reserve(matCount);
for (int i=0 ; i<matCount ; ++i)
materials.emplace_back(conn);
}
uint32_t submeshCount;
conn._readBuf(&submeshCount, 4);
submeshes.reserve(submeshCount);
for (int i=0 ; i<submeshCount ; ++i)
submeshes.emplace_back(conn);
}
BlenderConnection::DataStream::Mesh::Submesh::Submesh(BlenderConnection& conn)
{
uint32_t count;
conn._readBuf(&count, 4);
pos.reserve(count);
@@ -528,39 +523,53 @@ BlenderConnection::DataStream::Mesh::Submesh::Submesh(BlenderConnection& conn)
binds.emplace_back(conn);
}
conn._readBuf(&count, 4);
skinBanks.reserve(count);
for (int i=0 ; i<count ; ++i)
uint8_t isSurf;
conn._readBuf(&isSurf, 1);
while (isSurf)
{
skinBanks.emplace_back();
std::vector<Index>& bank = skinBanks.back();
uint32_t idxCount;
conn._readBuf(&idxCount, 4);
bank.reserve(idxCount);
for (int j=0 ; j<idxCount ; ++j)
bank.emplace_back(conn);
surfaces.emplace_back(conn, *this);
conn._readBuf(&isSurf, 1);
}
conn._readBuf(&count, 4);
surfaces.reserve(count);
for (int i=0 ; i<count ; ++i)
surfaces.emplace_back(conn, *this);
/* Resolve skin banks here */
if (boneNames.size())
for (Surface& surf : surfaces)
skinBanks.addSurface(surf);
}
BlenderConnection::DataStream::Mesh::Submesh::Surface::Surface
(BlenderConnection& conn, const Submesh& parent)
: centroid(conn), materialIdx(conn), aabbMin(conn), aabbMax(conn),
reflectionNormal(conn), skinBankIdx(conn)
BlenderConnection::DataStream::Mesh::Material::Material
(BlenderConnection& conn)
{
uint32_t count;
conn._readBuf(&count, 4);
verts.reserve(count);
for (int i=0 ; i<count ; ++i)
verts.emplace_back(conn, parent);
char buf[4096];
conn._readLine(buf, 4096);
source.assign(buf);
uint32_t texCount;
conn._readBuf(&texCount, 4);
texs.reserve(texCount);
for (int i=0 ; i<texCount ; ++i)
{
conn._readLine(buf, 4096);
texs.emplace_back(buf);
}
}
BlenderConnection::DataStream::Mesh::Submesh::Surface::Vert::Vert
(BlenderConnection& conn, const Submesh& parent)
BlenderConnection::DataStream::Mesh::Surface::Surface
(BlenderConnection& conn, const Mesh& parent)
: centroid(conn), materialIdx(conn), aabbMin(conn), aabbMax(conn),
reflectionNormal(conn)
{
uint8_t isVert;
conn._readBuf(&isVert, 1);
while (isVert)
{
verts.emplace_back(conn, parent);
conn._readBuf(&isVert, 1);
}
}
BlenderConnection::DataStream::Mesh::Surface::Vert::Vert
(BlenderConnection& conn, const Mesh& parent)
{
conn._readBuf(&iPos, 4);
conn._readBuf(&iNorm, 4);

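For orientation, the rewritten Mesh constructor above consumes one continuous stream: a count-prefixed block of material sets, the shared vertex pools, and then a flag-terminated surface list. A minimal sketch of the matching writer side in Python, assuming hypothetical write_vert_pool/write_surface callbacks standing in for the real VertPool.write_out and write_out_surface further down:

import struct

def write_mesh_stream(writebuf, material_sets, surfaces, write_vert_pool, write_surface):
    # Material sets: set count, then per-set material count, then for each
    # material a HECL source line followed by a count-prefixed texture list.
    writebuf(struct.pack('I', len(material_sets)))
    for materials in material_sets:
        writebuf(struct.pack('I', len(materials)))
        for source, texs in materials:
            writebuf((source + '\n').encode())
            writebuf(struct.pack('I', len(texs)))
            for tex in texs:
                writebuf((tex + '\n').encode())
    # Shared pools: positions, normals, color/UV layers, vertex-group names, skin entries
    write_vert_pool(writebuf)
    # Surfaces are streamed behind a one-byte flag; a zero byte ends the list
    for surf in surfaces:
        writebuf(struct.pack('B', 1))
        write_surface(writebuf, surf)
    writebuf(struct.pack('B', 0))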
View File

@@ -282,90 +282,93 @@ public:
struct Mesh
{
/* HECL source of each material */
std::vector<std::string> materials;
/* Encapsulates mesh data up to maximum indexing space,
* overflowing to additional Submeshes as needed */
struct Submesh
struct Material
{
/* Vertex buffer data */
struct Vector2f
{
float val[2];
Vector2f(BlenderConnection& conn) {conn._readBuf(val, 8);}
};
struct Vector3f
{
float val[3];
Vector3f(BlenderConnection& conn) {conn._readBuf(val, 12);}
};
struct Vector4f
{
float val[4];
Vector4f(BlenderConnection& conn) {conn._readBuf(val, 16);}
};
struct Index
{
uint32_t val;
Index(BlenderConnection& conn) {conn._readBuf(&val, 4);}
};
std::vector<Vector3f> pos;
std::vector<Vector3f> norm;
uint32_t colorLayerCount = 0;
std::vector<Vector4f> color[4];
uint32_t uvLayerCount = 0;
std::vector<Vector2f> uv[8];
std::string source;
std::vector<std::string> texs;
/* Skinning data */
std::vector<std::string> boneNames;
struct SkinBind
{
uint32_t boneIdx;
float weight;
SkinBind(BlenderConnection& conn) {conn._readBuf(&boneIdx, 8);}
};
std::vector<std::vector<SkinBind>> skins;
std::vector<std::vector<Index>> skinBanks;
/* Islands of the same material/skinBank are represented here */
struct Surface
{
Vector3f centroid;
Index materialIdx;
Vector3f aabbMin;
Vector3f aabbMax;
Vector3f reflectionNormal;
Index skinBankIdx;
/* Vertex indexing data */
struct Vert
{
uint32_t iPos;
uint32_t iNorm;
uint32_t iColor[4] = {uint32_t(-1)};
uint32_t iUv[8] = {uint32_t(-1)};
uint32_t iSkin;
Vert(BlenderConnection& conn, const Submesh& parent);
};
std::vector<Vert> verts;
Surface(BlenderConnection& conn, const Submesh& parent);
};
std::vector<Surface> surfaces;
Submesh(BlenderConnection& conn);
Material(BlenderConnection& conn);
};
std::vector<Submesh> submeshes;
std::vector<std::vector<Material>> materialSets;
Mesh(BlenderConnection& conn);
/* Vertex buffer data */
struct Vector2f
{
float val[2];
Vector2f(BlenderConnection& conn) {conn._readBuf(val, 8);}
};
struct Vector3f
{
float val[3];
Vector3f(BlenderConnection& conn) {conn._readBuf(val, 12);}
};
struct Index
{
uint32_t val;
Index(BlenderConnection& conn) {conn._readBuf(&val, 4);}
};
std::vector<Vector3f> pos;
std::vector<Vector3f> norm;
uint32_t colorLayerCount = 0;
std::vector<Vector3f> color[4];
uint32_t uvLayerCount = 0;
std::vector<Vector2f> uv[8];
/* Skinning data */
std::vector<std::string> boneNames;
struct SkinBind
{
uint32_t boneIdx;
float weight;
SkinBind(BlenderConnection& conn) {conn._readBuf(&boneIdx, 8);}
};
std::vector<std::vector<SkinBind>> skins;
/* Islands of the same material/skinBank are represented here */
struct Surface
{
Vector3f centroid;
Index materialIdx;
Vector3f aabbMin;
Vector3f aabbMax;
Vector3f reflectionNormal;
uint32_t skinBankIdx;
/* Vertex indexing data (all primitives joined as degenerate tri-strip) */
struct Vert
{
uint32_t iPos;
uint32_t iNorm;
uint32_t iColor[4] = {uint32_t(-1)};
uint32_t iUv[8] = {uint32_t(-1)};
uint32_t iSkin;
Vert(BlenderConnection& conn, const Mesh& parent);
};
std::vector<Vert> verts;
Surface(BlenderConnection& conn, const Mesh& parent);
};
std::vector<Surface> surfaces;
class SkinBanks
{
std::vector<std::vector<uint32_t>> banks;
public:
uint32_t addSurface(const Surface& surf)
{
return 0;
}
} skinBanks;
Mesh(BlenderConnection& conn, int maxSkinBanks);
};
/* Compile mesh by name */
Mesh compileMesh(const std::string& name, int maxIdx=65535, int maxSkinBanks=10)
Mesh compileMesh(const std::string& name, int maxSkinBanks=10)
{
char req[128];
snprintf(req, 128, "MESHCOMPILE %s %d %d", name.c_str(), maxIdx, maxSkinBanks);
snprintf(req, 128, "MESHCOMPILE %s %d", name.c_str(), maxSkinBanks);
m_parent->_writeLine(req);
char readBuf[256];
@@ -373,14 +376,14 @@ public:
if (strcmp(readBuf, "OK"))
BlenderLog.report(LogVisor::FatalError, "unable to cook mesh '%s': %s", name.c_str(), readBuf);
return Mesh(*m_parent);
return Mesh(*m_parent, maxSkinBanks);
}
/* Compile all meshes into one */
Mesh compileAllMeshes(int maxIdx=65535, int maxSkinBanks=10)
Mesh compileAllMeshes(int maxSkinBanks=10)
{
char req[128];
snprintf(req, 128, "MESHCOMPILEALL %d %d", maxIdx, maxSkinBanks);
snprintf(req, 128, "MESHCOMPILEALL %d", maxSkinBanks);
m_parent->_writeLine(req);
char readBuf[256];
@@ -388,7 +391,7 @@ public:
if (strcmp(readBuf, "OK"))
BlenderLog.report(LogVisor::FatalError, "unable to cook all meshes: %s", readBuf);
return Mesh(*m_parent);
return Mesh(*m_parent, maxSkinBanks);
}
};
DataStream beginData()

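SkinBanks::addSurface above is still a stub (every surface lands in bank 0). Purely for context, and not taken from this commit, a bone-palette style assignment sketched in Python would collect the skin-entry indices a surface references and reuse or open a bank, with each bank capped at maxSkinBanks entries (the Python cooker's recursive_faces_islands already guarantees a single surface never needs more than that):

def add_surface_to_banks(banks, surf_skin_idxs, bank_size):
    # banks: list of sets of skin-entry indices; bank_size ~ maxSkinBanks.
    # Illustrative only; the commit leaves the real resolution unimplemented.
    for i, bank in enumerate(banks):
        if len(bank | surf_skin_idxs) <= bank_size:
            bank |= surf_skin_idxs
            return i                  # surface fits an existing bank
    banks.append(set(surf_skin_idxs))
    return len(banks) - 1             # open a new bank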
View File

@@ -6,7 +6,6 @@ list(APPEND PY_SOURCES
hecl/hmdl/HMDLMesh.py
hecl/hmdl/HMDLShader.py
hecl/hmdl/HMDLSkin.py
hecl/hmdl/HMDLTxtr.py
hecl/sact/__init__.py
hecl/sact/SACTAction.py
hecl/sact/SACTEvent.py

View File

@@ -19,10 +19,10 @@ from bpy.app.handlers import persistent
# Appendable list allowing external addons to register additional resource types
hecl_typeS = [
('NONE', "None", "Active scene not using HECL", None, None),
('MESH', "Mesh", "Active scene represents an HMDL Mesh", hmdl.draw, hmdl.cook),
('ACTOR', "Actor", "Active scene represents a HECL Actor", sact.draw, sact.cook),
('AREA', "Area", "Active scene represents a HECL Area", srea.draw, srea.cook)]
('NONE', "None", "Active scene not using HECL", None),
('MESH', "Mesh", "Active scene represents an HMDL Mesh", hmdl.draw),
('ACTOR', "Actor", "Active scene represents a HECL Actor", sact.draw),
('AREA', "Area", "Active scene represents a HECL Area", srea.draw)]
# Main Scene Panel
class hecl_scene_panel(bpy.types.Panel):
@@ -47,15 +47,6 @@ class hecl_scene_panel(bpy.types.Panel):
break
# Blender-selected polymorphism cook
def do_cook(writebuf, platform_type, endian_char):
for tp in hecl_typeS:
if tp[0] == bpy.context.scene.hecl_type:
if callable(tp[4]):
return tp[4](writefd, platform_type, endian_char)
return False
# Blender export-type registration
def register_export_type_enum():
bpy.types.Scene.hecl_type = bpy.props.EnumProperty(items=
@@ -74,20 +65,7 @@ def add_export_type(type_tuple):
# Shell command receiver (from HECL driver)
def command(cmdline, writepipeline, writepipebuf):
if cmdline[0] == b'COOK':
resource_type = bpy.context.scene.hecl_type.encode()
writepipeline(resource_type)
ackbytes = readpipeline()
if ackbytes != b'ACK':
return
try:
result = do_cook(writepipebuf, cmdline[1].decode(), cmdline[2].decode())
if result == None or result == True:
writepipeline(b'SUCCESS')
else:
writepipeline(b'FAILURE')
except:
writepipeline(b'EXCEPTION')
pass
# Load scene callback
from bpy.app.handlers import persistent

View File

@@ -3,528 +3,316 @@ HMDL Export Blender Addon
By Jack Andersen <jackoalan@gmail.com>
'''
import struct
import bpy
import bpy, bmesh, operator, struct
from mathutils import Vector
class loop_vert:
# Class for building unique sets of vertex attributes for VBO generation
class VertPool:
pos = {}
norm = {}
skin = {}
color = []
uv = []
dlay = None
clays = []
ulays = []
def __init__(self, mesh, loop):
self.mesh = mesh
self.loop = loop
# Initialize hash-unique index for each available attribute
def __init__(self, bm):
dlay = None
if len(bm.verts.layers.deform):
dlay = bm.verts.layers.deform[0]
self.dlay = dlay
def __hash__(self):
return (self.mesh, self.loop.index).__hash__()
def __eq__(self, other):
return self.mesh == other.mesh and self.loop.index == other.loop.index
def __ne__(self, other):
return self.mesh != other.mesh or self.loop.index != other.loop.index
clays = []
for cl in range(len(bm.loops.layers.color)):
clays.append(bm.loops.layers.color[cl])
self.color.append({})
self.clays = clays
def __str__(self):
return (self.mesh, self.loop.index).__str__()
ulays = []
for ul in range(len(bm.loops.layers.uv)):
ulays.append(bm.loops.layers.uv[ul])
self.uv.append({})
self.ulays = ulays
# Per-vert pool attributes
for v in bm.verts:
pf = v.co.copy().freeze()
if pf not in self.pos:
self.pos[pf] = len(self.pos)
nf = v.normal.copy().freeze()
if nf not in self.norm:
self.norm[nf] = len(self.norm)
if dlay:
sf = tuple(sorted(v[dlay].items()))
if sf not in self.skin:
self.skin[sf] = len(self.skin)
# Round up to nearest 32 multiple
def ROUND_UP_32(num):
return (num + 31) & ~31
# Per-loop pool attributes
for f in bm.faces:
for l in f.loops:
for cl in range(len(clays)):
cf = l[clays[cl]].copy().freeze()
if cf not in self.color[cl]:
self.color[cl][cf] = len(self.color[cl])
for ul in range(len(ulays)):
uf = l[ulays[ul]].uv.copy().freeze()
if uf not in self.uv[ul]:
self.uv[ul][uf] = len(self.uv[ul])
# Round up to nearest 4 multiple
def ROUND_UP_4(num):
return (num + 3) & ~3
def write_out(self, writebuffunc, vert_groups):
writebuffunc(struct.pack('I', len(self.pos)))
for p in sorted(self.pos.items(), key=operator.itemgetter(1)):
writebuffunc(struct.pack('fff', p[0][0], p[0][1], p[0][2]))
# This routine conditionally inserts a loop into a multi-tiered
# array/set collection; simultaneously relating verts to loops and
# eliminating redundant loops (containing identical UV coordinates)
def _augment_loop_vert_array(lv_array, mesh, loop, uv_count):
# Create loop_vert object for comparative testing
lv = loop_vert(mesh, loop)
# First perform quick check to see if loop is already in a set
for existing_loop_set in lv_array:
if lv in existing_loop_set:
return
writebuffunc(struct.pack('I', len(self.norm)))
for n in sorted(self.norm.items(), key=operator.itemgetter(1)):
writebuffunc(struct.pack('fff', n[0][0], n[0][1], n[0][2]))
# Now perform extended check to see if any UV coordinate values already match
for existing_loop_set in lv_array:
for existing_loop in existing_loop_set:
matches = True
for uv_layer_idx in range(uv_count):
uv_layer = mesh.uv_layers[uv_layer_idx]
existing_uv_coords = uv_layer.data[existing_loop.loop.index].uv
check_uv_coords = uv_layer.data[loop.index].uv
if (existing_uv_coords[0] != check_uv_coords[0] or
existing_uv_coords[1] != check_uv_coords[1]):
matches = False
break
if matches:
existing_loop_set.append(lv)
return
writebuffunc(struct.pack('I', len(self.color)))
for clay in self.color:
writebuffunc(struct.pack('I', len(clay)))
for c in sorted(clay.items(), key=operator.itemgetter(1)):
writebuffunc(struct.pack('fff', c[0][0], c[0][1], c[0][2]))
# If we get here, no match found; add new set to `lv_array`
lv_array.append([lv])
writebuffunc(struct.pack('I', len(self.uv)))
for ulay in self.uv:
writebuffunc(struct.pack('I', len(ulay)))
for u in sorted(ulay.items(), key=operator.itemgetter(1)):
writebuffunc(struct.pack('ff', u[0][0], u[0][1]))
writebuffunc(struct.pack('I', len(vert_groups)))
for vgrp in vert_groups:
writebuffunc((vgrp.name + '\n').encode())
# Get loop set from collection generated with above method;
# containing a specified loop
def _get_loop_set(lv_array, mesh, loop):
# Create loop_vert object for comparative testing
lv = loop_vert(mesh, loop)
writebuffunc(struct.pack('I', len(self.skin)))
for s in sorted(self.skin.items(), key=operator.itemgetter(1)):
entries = s[0]
writebuffunc(struct.pack('I', len(entries)))
for ent in entries:
writebuffunc(struct.pack('If', ent[0], ent[1]))
for existing_loop_set in lv_array:
if lv in existing_loop_set:
return existing_loop_set
return None
def set_bm_layers(self, bm):
self.dlay = None
if len(bm.verts.layers.deform):
self.dlay = bm.verts.layers.deform[0]
clays = []
for cl in range(len(bm.loops.layers.color)):
clays.append(bm.loops.layers.color[cl])
self.clays = clays
# Method to find triangle opposite another triangle over two vert-indices
def _find_polygon_opposite_idxs(mesh, original_triangle, a_idx, b_idx):
for triangle in mesh.polygons:
if triangle == original_triangle:
ulays = []
for ul in range(len(bm.loops.layers.uv)):
ulays.append(bm.loops.layers.uv[ul])
self.ulays = ulays
def get_pos_idx(self, vert):
pf = vert.co.copy().freeze()
return self.pos[pf]
def get_norm_idx(self, vert):
nf = vert.normal.copy().freeze()
return self.norm[nf]
def get_skin_idx(self, vert):
sf = tuple(sorted(vert[self.dlay].items()))
return self.skin[sf]
def get_color_idx(self, loop, cidx):
cf = loop[self.clays[cidx]].copy().freeze()
return self.color[cidx][cf]
def get_uv_idx(self, loop, uidx):
uf = loop[self.ulays[uidx]].uv.copy().freeze()
return self.uv[uidx][uf]
def loop_out(self, writebuffunc, loop):
writebuffunc(struct.pack('BII', 1, self.get_pos_idx(loop.vert), self.get_norm_idx(loop.vert)))
for cl in range(len(self.clays)):
writebuffunc(struct.pack('I', self.get_color_idx(loop, cl)))
for ul in range(len(self.ulays)):
writebuffunc(struct.pack('I', self.get_uv_idx(loop, ul)))
writebuffunc(struct.pack('I', self.get_skin_idx(loop.vert)))
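A minimal sketch of how the pool is meant to be driven (the real flow is cook() in hecl/hmdl/__init__.py further down; the object, buffer, and import path here are illustrative, and a vertex-group/deform layer is assumed because loop_out always emits a skin index):

import bpy, bmesh
from hecl.hmdl.HMDLMesh import VertPool       # assumed addon import path

obj = bpy.context.object                      # any skinned MESH object
bm = bmesh.new()
bm.from_mesh(obj.data)
bmesh.ops.triangulate(bm, faces=bm.faces)

pool = VertPool(bm)                           # dedup indices keyed on frozen attribute values
buf = bytearray()
pool.write_out(buf.extend, obj.vertex_groups) # count-prefixed attribute pools

# Each loop becomes a flag byte plus (pos, norm, color..., uv..., skin) indices
for face in bm.faces:
    for loop in face.loops:
        pool.loop_out(buf.extend, loop)
bm.free()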
def recursive_faces_islands(dlay, list_out, rem_list, skin_slot_set, skin_slot_count, face):
if face not in rem_list:
return None
if dlay:
for v in face.verts:
sg = tuple(sorted(v[dlay].items()))
if sg not in skin_slot_set and len(skin_slot_set) == skin_slot_count:
return False
skin_slot_set.add(sg)
list_out.append(face)
rem_list.remove(face)
for e in face.edges:
if not e.is_contiguous:
continue
if (a_idx in triangle.vertices and b_idx in triangle.vertices):
return triangle
return None
# Method to find triangle opposite another triangle over two loop-vert sets
def _find_polygon_opposite_lvs(mesh, original_triangle, lv_a, lv_b):
a_idx = lv_a[0].loop.vertex_index
b_idx = lv_b[0].loop.vertex_index
return _find_polygon_opposite_idxs(mesh, original_triangle, a_idx, b_idx)
class hmdl_mesh:
def __init__(self):
# 4-byte ID string used in generated HMDL file
self.file_identifier = '_GEN'
# Array that holds collections. A collection is a 16-bit index
# worth of vertices, elements referencing them, and a
# primitive array to draw them
self.collections = []
# If vertex index space is exceeded for a single additional vertex,
# a new collection is created and returned by this routine
def _check_collection_overflow(self, mesh, collection, rigger, uv_count):
max_bone_count = 0;
if rigger:
max_bone_count = rigger.max_bone_count
if not collection or len(collection['vertices']) >= 65535:
new_collection = {'uv_count':uv_count, 'max_bone_count':max_bone_count, 'vertices':[], 'vert_weights':[], 'tri_strips':[]}
self.collections.append(new_collection)
return new_collection, True
else:
return collection, False
# Augments draw generator with a single blender MESH data object
def add_mesh(self, mesh, rigger, uv_count):
max_bone_count = 0;
if rigger:
max_bone_count = rigger.max_bone_count
print("Optimizing mesh:", mesh.name)
opt_gpu_vert_count = 0
# First, generate compressed loop-vertex array-array-set collection
loop_vert_array = []
for vert in mesh.vertices:
loop_verts = []
for loop in mesh.loops:
if loop.vertex_index == vert.index:
_augment_loop_vert_array(loop_verts, mesh, loop, uv_count)
loop_vert_array.append(loop_verts)
# Find best collection to add mesh data into
best_collection = None
for collection in self.collections:
if (collection['uv_count'] == uv_count and
collection['max_bone_count'] == max_bone_count and
len(collection['vertices']) < 65000):
best_collection = collection
break
if not best_collection:
# Create a new one if no good one found
best_collection, is_new_collection = self._check_collection_overflow(mesh, None, rigger, uv_count)
# If rigging, start an array of bone names to be bound to contiguous tri-strips
tri_strip_bones = []
tri_strip_bones_overflow = False
# Now begin generating draw primitives
visited_polys = set()
for poly in mesh.polygons:
# Skip if already visited
if poly in visited_polys:
for f in e.link_faces:
if f == face:
continue
# Allows restart if initial polygon was not added
good = False
while not good:
# Begin a tri-strip primitive (array of vert indices)
tri_strip = []
# Temporary references to trace out strips of triangles
temp_poly = poly
# Rolling references of last two emitted loop-vert sets (b is older)
last_loop_vert_a = None
last_loop_vert_b = None
# In the event of vertex-buffer overflow, this will be made true;
# resulting in the immediate splitting of a tri-strip
is_new_collection = False
if recursive_faces_islands(dlay, list_out, rem_list, skin_slot_set, skin_slot_count, f) == False:
return False
# As long as there is a connected polygon to visit
while temp_poly:
if 0 == len(tri_strip): # First triangle in strip
# Order the loops so the last two connect to a next polygon
idx0 = mesh.loops[temp_poly.loop_indices[0]].vertex_index
idx1 = mesh.loops[temp_poly.loop_indices[1]].vertex_index
idx2 = mesh.loops[temp_poly.loop_indices[2]].vertex_index
if not _find_polygon_opposite_idxs(mesh, temp_poly, idx1, idx2):
loop_idxs = [temp_poly.loop_indices[2], temp_poly.loop_indices[0], temp_poly.loop_indices[1]]
if not _find_polygon_opposite_idxs(mesh, temp_poly, idx0, idx1):
loop_idxs = [temp_poly.loop_indices[1], temp_poly.loop_indices[2], temp_poly.loop_indices[0]]
else:
loop_idxs = temp_poly.loop_indices
# Add three loop-vert vertices to tri-strip
for poly_loop_idx in loop_idxs:
poly_loop = mesh.loops[poly_loop_idx]
loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
# If rigging, ensure that necessary bones are available and get weights
weights = None
if rigger:
weights = rigger.augment_bone_array_with_lv(mesh, tri_strip_bones, loop_vert)
if weights is None:
tri_strip_bones_overflow = True
break
if loop_vert not in best_collection['vertices']:
best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger, uv_count)
if is_new_collection:
break
best_collection['vertices'].append(loop_vert)
best_collection['vert_weights'].append(weights)
tri_strip.append(best_collection['vertices'].index(loop_vert))
last_loop_vert_b = last_loop_vert_a
last_loop_vert_a = loop_vert
opt_gpu_vert_count += 1
#print('appended initial loop', loop_vert[0].loop.index)
def find_opposite_edge(face, boot_edge, boot_edge2, last_edge, last_edge_2):
if last_edge_2:
for e in face.edges:
if e.verts[0] in last_edge_2.verts or e.verts[1] in last_edge_2.verts:
continue
return e
elif last_edge:
return boot_edge2
else:
return boot_edge
if is_new_collection or tri_strip_bones_overflow:
break
else: # Not the first triangle in strip; look up all three loop-verts,
# ensure it matches last-2 rolling reference, emit remaining loop-vert
# Iterate loop verts
odd_loop_vert_out = None
loop_vert_match_count = 0
for poly_loop_idx in temp_poly.loop_indices:
poly_loop = mesh.loops[poly_loop_idx]
loop_vert = _get_loop_set(loop_vert_array[poly_loop.vertex_index], mesh, poly_loop)
if (loop_vert == last_loop_vert_a or loop_vert == last_loop_vert_b):
loop_vert_match_count += 1
continue
odd_loop_vert_out = loop_vert
# Ensure there are two existing matches to continue tri-strip
if loop_vert_match_count != 2 or not odd_loop_vert_out:
break
# If rigging, ensure that necessary bones are available and get weights
weights = None
if rigger:
weights = rigger.augment_bone_array_with_lv(mesh, tri_strip_bones, odd_loop_vert_out)
if weights is None:
tri_strip_bones_overflow = True
break
# Add to tri-strip
if odd_loop_vert_out not in best_collection['vertices']:
best_collection, is_new_collection = self._check_collection_overflow(mesh, best_collection, rigger, uv_count)
if is_new_collection:
break
best_collection['vertices'].append(odd_loop_vert_out)
best_collection['vert_weights'].append(weights)
tri_strip.append(best_collection['vertices'].index(odd_loop_vert_out))
last_loop_vert_b = last_loop_vert_a
last_loop_vert_a = odd_loop_vert_out
opt_gpu_vert_count += 1
def recursive_faces_strip(list_out, rem_list, face, boot_edge, boot_edge_2, last_edge, last_edge_2):
if face not in rem_list:
return
list_out.append(face)
rem_list.remove(face)
edge = find_opposite_edge(face, boot_edge, boot_edge_2, last_edge, last_edge_2)
if not edge:
return
for f in edge.link_faces:
if f == face:
continue
recursive_faces_strip(list_out, rem_list, f, boot_edge, boot_edge_2, edge, last_edge)
break
def count_contiguous_edges(face):
retval = 0
for e in face.edges:
if e.is_contiguous:
retval += 1
return retval
def find_loop_opposite_from_other_face(face, other_face):
for e in face.edges:
if e in other_face.edges:
edge = e
break
for l in face.loops:
if l.vert in edge.verts:
continue
return l
def stripify_primitive(writebuffunc, vert_pool, prim_faces, last_loop, last_idx):
if last_loop:
vert_pool.loop_out(writebuffunc, last_loop)
last_idx += 1
if len(prim_faces) == 1:
loop = prim_faces[0].loops[0]
if last_loop:
vert_pool.loop_out(writebuffunc, loop)
last_idx += 1
if last_idx & 1:
rev = True
else:
rev = False
for i in range(3):
vert_pool.loop_out(writebuffunc, loop)
last_loop = loop
last_idx += 1
if rev:
loop = loop.link_loop_prev
else:
loop = loop.link_loop_next
return last_loop, last_idx
loop = find_loop_opposite_from_other_face(prim_faces[0], prim_faces[1])
if last_loop:
vert_pool.loop_out(writebuffunc, loop)
last_idx += 1
if last_idx & 1:
rev = True
else:
rev = False
for i in range(3):
vert_pool.loop_out(writebuffunc, loop)
last_loop = loop
last_idx += 1
if rev:
loop = loop.link_loop_prev
else:
loop = loop.link_loop_next
for i in range(1, len(prim_faces)):
loop = find_loop_opposite_from_other_face(prim_faces[i], prim_faces[i-1])
vert_pool.loop_out(writebuffunc, loop)
last_loop = loop
last_idx += 1
return last_loop, last_idx
def write_out_surface(writebuffunc, vert_pool, bm, island_faces, mat_idx):
# This polygon is good
visited_polys.add(temp_poly)
# Find a polygon directly connected to this one to continue strip
temp_poly = _find_polygon_opposite_lvs(mesh, temp_poly, last_loop_vert_a, last_loop_vert_b)
if temp_poly in visited_polys:
temp_poly = None
# Centroid of surface
centroid = Vector()
for f in island_faces:
centroid += f.calc_center_bounds()
centroid /= len(island_faces)
writebuffunc(struct.pack('fff', centroid[0], centroid[1], centroid[2]))
# Material index
writebuffunc(struct.pack('I', mat_idx))
# AABB of surface
aabb_min = Vector((9999999, 9999999, 9999999))
aabb_max = Vector((-9999999, -9999999, -9999999))
for f in island_faces:
for v in f.verts:
for c in range(3):
if v.co[c] < aabb_min[c]:
aabb_min[c] = v.co[c]
if v.co[c] > aabb_max[c]:
aabb_max[c] = v.co[c]
writebuffunc(struct.pack('fff', aabb_min[0], aabb_min[1], aabb_min[2]))
writebuffunc(struct.pack('fff', aabb_max[0], aabb_max[1], aabb_max[2]))
# Add tri-strip to element array
if len(tri_strip):
best_collection['tri_strips'].append({'mesh':mesh, 'strip':tri_strip, 'strip_bones':tri_strip_bones})
good = True
if tri_strip_bones_overflow:
tri_strip_bones = []
tri_strip_bones_overflow = False
print("Mesh contains", len(mesh.polygons), "triangles")
print("Vert count: (%d -> %d)\n" % (len(mesh.loops), opt_gpu_vert_count))
# Average normal of surface
avg_norm = Vector()
for f in island_faces:
avg_norm += f.normal
avg_norm.normalize()
writebuffunc(struct.pack('fff', avg_norm[0], avg_norm[1], avg_norm[2]))
# Generate binary vertex buffer of collection index
def generate_vertex_buffer(self, index, endian_char):
collection = self.collections[index]
if not collection:
return None
# Positions output
pos_out = []
# Generate vert buffer struct
vstruct = struct.Struct(endian_char + 'f')
# If rigging, determine maximum number of bones in this collection
max_bones = 0
for i in range(len(collection['vertices'])):
weight_count = 0
if collection['vert_weights'][i]:
weight_count = len(collection['vert_weights'][i])
if weight_count > max_bones:
max_bones = weight_count
max_bones = ROUND_UP_4(max_bones)
# Build byte array
vert_bytes = bytearray()
for i in range(len(collection['vertices'])):
loop_vert = collection['vertices'][i]
bloop = loop_vert[0]
mesh = bloop.mesh
bvert = mesh.vertices[bloop.loop.vertex_index]
#print(bvert.co)
# Verts themselves
last_loop = None
last_idx = 0
while len(island_faces):
sel_lists_local = []
for start_face in island_faces:
for e in start_face.edges:
next_edges = []
for f in e.link_faces:
if f == start_face:
continue
for eg in f.edges:
if eg == e:
continue
next_edges.append(eg)
break
if len(next_edges) == 0:
next_edges.append(None)
for e2 in next_edges:
island_local = list(island_faces)
sel_list = []
recursive_faces_strip(sel_list, island_local, start_face, e, e2, None, None)
sel_lists_local.append(sel_list)
max_count = 0
max_sl = None
for sl in sel_lists_local:
if len(sl) > max_count:
max_count = len(sl)
max_sl = sl
for f in max_sl:
island_faces.remove(f)
last_loop, last_idx = stripify_primitive(writebuffunc, vert_pool, max_sl, last_loop, last_idx)
writebuffunc(struct.pack('B', 0))
# Position
pos_out.append((bvert.co, bvert.normal))
for comp in range(4):
if comp in range(len(bvert.co)):
vert_bytes += vstruct.pack(bvert.co[comp])
else:
vert_bytes += vstruct.pack(0.0)
# Normal
for comp in range(4):
if comp in range(len(bvert.normal)):
vert_bytes += vstruct.pack(bvert.normal[comp])
else:
vert_bytes += vstruct.pack(0.0)
# Weights
weights = collection['vert_weights'][i]
for j in range(max_bones):
if j < len(weights):
vert_bytes += vstruct.pack(weights[j])
else:
vert_bytes += vstruct.pack(0.0)
# UVs
added_uvs = 0
for uv_idx in range(collection['uv_count']):
coords = mesh.uv_layers[uv_idx].data[bloop.loop.index].uv
vert_bytes += vstruct.pack(coords[0])
vert_bytes += vstruct.pack(-coords[1])
added_uvs += 1
# Pad to 16-byte alignment
if added_uvs & 1:
vert_bytes += vstruct.pack(0.0)
vert_bytes += vstruct.pack(0.0)
return collection['uv_count'], max_bones, vert_bytes, pos_out
# Generate binary element buffer of collection index
def generate_element_buffer(self, index, endian_char):
collection = self.collections[index]
if not collection:
return None
# Numeric array out
arr_out = []
# Generate element buffer struct
estruct = struct.Struct(endian_char + 'H')
# Build mesh-primitive hierarchy
last_mesh = collection['tri_strips'][0]['mesh']
mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
collection_primitives = [mesh_primitives]
# Collection element byte-array
cur_offset = 0
element_bytes = bytearray()
# Last element index entry and strip length for forming degenerate strip
last_elem = None
strip_len = 0
# Last strip bone array (for rigging)
last_strip_bones = collection['tri_strips'][0]['strip_bones']
# Build single degenerate tri-strip
for strip in collection['tri_strips']:
#print('new strip', collection['tri_strips'].index(strip))
if last_mesh != strip['mesh'] or last_strip_bones != strip['strip_bones']:
#print('splitting primitive')
# New mesh; force new strip
mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
cur_offset += strip_len
last_elem = None
strip_len = 0
last_mesh = strip['mesh']
mesh_primitives = {'mesh':last_mesh, 'primitives':[]}
collection_primitives.append(mesh_primitives)
elif last_elem is not None:
#print('extending primitive')
# Existing mesh being extended as degenerate strip
strip_len += 2
element_bytes += estruct.pack(last_elem)
element_bytes += estruct.pack(strip['strip'][0])
arr_out.append(last_elem)
arr_out.append(strip['strip'][0])
# If current element count is odd, add additional degenerate strip to make it even
# This ensures that the sub-strip has proper winding-order for backface culling
if (strip_len & 1):
strip_len += 1
element_bytes += estruct.pack(strip['strip'][0])
arr_out.append(strip['strip'][0])
# Primitive tri-strip byte array
for idx in strip['strip']:
#print(idx)
strip_len += 1
element_bytes += estruct.pack(idx)
arr_out.append(idx)
last_elem = idx
# Final mesh entry
mesh_primitives['primitives'].append({'offset':cur_offset, 'length':strip_len, 'bones':last_strip_bones})
cur_offset += strip_len
return collection_primitives, element_bytes, arr_out
# Generate binary draw-index buffer of collection index
def generate_index_buffer(self, collection_primitives, endian_char, rigger):
# Bytearray to fill
index_bytes = bytearray()
# Submesh count
index_bytes += struct.pack(endian_char + 'I', len(collection_primitives))
# And array
for mesh in collection_primitives:
# Primitive count
index_bytes += struct.pack(endian_char + 'I', len(mesh['primitives']))
# Primitive array
for prim in mesh['primitives']:
# If rigging, append skin index
if rigger:
skin_index = rigger.augment_skin(prim['bones'])
index_bytes += struct.pack(endian_char + 'I', skin_index)
index_bytes += struct.pack(endian_char + 'I', 2)
index_bytes += struct.pack(endian_char + 'I', prim['offset'])
index_bytes += struct.pack(endian_char + 'I', prim['length'])
return index_bytes
# C-generation operator
import bmesh
class hmdl_mesh_operator(bpy.types.Operator):
bl_idname = "scene.hmdl_mesh"
bl_label = "HMDL C mesh maker"
bl_description = "HMDL Mesh source generation utility"
@classmethod
def poll(cls, context):
return context.object and context.object.type == 'MESH'
def execute(self, context):
copy_mesh = context.object.data.copy()
copy_obj = context.object.copy()
copy_obj.data = copy_mesh
bm = bmesh.new()
bm.from_mesh(copy_mesh)
bmesh.ops.triangulate(bm, faces=bm.faces)
#to_remove = []
#for face in bm.faces:
# if face.material_index != 7:
# to_remove.append(face)
#bmesh.ops.delete(bm, geom=to_remove, context=5)
bm.to_mesh(copy_mesh)
bm.free()
context.scene.objects.link(copy_obj)
rmesh = hmdl_mesh()
rmesh.add_mesh(copy_mesh, None, 0)
str_out = '/* Vertex Buffer */\nstatic const float VERT_BUF[] = {\n'
vert_arr = rmesh.generate_vertex_buffer(0, '<')[3]
for v in vert_arr:
str_out += ' %f, %f, %f, 0.0, %f, %f, %f, 0.0,\n' % (v[0][0], v[0][1], v[0][2], v[1][0], v[1][1], v[1][2])
ebuf_arr = rmesh.generate_element_buffer(0, '<')[2]
str_out += '};\n\n/* Element Buffer */\n#define ELEM_BUF_COUNT %d\nstatic const u16 ELEM_BUF[] = {\n' % len(ebuf_arr)
for e in ebuf_arr:
str_out += ' %d,\n' % e
str_out += '};\n'
context.scene.objects.unlink(copy_obj)
bpy.data.objects.remove(copy_obj)
bpy.data.meshes.remove(copy_mesh)
context.window_manager.clipboard = str_out
self.report({'INFO'}, "Wrote mesh C to clipboard")
return {'FINISHED'}
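The surface writer above stitches every selected run of faces into one degenerate tri-strip: each seam repeats the previous strip's last index and the next strip's first index, with one extra repeat when the running count is odd so winding order (and backface culling) stays correct. A standalone, index-level illustration of that joining rule, simplified and not taken from this commit:

def join_strips(strips):
    # Concatenate triangle strips into one strip using degenerate triangles.
    out = []
    for strip in strips:
        if out:
            out.append(out[-1])      # repeat last emitted index
            out.append(strip[0])     # repeat first index of the next strip
            if len(out) & 1:         # odd so far: pad once to preserve winding
                out.append(strip[0])
        out.extend(strip)
    return out

print(join_strips([[0, 1, 2], [3, 4, 5]]))   # [0, 1, 2, 2, 3, 3, 3, 4, 5]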

View File

@@ -6,12 +6,33 @@ Traces the 'Blender Internal' shader node structure to generate a
HECL combiner string
'''
import bpy, bpy.path, os.path
def get_texmap_idx(tex_list, name):
for i in range(len(tex_list)):
if tex_list[i] == name:
return i
retval = len(tex_list)
tex_list.append(name)
return retval
def get_texture_path(name):
if name not in bpy.data.textures:
raise RuntimeError('unable to find %s texture' % name)
tex = bpy.data.textures[name]
if tex.type != 'IMAGE':
raise RuntimeError('%s texture unsupported for %s, please save as IMAGE' % (tex.type, name))
img = tex.image
if not img:
raise RuntimeError('image not set in %s' % name)
return os.path.normpath(bpy.path.abspath(img.filepath))
# Trace color node structure
def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
def recursive_color_trace(mat_obj, mesh_obj, tex_list, node, socket=None):
if node.type == 'OUTPUT':
if node.inputs['Color'].is_linked:
return recursive_color_trace(mat_obj, mesh_obj, blend_path, node.inputs['Color'].links[0].from_node, node.inputs['Color'].links[0].from_socket)
return recursive_color_trace(mat_obj, mesh_obj, tex_list, node.inputs['Color'].links[0].from_node, node.inputs['Color'].links[0].from_socket)
else:
return 'vec3(%f, %f, %f)' % (node.inputs['Color'].default_value[0],
node.inputs['Color'].default_value[1],
@@ -20,14 +41,14 @@ def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
elif node.type == 'MIX_RGB':
if node.inputs[1].is_linked:
a_input = recursive_color_trace(mat_obj, mesh_obj, blend_path, node.inputs[1].links[0].from_node, node.inputs[1].links[0].from_socket)
a_input = recursive_color_trace(mat_obj, mesh_obj, tex_list, node.inputs[1].links[0].from_node, node.inputs[1].links[0].from_socket)
else:
a_input = 'vec3(%f, %f, %f)' % (node.inputs[1].default_value[0],
node.inputs[1].default_value[1],
node.inputs[1].default_value[2])
if node.inputs[2].is_linked:
b_input = recursive_color_trace(mat_obj, mesh_obj, blend_path, node.inputs[2].links[0].from_node, node.inputs[2].links[0].from_socket)
b_input = recursive_color_trace(mat_obj, mesh_obj, tex_list, node.inputs[2].links[0].from_node, node.inputs[2].links[0].from_socket)
else:
b_input = 'vec3(%f, %f, %f)' % (node.inputs[2].default_value[0],
node.inputs[2].default_value[1],
@@ -53,7 +74,7 @@ def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
soc_from = node.inputs['Vector'].links[0].from_socket
if soc_from.node.type == 'GROUP':
matrix_str = '%s(' % soc_from.node.node_tree.name
matrix_str = '%s(%%s, ' % soc_from.node.node_tree.name
for s in range(len(soc_from.node.inputs)-1):
soc = soc_from.node.inputs[s+1]
if len(soc.links):
@@ -87,42 +108,58 @@ def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
# Resolve map and matrix index
node_label = soc_from.node.label
if not matrix_str and node_label.startswith('MTX_'):
matrix_str = 'hecl_TexMtx[%d]' % int(node_label[4:])
matrix_str = 'HECLTexMtx(%%s, %d)' % int(node_label[4:])
if soc_from.name == 'UV':
uv_name = soc_from.node.uv_layer
uv_idx = mesh_obj.data.uv_layers.find(uv_name)
if uv_idx == -1:
raise RuntimeError('UV Layer "%s" doesn\'t exist' % uv_name)
uvsource_str = 'hecl_TexCoord[%d]' % uv_idx
uvsource_str = 'HECLUV(%d)' % uv_idx
elif soc_from.name == 'Normal':
uvsource_str = 'hecl_TexCoordModelViewNormal'
uvsource_str = 'HECLNormal()'
elif soc_from.name == 'View':
uvsource_str = 'hecl_TexCoordModelViewPosition'
uvsource_str = 'HECLView()'
else:
raise RuntimeError("Only the 'UV', 'Normal' and 'View' sockets may be used from 'Geometry' nodes")
if socket.name == 'Value':
if matrix_str:
return 'texture("%s:%s", %s, %s).a' % (blend_path, node.texture.name, uvsource_str, matrix_str)
else:
return 'texture("%s:%s", %s).a' % (blend_path, node.texture.name, uvsource_str)
uvsource_str = matrix_str % uvsource_str
return 'texture(%d, %s).a' % (get_texmap_idx(tex_list, node.texture.name), uvsource_str)
if socket.name == 'Color':
if matrix_str:
return 'texture("%s:%s", %s, %s)' % (blend_path, node.texture.name, uvsource_str, matrix_str)
else:
return 'texture("%s:%s", %s)' % (blend_path, node.texture.name, uvsource_str)
uvsource_str = matrix_str % uvsource_str
return 'texture(%d, %s)' % (get_texmap_idx(tex_list, node.texture.name), uvsource_str)
else:
raise RuntimeError("Only the 'Value' or 'Color' output sockets may be used from Texture nodes")
elif node.type == 'GROUP':
group_str = '%s(' % node.node_tree.name
did_first = False
for input in node.inputs:
if input.type == 'RGBA':
if did_first:
group_str += ', '
if input.is_linked:
group_str += recursive_color_trace(mat_obj, mesh_obj, tex_list, input.links[0].from_node, input.links[0].from_socket)
else:
group_str += 'vec3(%f, %f, %f)' % (input.default_value[0],
input.default_value[1],
input.default_value[2])
did_first = True
group_str += ')'
return group_str
elif node.type == 'RGB':
if node.label.startswith('DYNAMIC_'):
dynamic_index = int(node.label[8:])
return 'hecl_KColor[%d]' % dynamic_index
return 'HECLColorReg(%d)' % dynamic_index
return '%f' % node.outputs['Color'].default_value
@@ -131,7 +168,7 @@ def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
if mat_obj.use_shadeless:
return 'vec3(1.0)'
else:
return 'hecl_Lighting'
return 'HECLLighting()'
else:
raise RuntimeError("HMDL is unable to process '{0}' shader nodes in '{1}'".format(node.type, mat_obj.name))
@@ -139,23 +176,23 @@ def recursive_color_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
# Trace alpha node structure
def recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
def recursive_alpha_trace(mat_obj, mesh_obj, tex_list, node, socket=None):
if node.type == 'OUTPUT':
if node.inputs['Alpha'].is_linked:
return recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node.inputs['Alpha'].links[0].from_node, node.inputs['Alpha'].links[0].from_socket)
return recursive_alpha_trace(mat_obj, mesh_obj, tex_list, node.inputs['Alpha'].links[0].from_node, node.inputs['Alpha'].links[0].from_socket)
else:
return '%f' % node.inputs['Alpha'].default_value
elif node.type == 'MATH':
if node.inputs[0].is_linked:
a_input = recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node.inputs[0].links[0].from_node, node.inputs[0].links[0].from_socket)
a_input = recursive_alpha_trace(mat_obj, mesh_obj, tex_list, node.inputs[0].links[0].from_node, node.inputs[0].links[0].from_socket)
else:
a_input = '%f' % node.inputs[0].default_value
if node.inputs[1].is_linked:
b_input = recursive_alpha_trace(plat, mat_obj, mesh_obj, tex_list, mtx_dict, node.inputs[1].links[0].from_node, node.inputs[1].links[0].from_socket)
b_input = recursive_alpha_trace(mat_obj, mesh_obj, tex_list, node.inputs[1].links[0].from_node, node.inputs[1].links[0].from_socket)
else:
b_input = '%f' % node.inputs[1].default_value
@@ -179,7 +216,7 @@ def recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
soc_from = node.inputs['Vector'].links[0].from_socket
if soc_from.node.type == 'GROUP':
matrix_str = '%s(' % soc_from.node.node_tree.name
matrix_str = '%s(%%s, ' % soc_from.node.node_tree.name
for s in range(len(soc_from.node.inputs)-1):
soc = soc_from.node.inputs[s+1]
if len(soc.links):
@@ -213,37 +250,52 @@ def recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
# Resolve map and matrix index
node_label = soc_from.node.label
if not matrix_str and node_label.startswith('MTX_'):
matrix_str = 'hecl_TexMtx[%d]' % int(node_label[4:])
matrix_str = 'HECLTexMtx(%%s, %d)' % int(node_label[4:])
if soc_from.name == 'UV':
uv_name = soc_from.node.uv_layer
uv_idx = mesh_obj.data.uv_layers.find(uv_name)
if uv_idx == -1:
raise RuntimeError('UV Layer "%s" doesn\'t exist' % uv_name)
uvsource_str = 'hecl_TexCoord[%d]' % uv_idx
uvsource_str = 'HECLUV(%d)' % uv_idx
elif soc_from.name == 'Normal':
uvsource_str = 'hecl_TexCoordModelViewNormal'
uvsource_str = 'HECLNormal()'
elif soc_from.name == 'View':
uvsource_str = 'hecl_TexCoordModelViewPosition'
uvsource_str = 'HECLView()'
else:
raise RuntimeError("Only the 'UV', 'Normal' and 'View' sockets may be used from 'Geometry' nodes")
if socket.name == 'Value':
if matrix_str:
return 'texture("%s:%s", %s, %s).a' % (blend_path, node.texture.name, uvsource_str, matrix_str)
else:
return 'texture("%s:%s", %s).a' % (blend_path, node.texture.name, uvsource_str)
uvsource_str = matrix_str % uvsource_str
return 'texture(%d, %s).a' % (get_texmap_idx(tex_list, node.texture.name), uvsource_str)
else:
raise RuntimeError("Only the 'Value' output sockets may be used from Texture nodes")
elif node.type == 'GROUP':
group_str = '%s(' % node.node_tree.name
did_first = False
for input in node.inputs:
if input.type == 'VALUE':
if did_first:
group_str += ', '
if input.is_linked:
group_str += recursive_alpha_trace(mat_obj, mesh_obj, tex_list, input.links[0].from_node, input.links[0].from_socket)
else:
group_str += '%f' % input.default_value
did_first = True
group_str += ')'
return group_str
elif node.type == 'VALUE':
if node.label.startswith('DYNAMIC_'):
dynamic_index = int(node.label[8:])
return 'hecl_KColor[%d].a' % dynamic_index
return 'HECLColorReg(%d).a' % dynamic_index
return '%f' % node.outputs['Value'].default_value
@@ -256,7 +308,7 @@ def recursive_alpha_trace(mat_obj, mesh_obj, blend_path, node, socket=None):
def shader(mat_obj, mesh_obj, blend_path):
def shader(mat_obj, mesh_obj):
if not mat_obj.use_nodes:
raise RuntimeError("HMDL *requires* that shader nodes are used; '{0}' does not".format(mat_obj.name))
@@ -268,25 +320,19 @@ def shader(mat_obj, mesh_obj, blend_path):
output_node = mat_obj.node_tree.nodes['Output']
# Trace nodes and build result
color_trace_result = recursive_color_trace(mat_obj, mesh_obj, blend_path, output_node)
alpha_trace_result = recursive_alpha_trace(mat_obj, mesh_obj, blend_path, output_node)
tex_list = []
color_trace_result = recursive_color_trace(mat_obj, mesh_obj, tex_list, output_node)
alpha_trace_result = recursive_alpha_trace(mat_obj, mesh_obj, tex_list, output_node)
# Resolve texture paths
tex_paths = [get_texture_path(name) for name in tex_list]
blend_src = 'hecl_One'
blend_dest = 'hecl_Zero'
if mat_obj.game_settings.alpha_blend == 'ALPHA' or mat_obj.game_settings.alpha_blend == 'ALPHA_SORT':
blend_src = 'hecl_SrcAlpha'
blend_dest = 'hecl_OneMinusSrcAlpha'
return "HECLBlend(%s, %s)" % (color_trace_result, alpha_trace_result), tex_paths
elif mat_obj.game_settings.alpha_blend == 'ADD':
blend_src = 'hecl_SrcAlpha'
blend_dest = 'hecl_One'
# All done!
return '''\
hecl_BlendSrcFactor = %s;
hecl_BlendDestFactor = %s;
hecl_FragColor[0] = %s;
hecl_FragColor[0].a = %s;
''' % (blend_src, blend_dest, color_trace_result, alpha_trace_result)
return "HECLAdditive(%s, %s)" % (color_trace_result, alpha_trace_result), tex_paths
else:
return "HECLOpaque(%s)" % color_trace_result, tex_paths
# DEBUG operator
import bpy
@@ -300,9 +346,11 @@ class hecl_shader_operator(bpy.types.Operator):
return context.object and context.object.type == 'MESH'
def execute(self, context):
shad = shader(context.object.active_material, context.object, bpy.data.filepath)
shad, texs = shader(context.object.active_material, context.object)
vs = bpy.data.texts.new('HECL SHADER')
vs.write(shad)
vs.write((shad + '\n'))
for tex in texs:
vs.write(tex + '\n')
return {'FINISHED'}
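The tracer now returns a (combiner, texture-path list) pair built from the HECL* intrinsics above instead of embedding blend-file texture references. A rough illustration, assuming a node material whose Output Color input is wired straight to an IMAGE texture sampled through UV layer 0 with opaque game-settings blending (the import path is an assumption):

import bpy
from hecl.hmdl import HMDLShader    # assumed addon import path

mat_obj = bpy.context.object.active_material
mesh_obj = bpy.context.object
combiner, tex_paths = HMDLShader.shader(mat_obj, mesh_obj)
# combiner  -> e.g. 'HECLOpaque(texture(0, HECLUV(0)))'
# tex_paths -> e.g. ['/absolute/path/to/that/image.png']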

View File

@@ -1,99 +0,0 @@
'''
HMDL Export Blender Addon
By Jack Andersen <jackoalan@gmail.com>
This file provides the means to generate an RGBA TXTR resource
buffer for packaging into an .hlpk (yes, I know this is slow,
but it's very flexible and supports Blender's procedural textures)
'''
from mathutils import Vector
import struct
def count_bits(num):
accum = 0
index = 0
for i in range(32):
if ((num >> i) & 1):
accum += 1
index = i
return accum, index
def make_txtr(tex, size=(512,512)):
if tex.type == 'IMAGE':
size = tex.image.size
# Validate image for mipmapping
can_mipmap = False
w_bits, w_idx = count_bits(size[0])
h_bits, h_idx = count_bits(size[1])
if w_bits == 1 and h_bits == 1 and tex.use_mipmap:
can_mipmap = True
# Main image 2D array
main_array = []
for y in range(size[1]):
row = []
main_array.append(row)
for x in range(size[0]):
texel = tex.evaluate((x * 2 / size[0] - 1.0, y * 2 / size[1] - 1.0, 0))
row.append(texel)
# Count potential mipmap levels
series_count = 1
if can_mipmap:
if size[0] > size[1]:
series_count = w_idx + 1
else:
series_count = h_idx + 1
# Make header
tex_bytes = struct.pack('IHHI', 0, size[0], size[1], series_count)
# Initial mipmap level
for y in main_array:
for x in y:
tex_bytes += struct.pack('BBBB',
min(255, int(x[0]*256)),
min(255, int(x[1]*256)),
min(255, int(x[2]*256)),
min(255, int(x[3]*256)))
# Prepare mipmap maker
if can_mipmap:
# Box filter
prev_array = main_array
for i in range(series_count - 1):
new_array = []
for y in range(max(len(prev_array) // 2, 1)):
y1 = prev_array[y*2]
if len(prev_array) > 1:
y2 = prev_array[y*2+1]
else:
y2 = prev_array[y*2]
new_row = []
new_array.append(new_row)
for x in range(max(len(y1) // 2, 1)):
texel_val = Vector((0,0,0,0))
texel_val += y1[x*2]
texel_val += y2[x*2]
if len(y1) > 1:
texel_val += y1[x*2+1]
texel_val += y2[x*2+1]
else:
texel_val += y1[x*2]
texel_val += y2[x*2]
texel_val /= 4
new_row.append(texel_val)
tex_bytes += struct.pack('BBBB',
min(255, int(texel_val[0]*256)),
min(255, int(texel_val[1]*256)),
min(255, int(texel_val[2]*256)),
min(255, int(texel_val[3]*256)))
prev_array = new_array
return tex_bytes

View File

@@ -23,7 +23,7 @@ Positions, Normals, UV coordinates, and Weight Vectors
import struct, bpy, bmesh
from mathutils import Vector
from . import HMDLShader, HMDLSkin, HMDLMesh, HMDLTxtr
from . import HMDLShader, HMDLSkin, HMDLMesh
def get_3d_context(object_):
window = bpy.context.window_manager.windows[0]
@@ -91,21 +91,9 @@ def generate_skeleton_info(armature, endian_char='<'):
# Takes a Blender 'Mesh' object (not the datablock)
# and performs a one-shot conversion process to HMDL; packaging
# into the HECL data-pipeline and returning a hash once complete
def cook(writebuffunc, platform, endianchar):
print('COOKING HMDL')
return True
mesh_obj = bpy.data.objects[bpy.context.scene.hecl_mesh_obj]
def cook(writebuffunc, mesh_obj, max_skin_banks):
if mesh_obj.type != 'MESH':
raise RuntimeError("%s is not a mesh" % mesh_obj.name)
# Partial meshes
part_meshes = set()
# Start with shader, mesh and rigging-info generation.
# Use SHA1 hashing to determine what the ID-hash will be when
# shaders are packaged; strip out duplicates
shader_set = []
rigger = None
# Normalize all vertex weights
override = get_3d_context(mesh_obj)
@@ -121,21 +109,12 @@ def cook(writebuffunc, platform, endianchar):
copy_obj.data = mesh_obj.to_mesh(bpy.context.scene, True, 'RENDER')
copy_obj.scale = mesh_obj.scale
bpy.context.scene.objects.link(copy_obj)
# If skinned, establish rigging generator
if len(mesh_obj.vertex_groups):
rigger = hmdl_skin.hmdl_skin(max_bone_count, mesh_obj.vertex_groups)
# Determine count of transformation matricies to deliver to shader set
actual_max_bone_counts = [1] * len(mesh_obj.data.materials)
max_bone_count = 1
for mat_idx in range(len(mesh_obj.data.materials)):
mat = mesh_obj.data.materials[mat_idx]
count = hmdl_shader.max_transform_counter(mat, mesh_obj)
if count > 1:
actual_max_bone_counts[mat_idx] = count
if count > max_bone_count:
max_bone_count = count
# Create master triangulated BMesh and VertPool
bm_master = bmesh.new()
bm_master.from_mesh(copy_obj.data)
bmesh.ops.triangulate(bm_master, faces=bm_master.faces)
vert_pool = HMDLMesh.VertPool(bm_master)
# Sort materials by pass index first
sorted_material_idxs = []
@@ -150,133 +129,78 @@ def cook(writebuffunc, platform, endianchar):
source_mat_set.discard(min_mat_idx)
# Generate shaders
actual_max_texmtx_count = 0
for mat_idx in sorted_material_idxs:
shader_hashes = []
shader_uv_count = 0
if mesh_obj.data.hecl_material_count > 0:
for grp_idx in range(mesh_obj.data.hecl_material_count):
if mesh_obj.data.hecl_material_count > 0:
writebuffunc(struct.pack('I', mesh_obj.data.hecl_material_count))
for grp_idx in range(mesh_obj.data.hecl_material_count):
writebuffunc(struct.pack('I', len(sorted_material_idxs)))
for mat_idx in sorted_material_idxs:
found = False
for mat in bpy.data.materials:
if mat.name.endswith('_%u_%u' % (grp_idx, mat_idx)):
hecl_str = hmdl_shader.shader(mat, mesh_obj, bpy.data.filepath)
else:
hecl_str, texs = HMDLShader.shader(mat, mesh_obj)
writebuffunc((hecl_str + '\n').encode())
writebuffunc(struct.pack('I', len(texs)))
for tex in texs:
writebuffunc((tex + '\n').encode())
found = True
break
if not found:
raise RuntimeError('uneven material set %d in %s' % (grp_idx, mesh_obj.name))
else:
writebuffunc(struct.pack('II', 1, len(sorted_material_idxs)))
for mat_idx in sorted_material_idxs:
mat = mesh_obj.data.materials[mat_idx]
hecl_str = hmdl_shader.shader(mat, mesh_obj, bpy.data.filepath)
hecl_str, texs = HMDLShader.shader(mat, mesh_obj)
writebuffunc((hecl_str + '\n').encode())
writebuffunc(struct.pack('I', len(texs)))
for tex in texs:
writebuffunc((tex + '\n').encode())
mesh_maker = hmdl_mesh.hmdl_mesh()
# Output vert pool
vert_pool.write_out(writebuffunc, mesh_obj.vertex_groups)
# Make special version of mesh with just the relevant material;
# Also perform triangulation
mesh = bpy.data.meshes.new(copy_obj.name + '_' + str(mat_idx))
part_meshes.add(mesh)
bm = bmesh.new()
bm.from_mesh(copy_obj.data)
# Generate island meshes
for mat_idx in sorted_material_idxs:
# Make special version of mesh with just the relevant material
bm = bm_master.copy()
to_remove = []
shader_center = Vector((0,0,0))
shader_center_count = 0
for face in bm.faces:
if face.material_index != mat_idx:
to_remove.append(face)
else:
shader_center += face.calc_center_bounds()
shader_center_count += 1
shader_center /= shader_center_count
bmesh.ops.delete(bm, geom=to_remove, context=5)
bmesh.ops.triangulate(bm, faces=bm.faces)
vert_pool.set_bm_layers(bm)
dlay = None
if len(bm.verts.layers.deform):
dlay = bm.verts.layers.deform[0]
mat_faces_rem = list(bm.faces)
while len(mat_faces_rem):
the_list = []
skin_slot_set = set()
HMDLMesh.recursive_faces_islands(dlay, the_list, mat_faces_rem, skin_slot_set,
max_skin_banks, mat_faces_rem[0])
writebuffunc(struct.pack('B', 1))
HMDLMesh.write_out_surface(writebuffunc, vert_pool, bm, the_list, mat_idx)
bm.to_mesh(mesh)
bm.free()
# Optimise mesh
if rigger:
mesh_maker.add_mesh(mesh, rigger, shader_uv_count)
else:
mesh_maker.add_mesh(mesh, None, shader_uv_count)
shader_set.append((shader_hashes, mesh_maker, shader_center))
# No more surfaces
writebuffunc(struct.pack('B', 0))
# Filter out useless AABB points and generate data array
aabb = bytearray()
for comp in copy_obj.bound_box[0]:
aabb += struct.pack(endian_char + 'f', comp)
aabb += struct.pack('f', comp)
for comp in copy_obj.bound_box[6]:
aabb += struct.pack(endian_char + 'f', comp)
aabb += struct.pack('f', comp)
# Delete copied mesh from scene
bpy.context.scene.objects.unlink(copy_obj)
bpy.data.objects.remove(copy_obj)
bpy.data.meshes.remove(copy_mesh)
# Count total collections
total_collections = 0
for shader in shader_set:
total_collections += len(shader[1].collections)
# Start writing master buffer
output_data = bytearray()
output_data += aabb
output_data += struct.pack(endian_char + 'III', mesh_obj.data.hecl_material_count, len(shader_set), total_collections)
# Shader Reference Data (truncated SHA1 hashes)
if mesh_obj.data.hecl_material_count > 0:
for grp_idx in range(mesh_obj.data.hecl_material_count):
for shader in shader_set:
output_data += shader[0][grp_idx]
else:
for shader in shader_set:
for subshader in shader[0]:
output_data += subshader
# Generate mesh data
for shader in shader_set:
mesh_maker = shader[1]
output_data += struct.pack(endian_char + 'Ifff', len(mesh_maker.collections), shader[2][0], shader[2][1], shader[2][2])
for coll_idx in range(len(mesh_maker.collections)):
# Vert Buffer
uv_count, max_bones, vert_bytes, vert_arr = mesh_maker.generate_vertex_buffer(coll_idx, endian_char)
output_data += struct.pack(endian_char + 'III', uv_count, max_bones // 4, len(vert_bytes))
output_data += vert_bytes
# Elem Buffer
collection_primitives, element_bytes, elem_arr = mesh_maker.generate_element_buffer(coll_idx, endian_char)
output_data += struct.pack(endian_char + 'I', len(element_bytes))
output_data += element_bytes
# Index Buffer
index_bytes = mesh_maker.generate_index_buffer(collection_primitives, endian_char, rigger)
output_data += struct.pack(endian_char + 'I', len(index_bytes))
output_data += index_bytes
# Generate rigging data
skin_info = None
if rigger:
skin_info = rigger.generate_rigging_info(endian_char)
# Write final buffer
final_data = bytearray()
final_data = b'HMDL'
if rigger:
final_data += struct.pack(endian_char + 'IIII', 1, actual_max_texmtx_count, max_bone_count, len(skin_info))
final_data += skin_info
else:
final_data += struct.pack(endian_char + 'II', 0, actual_max_texmtx_count)
final_data += output_data
# Clean up
for mesh in part_meshes:
bpy.data.meshes.remove(mesh)
# Write final mesh object
if area_db_id is not None:
new_hash = 0
else:
new_hash = heclpak.add_object(final_data, b'HMDL', resource_name)
res_db.update_resource_stats(db_id, new_hash)
return db_id, new_hash, final_data
def draw(layout, context):
layout.prop_search(context.scene, 'hecl_mesh_obj', context.scene, 'objects')

View File

@@ -143,14 +143,31 @@ def dataout_loop():
elif cmdargs[0] == 'MESHCOMPILE':
meshName = cmdargs[1]
maxIdx = int(cmdargs[2])
maxSkinBanks = int(cmdargs[3])
maxSkinBanks = int(cmdargs[2])
if meshName not in bpy.data.objects:
writepipeline(b'mesh not found')
continue
hecl.hmdl.cook(writepipebuf, bpy.data.objects[meshName])
hecl.hmdl.cook(writepipebuf, bpy.data.objects[meshName], maxSkinBanks)
elif cmdargs[0] == 'MESHCOMPILEALL':
maxSkinBanks = int(cmdargs[1])
bpy.ops.object.select_all(action='DESELECT')
join_mesh = bpy.data.meshes.new('JOIN_MESH')
join_obj = bpy.data.objects.new(join_mesh.name, join_mesh)
bpy.context.scene.objects.link(join_obj)
bpy.ops.object.select_by_type(type='MESH')
bpy.context.scene.objects.active = join_obj
bpy.ops.object.join()
hecl.hmdl.cook(writepipebuf, join_obj, maxSkinBanks)
bpy.context.scene.objects.unlink(join_obj)
bpy.data.objects.remove(join_obj)
bpy.data.meshes.remove(join_mesh)
# Command loop
while True:
@@ -250,7 +267,6 @@ while True:
writepipeline(b'ERROR')
elif cmdargs[0] == 'DATABEGIN':
writepipeline(b'READY')
dataout_loop()
elif cmdargs[0] == 'DATAEND':