stuff, idk

This commit is contained in:
Amazed 2021-09-29 20:37:29 +02:00
parent 1477d5263e
commit 8a946952ad
7 changed files with 1192 additions and 65 deletions

47
anm.py
View File

@ -1,3 +1,4 @@
from lib.gbs import *
from lib.skn_anm import * from lib.skn_anm import *
import os import os
@ -9,9 +10,51 @@ def read_anm(filepath) -> AnmAnim:
return r return r
def read_skn(filepath) -> AnmSkin:
    """Parse *filepath* as an AnmSkin, printing how much of the file was read.

    Sets up the file-coverage tracker sized to the file before parsing so
    stat_track() can report the percentage of bytes consumed.
    """
    with open(filepath, "rb") as handle:
        init_filetrack(os.fstat(handle.fileno()).st_size)
        skin = AnmFile.parse(handle, AnmSkin)
        print("Read %s%% of file" % stat_track())
        return skin
def rotate_vec3_by_quaternion(vec: Vec3, quat: Quat):
    """Rotate *vec* by unit quaternion *quat* without building a matrix.

    Uses the identity v' = 2(u·v)u + (s² − u·u)v + 2s(u×v), where u is the
    quaternion's vector part and s its scalar part.  Assumes *quat* is
    normalized — TODO confirm callers guarantee that.
    """
    axis = Vec3(quat.x, quat.y, quat.z)
    scalar = quat.w
    parallel_part = 2 * Vec3.dot(axis, vec) * axis
    perpendicular_part = (scalar * scalar - Vec3.dot(axis, axis)) * vec
    cross_part = 2.0 * scalar * cross(axis, vec)
    return parallel_part + perpendicular_part + cross_part
def main(): def main():
anm = read_anm("/home/tasty/Projects/gck-map-extract-objects/anm_skn/verm_fly.anm") skin = read_skn("/home/tasty/Projects/gck-map-extract-objects/anm_skn/rp_l0.skn")
pprint(anm) anm = read_anm("/home/tasty/Projects/gck-map-extract-objects/anm_skn/rp_run_scan1.anm")
#anm = read_anm("/home/tasty/Projects/gck-map-extract-objects/anm_skn/kb_roar2.anm")
#pprint(skin)
print("----------------------------------------------------------------")
print("----------------------------------------------------------------")
print("----------------------------------------------------------------")
print("----------------------------------------------------------------")
print("----------------------------------------------------------------")
# pprint(anm)
animation = AnmAnimation.create(skin, anm)
gbs = GbsData()
gbs.read("/home/tasty/Projects/gck-map-extract-objects/all_gbs/rp_l0.gbs")
animation.eval(1000, gbs.vertices, 0x4c, 0)
vbase = 0
for binder in animation.binders:
for bindings in binder.bindings:
cluster = bindings.cluster
node = bindings.node
for cluster_vertex_i in range(len(cluster.vertices)):
cluster_vertex = cluster.vertices[cluster_vertex_i]
cluster_vertex += node.transform.affine.translation
cluster_vertex = rotate_vec3_by_quaternion(cluster_vertex, node.transform.affine.quat)
gbs.vertices[vbase] = cluster_vertex
vbase += 1
print(vbase)
gbs.save_obj("/home/tasty/Projects/gck-map-extract-objects/mc_eval_0.obj")
if __name__ == "__main__": if __name__ == "__main__":

141
id_to_gbs.py Normal file
View File

@ -0,0 +1,141 @@
import os
import json
decoded_dir = "/home/tasty/Nextcloud/docs/giants_private/binlnk (compiler for xxbin files)/all/"
def _all_gbs():
all_gbs = {}
objdata = {}
objsets = {}
files = os.listdir(decoded_dir)
for file in files:
fullpath = decoded_dir + file
print("reading %s" % fullpath)
with open(fullpath) as fp:
currblock = None
curr_objset = None
curr_obj = None
lod_found = False
audio_wav = None
audio_dist = None
for line in fp.readlines():
line = line.strip()
if not line:
continue
if line == "[objdata]":
currblock = line
continue
if line == "[objset]":
currblock = line
continue
if line == "[object]":
currblock = line
continue
if line.startswith("[") and line.endswith("]"):
currblock = None
continue
if currblock == "[objdata]":
objdata_id, gbs_name = line.split(" ")
objdata_id = int(objdata_id)
if objdata_id in objdata:
# raise Exception("%s was already in objdata ?! %s" % (objdata_id, objdata[objdata_id]))
pass
gbs_name = gbs_name.replace("\"", "")
print("OBJDATA[%s] = %s" % (objdata_id, gbs_name))
objdata[objdata_id] = gbs_name
if currblock == "[objset]":
line_attrs = line.split(" ")
if line_attrs[0] == "ID":
curr_objset = int(line_attrs[1])
lod_found = False
if line_attrs[0] == "LOD" and not lod_found:
lod_found = True
objset_objdata = int(line_attrs[1])
print("OBJSETS[%s] = %s" % (curr_objset, objset_objdata))
objsets[curr_objset] = objset_objdata
if currblock == "[object]":
line_attrs = line.split(" ")
if line_attrs[0] == "ID":
curr_obj = int(line_attrs[1])
if line_attrs[0] == "OS":
obj_objset = int(line_attrs[1])
if line_attrs[0] == "AmbientStreamLoop":
audio_wav = line_attrs[2].strip("\"")
audio_dist = float(line_attrs[4])
if line_attrs[0] == "Done":
try:
print("OBJ[%s] = OBJSETS[%s] = OBJDATA[%s] = %s" % (
curr_obj, obj_objset, objsets[obj_objset], objdata[objsets[obj_objset]]))
all_gbs[curr_obj] = {"model": objdata[objsets[obj_objset]]}
if audio_wav:
all_gbs[curr_obj]["audio"] = audio_wav
all_gbs[curr_obj]["audiodist"] = audio_dist
except KeyError:
print("ERR: could not find OBJSET %s" % obj_objset)
continue
audio_wav = None
audio_dist = None
return all_gbs
def map_txt_to_json(map_txt_path):
    """Convert ObjectRef/ObjectRef6 entries of a decoded map .txt into JSON.

    Each object becomes {"id", "x", "y", "z", "angle", "angle_2", "angle_3",
    "scale"}; ObjectRef6 carries three angles, ObjectRef only one.  A "Scale"
    line following an object overrides its default scale of 1.  The list is
    written to <map_txt_path>.json and also returned.
    """
    all_objs = []
    curr_obj = None
    with open(map_txt_path) as fp:
        for line in fp.readlines():
            line = line.strip()
            if not line:
                continue
            line_attrs = line.split(" ")
            if line_attrs[0] == "ObjectRef6":
                if curr_obj:
                    all_objs.append(curr_obj)
                curr_obj = {
                    "id": int(line_attrs[1]),
                    "x": float(line_attrs[2]),
                    "y": float(line_attrs[3]),
                    "z": float(line_attrs[4]),
                    "angle": float(line_attrs[5]),
                    "angle_2": float(line_attrs[6]),
                    "angle_3": float(line_attrs[7]),
                    "scale": 1
                }
            elif line_attrs[0] == "ObjectRef":
                if curr_obj:
                    all_objs.append(curr_obj)
                curr_obj = {
                    "id": int(line_attrs[1]),
                    "x": float(line_attrs[2]),
                    "y": float(line_attrs[3]),
                    "z": float(line_attrs[4]),
                    "angle": float(line_attrs[5]),
                    "angle_2": 0,
                    "angle_3": 0,
                    "scale": 1
                }
            elif line_attrs[0] == "Scale" and curr_obj:
                # Guard: a Scale line before any ObjectRef used to crash.
                curr_obj["scale"] = float(line_attrs[1])
    # BUG FIX: the last object was only flushed when another ObjectRef
    # followed it, so the final entry of every file was silently dropped.
    if curr_obj:
        all_objs.append(curr_obj)
    with open(map_txt_path + ".json", "w") as fp:
        json.dump(all_objs, fp)
    return all_objs
def create_id_to_gbs_json():
    """Dump the object-id -> gbs mapping to id_to_gbs.json in the CWD."""
    mapping = _all_gbs()
    with open("id_to_gbs.json", "w") as out_fp:
        json.dump(mapping, out_fp)
if __name__ == '__main__':
create_id_to_gbs_json()
# m = map_txt_to_json("/home/tasty/Projects/Giants/assets/terrains/square_one_1/w_M_3Way_Tigs - Threeway - Square One.bin.txt")
# print(m)

View File

@ -14,6 +14,22 @@ def init_filetrack(size):
file_track = [0] * size file_track = [0] * size
def show_filetrack(track=None):
    """Print each contiguous run of unread (zero) bytes in the coverage map.

    track: coverage list to inspect; defaults to the module-level file_track.
    For every run of zeros, prints its start/end offsets, length (decimal and
    hex) and length in 4-byte ints, then dumps the raw coverage list.
    """
    t = file_track if track is None else track
    i = 0
    curr = 1  # start in the "read" state so a leading zero-run is detected
    curr_len = 0
    while i < len(t):
        if t[i] != curr:
            if curr == 0:
                print("%s starts at %s, ends at %s for %s (%s) bytes (%s ints)" % (curr, i-curr_len, i, curr_len, hex(curr_len), curr_len/4))
            curr = t[i]
            curr_len = 1
        else:
            curr_len += 1
        i += 1
    # BUG FIX: a zero-run extending to the end of the list was never reported
    # because runs were only printed on a transition to a different value.
    if curr == 0 and curr_len:
        print("%s starts at %s, ends at %s for %s (%s) bytes (%s ints)" % (curr, len(t)-curr_len, len(t), curr_len, hex(curr_len), curr_len/4))
    print(t)
def advance_track(offset, size): def advance_track(offset, size):
if not file_track: if not file_track:
return return

File diff suppressed because it is too large Load Diff

32
map2obj.py Normal file
View File

@ -0,0 +1,32 @@
from giantslib.game.map import Map
def main():
    """Export the intro_island height/light maps and print terrain metrics.

    NOTE(review): everything after the early `return` is dead code — an
    OBJ-export path kept for reference; confirm before re-enabling it.
    """
    basedir = "/home/tasty/Projects/gck-map-extract-objects/intro_island/"
    map_path = basedir + "intro_island.zip"
    m = Map(map_path)
    m.save_heightmap(basedir + "heightmap.png")
    m.save_colormap(basedir + "lightmap.png")
    print(m.minheight, m.maxheight, m.stretch)
    return
    # --- unreachable OBJ-dump code below (disabled by the return above) ---
    indexes = [m.vertices.index(tri) for tri in m.triangles]
    print(indexes)
    assert(len(indexes) % 3 == 0)
    assert(max(indexes) < len(m.vertices))
    with open("/home/tasty/Projects/Giants/assets/terrains/test.obj", "w") as fp:
        for v in m.vertices:
            fp.write("v %s %s %s\n" % (v[0], v[1], v[2]))
        for t in range(0, len(indexes), 3):
            fp.write("f %s/%s %s/%s %s/%s\n" % (indexes[t]+1, indexes[t]+1, indexes[t+1]+1, indexes[t+1]+1, indexes[t+2]+1, indexes[t+2]+1))
if __name__ == '__main__':
main()

View File

@ -16,6 +16,7 @@ GBSFlagMaxLit = (1 << 31)
def check(gbs_file, materials): def check(gbs_file, materials):
return
with open("data.json") as fp: with open("data.json") as fp:
d = json.load(fp) d = json.load(fp)
@ -109,6 +110,8 @@ class OBJObject:
self.faces: List[OBJFace] = [] self.faces: List[OBJFace] = []
self.name = "root" self.name = "root"
self.material: Union[None, OBJMaterial] = None self.material: Union[None, OBJMaterial] = None
self.vref_start = 0
self.vref_count = 0
class OBJFace: class OBJFace:
@ -126,14 +129,19 @@ def obj_read_materials(matlib_file) -> List[OBJMaterial]:
line = line.strip() line = line.strip()
arr = line.split(" ") arr = line.split(" ")
if arr[0] == "newmtl": if arr[0] == "newmtl":
if len(arr) <= 1:
curr_mat = None
continue
mat = OBJMaterial() mat = OBJMaterial()
materials.append(mat) materials.append(mat)
mat.name = arr[1].rstrip() mat.name = arr[1].rstrip()
curr_mat = mat curr_mat = mat
if arr[0] == "map_Ka" or arr[0] == "map_Kd": if arr[0] == "map_Ka" or arr[0] == "map_Kd":
if curr_mat:
matname_without_ext = "".join(arr[1:]).split("/")[-1] matname_without_ext = "".join(arr[1:]).split("/")[-1]
matname_without_ext = "".join(matname_without_ext.split(".")[0:-1]) matname_without_ext = "".join(matname_without_ext.split(".")[0:-1])
curr_mat.texture = matname_without_ext curr_mat.texture = matname_without_ext
# print("Set %s to %s" % (curr_mat.texture, curr_mat.name))
return materials return materials
@ -205,6 +213,7 @@ class GbsData:
normals: List[Vec3] = [] normals: List[Vec3] = []
objects: List[OBJObject] = [] objects: List[OBJObject] = []
root_obj = OBJObject() root_obj = OBJObject()
max_objs: List[MaxObj] = []
objects.append(root_obj) objects.append(root_obj)
last_material = None last_material = None
@ -258,22 +267,45 @@ class GbsData:
f.index_normals.append(v3_normal_index) f.index_normals.append(v3_normal_index)
current_object.faces.append(f) current_object.faces.append(f)
if arr[0] == "o": if arr[0] == "o":
obj_name = arr[1].rstrip() obj_line = arr[-1].rstrip()
try:
end = obj_line.index("_#_")
except ValueError:
end = None
obj_name = obj_line[0:end]
o = OBJObject() o = OBJObject()
o.name = obj_name o.name = obj_name
o.material = last_material o.material = last_material
if end:
meta = obj_line[end+3:].split("_")
o.vref_start = int(meta[0])
o.vref_count = int(meta[1])
objects.append(o) objects.append(o)
if len(current_object.faces) == 0: if len(current_object.faces) == 0:
objects.remove(current_object) objects.remove(current_object)
current_object = o current_object = o
if arr[0] == "usemtl": if arr[0] == "usemtl" and len(arr) > 1:
mtl_name = arr[1].rstrip() mtl_name = arr[1].rstrip()
if mtl_name:
mtl = [mat for mat in materials if mat.name == mtl_name][0] mtl = [mat for mat in materials if mat.name == mtl_name][0]
current_object.material = mtl current_object.material = mtl
last_material = mtl last_material = mtl
if arr[0] == "mtllib": if arr[0] == "mtllib":
matlib_file = arr[1].rstrip() matlib_file = arr[1].rstrip()
materials = obj_read_materials("%s/%s" % (os.path.dirname(obj_file), matlib_file)) obj_mat = "%s/%s" % (os.path.dirname(obj_file), matlib_file)
print(obj_mat)
materials = obj_read_materials(obj_mat)
if arr[0] == "#" and arr[1] == "maxobj":
max_obj = MaxObj()
max_obj.vstart = int(arr[2])
max_obj.vcount = int(arr[3])
max_obj.nstart = int(arr[4])
max_obj.ncount = int(arr[5])
max_obj.noffset = int(arr[6])
max_objs.append(max_obj)
num_faces = sum([len(o.faces) for o in objects]) num_faces = sum([len(o.faces) for o in objects])
print("%s vertices, %s uvs, %s normals, %s objects, %s materials, %s faces" % (len(vertices), len(uvs), len(normals), len(objects), len(materials), num_faces)) print("%s vertices, %s uvs, %s normals, %s objects, %s materials, %s faces" % (len(vertices), len(uvs), len(normals), len(objects), len(materials), num_faces))
@ -339,15 +371,27 @@ class GbsData:
data.put_float(v.v * -1) data.put_float(v.v * -1)
# max objects # max objects
print("There are %s max objects" % len(max_objs))
if not max_objs:
data.put_long(1) # 1 big object data.put_long(1) # 1 big object
data.put_long(0) # vstart data.put_long(0) # vstart
data.put_long(len_vertices) # vcount data.put_long(len_vertices) # vcount
data.put_long(0) # nstart data.put_long(0) # nstart
data.put_long(0) # ncount data.put_long(0) # ncount
data.put_long(0) # noffset ??? data.put_long(0) # noffset ???
else:
data.put_long(len(max_objs))
for i in range(len(max_objs)):
max_obj = max_objs[i]
data.put_long(max_obj.vstart)
data.put_long(max_obj.vcount)
data.put_long(max_obj.nstart)
data.put_long(max_obj.ncount)
data.put_long(max_obj.noffset)
# start write subobjects # start write subobjects
data.put_long(len(objects)) data.put_long(len(objects))
print("THERE ARE %s subobjects" % len(objects))
for obj in objects: for obj in objects:
data.put_string_size(obj.name, 32) data.put_string_size(obj.name, 32)
data.put_long(0) # max obj index data.put_long(0) # max obj index
@ -360,8 +404,8 @@ class GbsData:
data.put_short(face.index_uvs[1] - 1) data.put_short(face.index_uvs[1] - 1)
data.put_short(face.index_uvs[2] - 1) data.put_short(face.index_uvs[2] - 1)
data.put_long(0) # verticeref_start data.put_long(obj.vref_start) # verticeref_start
data.put_long(nverts) # verticeref_count data.put_long(nverts if obj.vref_count == 0 else obj.vref_count) # verticeref_count
if options & GBSFlagUVs: if options & GBSFlagUVs:
data.put_string_size(obj.material.texture, 32) # texture data.put_string_size(obj.material.texture, 32) # texture
data.put_string_size(obj.material.texture, 32) # bump data.put_string_size(obj.material.texture, 32) # bump
@ -391,6 +435,7 @@ def convert_obj(path):
output = "%s/%s.gbs" % (os.path.dirname(os.path.abspath(path)), os.path.basename(path)) output = "%s/%s.gbs" % (os.path.dirname(os.path.abspath(path)), os.path.basename(path))
print("Done! Output: %s" % output) print("Done! Output: %s" % output)
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument("path") parser.add_argument("path")

103
skn.py
View File

@ -1,5 +1,8 @@
from lib.skn_anm import * from lib.skn_anm import *
from lib.gbs import * from lib.gbs import *
import struct
import os
import shutil
def read_skn(filepath) -> AnmSkin: def read_skn(filepath) -> AnmSkin:
@ -27,8 +30,9 @@ def test():
print("%s: dist=%s" % (mini_dist_v, mini_dist_v.distance(my_vertex))) print("%s: dist=%s" % (mini_dist_v, mini_dist_v.distance(my_vertex)))
def print_vertices(): def print_vertices(file):
skn = read_skn("/home/tasty/Projects/gck-map-extract-objects/test/rp_l0.apatch.gzp.skn") skn = read_skn(file)
pprint(skn)
for obj_name in skn.default_obj_dictionnary: for obj_name in skn.default_obj_dictionnary:
obj = skn.default_obj_dictionnary[obj_name] obj = skn.default_obj_dictionnary[obj_name]
for cluster in obj.clusters: for cluster in obj.clusters:
@ -54,13 +58,12 @@ def link_gbs_skn():
vbase += obj.total_number_vertices vbase += obj.total_number_vertices
def main(): def main(file):
skn = read_skn("/home/tasty/Projects/gck-map-extract-objects/test/rp_l0.apatch.gzp.skn") skn = read_skn(file)
pprint(skn) pprint(skn)
vbase = 0 vbase = 0
for tree in skn.tree_dictionnary: for tree in skn.tree_dictionnary:
for obj_name in skn.tree_dictionnary[tree]: for obj_name in skn.tree_dictionnary[tree]:
print(obj_name)
obj = skn.tree_dictionnary[tree][obj_name] obj = skn.tree_dictionnary[tree][obj_name]
for cluster in obj.clusters: for cluster in obj.clusters:
if cluster.num_vertices <= 0: if cluster.num_vertices <= 0:
@ -74,12 +77,96 @@ def main():
vbase = 0 vbase = 0
for obj_name, obj in skn.default_obj_dictionnary.items(): for obj_name, obj in skn.default_obj_dictionnary.items():
for cluster in obj.clusters: for cluster in obj.clusters:
## 81: part of hair
## 12: left eye
## 9/13: leg or arm
## 15/17 is a boob
if cluster.handle == 15:
# fp.write("name %s %s %s %s %s %s %s\n" % (cluster.bone_name.strip().replace(" ", ""), cluster.bounding_box[0].x, cluster.bounding_box[0].y, cluster.bounding_box[0].z, cluster.bounding_box[1].x, cluster.bounding_box[1].y, cluster.bounding_box[1].z)) # fp.write("name %s %s %s %s %s %s %s\n" % (cluster.bone_name.strip().replace(" ", ""), cluster.bounding_box[0].x, cluster.bounding_box[0].y, cluster.bounding_box[0].z, cluster.bounding_box[1].x, cluster.bounding_box[1].y, cluster.bounding_box[1].z))
for vertex in cluster.vertices: for vertex in cluster.vertices:
fp.write("index %s\n" % (vertex.index + vbase)) # fp.write("index %s\n" % (vertex.index + vbase))
# fp.write("offset %s %s %s\n" % (vertex.x, vertex.y, vertex.z)) fp.write("%s %s %s 0 0 0\n" % (vertex.x, vertex.y, vertex.z))
vbase += obj.total_number_vertices vbase += obj.total_number_vertices
def replace_vertex(x1, y1, z1, x2, y2, z2, skn_file):
    """Replace one packed vertex (x1,y1,z1 -> x2,y2,z2) in a copy of skn_file.

    All six coordinates are byte strings (packed floats); the patched copy
    lives at <skn_file>.repl and is created from the original on first use so
    repeated calls accumulate edits.  Raises AssertionError when the source
    vertex bytes are not present.
    """
    final = skn_file + ".repl"
    if not os.path.exists(final):
        shutil.copy(skn_file, final)
    with open(final, "rb") as fp:
        bytesarr = bytearray(fp.read())
    bfind = x1 + y1 + z1
    brepl = x2 + y2 + z2
    bytesarr_new = bytesarr.replace(bfind, brepl)
    assert(bytesarr_new != bytesarr)
    # BUG FIX: the file used to be opened in append mode ("ab+"), where every
    # write lands at end-of-file regardless of seek(0) — the patched bytes
    # were appended after the original content instead of replacing it.
    with open(final, "wb") as fp:
        fp.write(bytesarr_new)
def replace_vertices(file_input, skn_file):
    """Batch-patch vertices in skn_file according to a text instruction file.

    Each non-empty, non-comment line of file_input holds six floats:
    "x1 y1 z1 x2 y2 z2" — the source vertex and its replacement.  Every
    triple is packed as little-endian float32 and substituted in the raw
    bytes; duplicate source triples are applied only once.  The result is
    written to <skn_file>.repl.  Raises AssertionError when a source triple
    is not found in the file (NOTE: asserts vanish under `python -O`).
    """
    with open(skn_file, "rb") as fp:
        orig_bytes = bytearray(fp.read())
    with open(file_input, "r") as fp:
        lines = fp.readlines()
    seen = set()  # set membership is O(1); the old list scan was O(n) per line
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        a = line.split(" ")
        print(line)
        assert(len(a) == 6)
        x1 = struct.pack("<f", float(a[0]))
        y1 = struct.pack("<f", float(a[1]))
        z1 = struct.pack("<f", float(a[2]))
        if (x1, y1, z1) in seen:
            continue
        seen.add((x1, y1, z1))
        x2 = struct.pack("<f", float(a[3]))
        y2 = struct.pack("<f", float(a[4]))
        z2 = struct.pack("<f", float(a[5]))
        new_bytes = orig_bytes.replace(x1 + y1 + z1, x2 + y2 + z2)
        assert(new_bytes != orig_bytes)  # the source vertex must exist
        orig_bytes = new_bytes
    final = skn_file + ".repl"
    if not os.path.exists(final):
        # Kept from the original for parity; the "wb" write below overwrites
        # the copied content anyway.
        shutil.copy(skn_file, final)
    with open(final, "wb") as fp:
        print("writing final file %s..." % final)
        fp.write(orig_bytes)
def combine_files(input, newfile):
    """Join the lines of *newfile* and *input* pairwise ("<out> <in>").

    Both files must have the same number of lines.  Each output line is the
    corresponding *newfile* line (newline stripped), a space, and the
    *input* line with its trailing newline intact.  The result is written to
    <newfile>.combined.
    """
    with open(input, "r") as extra_fp:
        extra_lines = extra_fp.readlines()
    with open(newfile, "r") as base_fp:
        base_lines = base_fp.readlines()
    assert(len(extra_lines) == len(base_lines))
    merged = [base.replace("\n", "") + " " + extra
              for base, extra in zip(base_lines, extra_lines)]
    with open(newfile + ".combined", "w") as out_fp:
        out_fp.writelines(merged)
if __name__ == "__main__": if __name__ == "__main__":
main() file = "/home/tasty/Projects/gck-map-extract-objects/test/MC_shotgun_L0.xx_mecc_flick.gzp.skn"
s = read_skn(file)
pprint(s)