284 lines
9.3 KiB
GDScript
284 lines
9.3 KiB
GDScript
extends Node

# Loader for Giants: Citizen Kabuto game assets: indexes .gzp archives and
# extracts .gbs models / .tga textures into Godot resources.
# NOTE(review): indentation was lost in extraction — the func definitions
# below this class header belong inside ObjectManager (they read `index`
# and GIANTS_PATH); confirm against the original file.
class ObjectManager:

	# Root of the local Giants installation.
	# NOTE(review): hard-coded machine-specific path — should be made
	# configurable (export var / project setting) before wider use.
	var GIANTS_PATH = "/home/tasty/Jeux/Giants Citizen Kabuto"

	# Maps asset file name (e.g. "body.gbs", "skin.tga") -> absolute path of
	# the .gzp archive that contains it. Populated by build().
	var index = {}
|
|
|
|
func build() -> void:
	# Scan <GIANTS_PATH>/Bin for .gzp archives and index every asset they
	# contain (see read_gzp_to_indexes). Safe to call repeatedly; later
	# archives overwrite earlier index entries for duplicate names.
	var bin_dir = Directory.new()
	if bin_dir.open(GIANTS_PATH + "/Bin") != OK:
		# Fail loudly instead of silently leaving the index empty.
		push_error("Could not open directory: " + GIANTS_PATH + "/Bin")
		return
	# Skip "." / ".." and hidden entries while listing.
	bin_dir.list_dir_begin(true, true)
	var file_name = bin_dir.get_next()
	while file_name != "":
		if not bin_dir.current_is_dir() and file_name.ends_with(".gzp"):
			var gzp_file = File.new()
			# Only index archives that actually open; the original ignored
			# the open() result and would read from a closed File.
			if gzp_file.open(GIANTS_PATH + "/Bin/" + file_name, File.READ) == OK:
				read_gzp_to_indexes(gzp_file)
				gzp_file.close()
			else:
				push_error("Could not open GZP file: " + file_name)
		file_name = bin_dir.get_next()
	# Release the directory listing handle (was missing in the original).
	bin_dir.list_dir_end()
|
|
|
|
func read_gzp_to_indexes(file: File) -> void:
	# Parse a GZP archive's directory and record, for every .tga/.gbs entry,
	# which archive it lives in: index[name] = absolute path of the archive.
	# `file` must already be open for reading; the caller closes it.
	var checksum = file.get_32()
	if checksum != 0x6608F101:
		# assert() is stripped from release builds, so a corrupt archive
		# would previously be parsed as garbage — validate explicitly.
		push_error("Bad GZP checksum in " + file.get_path_absolute())
		return
	var meta_info_offset = file.get_32()
	file.seek(meta_info_offset)
	var _unk = file.get_32()  # unknown header field — meaning not established
	var entries_count = file.get_32()
	for _i in range(entries_count):
		# Fixed-layout entry header. The size/offset fields are unused here
		# but must be consumed to stay aligned with the stream; underscore
		# prefixes silence unused-variable warnings.
		var _compressed_size = file.get_32()
		var _original_size = file.get_32()
		var _file_time = file.get_32()
		var _content_offset = file.get_32() + 16
		var _compression = file.get_8()
		var name_length = file.get_8()
		var name = file.get_buffer(name_length).get_string_from_ascii()
		if name.ends_with(".tga") or name.ends_with(".gbs"):
			index[name] = file.get_path_absolute()
|
|
|
|
func read_file_in_gzp(filename: String) -> StreamPeerBuffer:
	# Extract a single file from its indexed GZP archive and return its
	# (decompressed, when compression == 1) contents as a StreamPeerBuffer.
	# Returns an EMPTY buffer on any failure — callers should check
	# data_array.size() before trusting the result.
	var res = StreamPeerBuffer.new()
	if not index.has(filename):
		# The original indexed the dict directly and crashed on a miss.
		push_error("File not indexed: " + filename)
		return res
	var gzp_path = index[filename]
	var file = File.new()
	if file.open(gzp_path, File.READ) != OK:
		push_error("Could not open " + gzp_path + " for " + filename)
		return res
	var checksum = file.get_32()
	if checksum != 0x6608F101:
		# Explicit check: assert() is stripped in release builds.
		push_error("Bad GZP checksum in " + gzp_path)
		file.close()
		return res
	var meta_info_offset = file.get_32()
	file.seek(meta_info_offset)
	var _unk = file.get_32()  # unknown header field
	var entries_count = file.get_32()
	for _i in range(entries_count):
		var compressed_size = file.get_32()
		var original_size = file.get_32()
		var _file_time = file.get_32()
		# Content offsets are stored relative to the 16-byte archive header.
		var content_offset = file.get_32() + 16
		var compression = file.get_8()
		var name_length = file.get_8()
		var name = file.get_buffer(name_length).get_string_from_ascii()
		if name == filename:
			file.seek(content_offset)
			var data = file.get_buffer(compressed_size)
			if compression == 1:
				data = _decompress(data, original_size)
			res.data_array = data
			# Stop at the first match; the original kept scanning the whole
			# directory (and would have returned the LAST duplicate, but
			# duplicate names inside one archive are not expected).
			break
	file.close()
	return res
|
|
|
|
func load_model(gbs_file: String):
	# Parse a .gbs model (Giants GBS geometry format) out of its GZP archive
	# into a Model object (res://model.gd). Returns the populated Model.
	# NOTE(review): the original indentation was lost in extraction; the
	# nesting of the `u1 == 7` sections below is reconstructed — verify
	# against a known-good .gbs file.
	var gbs_stream = read_file_in_gzp(gbs_file)
	var model = load("res://model.gd").Model.new()
	var modelitem_gd = load("res://modelitem.gd")
	model.magic = gbs_stream.get_32()
	# u1 looks like a format/flags word: value 7 adds vertexref/UV/ref1
	# sections below — TODO confirm semantics.
	model.u1 = gbs_stream.get_32()
	# Base (object-space) vertex positions, 3 floats each.
	model.basepoints = gbs_stream.get_32()
	print("basepoints="+str(model.basepoints))
	model.basepoint = []
	model.basepoint.resize(model.basepoints)
	for s in range(model.basepoints):
		var x = gbs_stream.get_float()
		var y = gbs_stream.get_float()
		var z = gbs_stream.get_float()
		model.basepoint[s] = [x, y, z]

	if model.u1 == 7:
		model.texpos = gbs_stream.get_32()
		# Indices mapping rendered vertices back to base points.
		model.vertexrefs = gbs_stream.get_32()
		model.vertexref.resize(model.vertexrefs)
		for s in range(model.vertexrefs):
			model.vertexref[s] = gbs_stream.get_16()

	# Per-rendered-point data: point_1[s] indexes into basepoint.
	model.points = gbs_stream.get_32()
	model.point_1.resize(model.points)
	print("points="+str(model.points))
	if model.u1 == 7:
		model.point_2.resize(model.points)
		model.point_uv.resize(model.points)
	# model.point_c.resize(model.points*3)
	for s in range(model.points):
		model.point_1[s] = gbs_stream.get_16()
	if model.u1 == 7:
		for s in range(model.points):
			model.point_2[s] = gbs_stream.get_16()
		for s in range(model.points):
			model.point_uv[s] = []
			model.point_uv[s].resize(2)
			model.point_uv[s][0] = gbs_stream.get_float()
			model.point_uv[s][1] = gbs_stream.get_float()

	# Per-point vertex colors: 3 bytes RGB, flattened into point_c.
	for s in range(model.points):
		var r = gbs_stream.get_8()
		var g = gbs_stream.get_8()
		var b = gbs_stream.get_8()
		model.point_c.append(r)
		model.point_c.append(g)
		model.point_c.append(b)

	# ref1: i records of five 32-bit values — purpose not established here.
	var i = gbs_stream.get_32()
	print("i="+str(i))
	model.ref1.resize(i)
	for s in range(i):
		model.ref1[s] = []
		model.ref1[s].resize(5)
		model.ref1[s][0] = gbs_stream.get_32()
		model.ref1[s][1] = gbs_stream.get_32()
		model.ref1[s][2] = gbs_stream.get_32()
		model.ref1[s][3] = gbs_stream.get_32()
		model.ref1[s][4] = gbs_stream.get_32()

	# Sub-object ("part") records: name, triangle list and material data.
	model.parts = gbs_stream.get_32()
	print("parts="+str(model.parts))
	model.part.resize(model.parts)
	for p in range(model.parts):
		model.part[p] = modelitem_gd.ModelItem.new()
		model.part[p].objname = gbs_stream.get_string(32)
		print("Objname: "+model.part[p].objname)
		model.part[p].objindex = gbs_stream.get_32()
		model.part[p].refs = gbs_stream.get_32()
		model.part[p].wordz = gbs_stream.get_32()
		model.part[p].refs_ = gbs_stream.get_16()
		# refs triangles, three 16-bit point indices each.
		model.part[p].triangle.resize(model.part[p].refs)
		for s in range(model.part[p].refs):
			model.part[p].triangle[s] = []
			model.part[p].triangle[s].resize(3)
			model.part[p].triangle[s][0] = gbs_stream.get_16()
			model.part[p].triangle[s][1] = gbs_stream.get_16()
			model.part[p].triangle[s][2] = gbs_stream.get_16()
		model.part[p].refstart = gbs_stream.get_32()
		model.part[p].refnum = gbs_stream.get_32()
		# Material block: two fixed 32-byte texture names plus lighting.
		model.part[p].texture = gbs_stream.get_string(32)
		model.part[p].bumptexture = gbs_stream.get_string(32)
		model.part[p].falloff = gbs_stream.get_float()
		model.part[p].blend = gbs_stream.get_float()
		model.part[p].flags = gbs_stream.get_32()
		model.part[p].emissive = gbs_stream.get_32()
		model.part[p].ambiant = gbs_stream.get_32()
		model.part[p].diffuse = gbs_stream.get_32()
		model.part[p].specular = gbs_stream.get_32()
		model.part[p].power = gbs_stream.get_float()

	# One texture per part (null when the part has no texture name).
	model.tex.resize(model.parts)
	for s in range(model.parts):
		if model.part[s].texture != "":
			model.tex[s] = load_texture(model.part[s].texture+".tga")
		else:
			model.tex[s] = null

	# Bounding data: maxbound = greatest vertex distance from the origin;
	# bounds1/bounds2 = min/max corners of the axis-aligned bounding box.
	# BUGFIX: GDScript Arrays assign by reference, so the original
	# `model.bounds1 = model.basepoint[0]` aliased the first vertex and the
	# loops below silently corrupted it. duplicate() takes a copy.
	model.bounds1 = model.basepoint[0].duplicate()
	model.bounds2 = model.basepoint[0].duplicate()
	model.maxbound = 0
	for s in range(model.basepoints):
		var r = sqrt(pow(model.basepoint[s][0], 2) + pow(model.basepoint[s][1], 2) + pow(model.basepoint[s][2], 2))
		if r > model.maxbound:
			model.maxbound = r

	for s in range(model.basepoints):
		if model.basepoint[s][0] < model.bounds1[0]:
			model.bounds1[0] = model.basepoint[s][0]
		if model.basepoint[s][1] < model.bounds1[1]:
			model.bounds1[1] = model.basepoint[s][1]
		if model.basepoint[s][2] < model.bounds1[2]:
			model.bounds1[2] = model.basepoint[s][2]

		if model.basepoint[s][0] > model.bounds2[0]:
			model.bounds2[0] = model.basepoint[s][0]
		if model.basepoint[s][1] > model.bounds2[1]:
			model.bounds2[1] = model.basepoint[s][1]
		if model.basepoint[s][2] > model.bounds2[2]:
			model.bounds2[2] = model.basepoint[s][2]

	return model
|
|
|
|
func load_texture(texture_file: String) -> ImageTexture:
	# Extract a .tga from its GZP archive, write it under res://tmp so the
	# built-in Image loader can read it, then return it as an ImageTexture.
	# Returns null when extraction, writing, or decoding fails (the original
	# ignored every error and could return a blank/broken texture).
	print("Loading texture "+texture_file)
	var tmpDirectory = Directory.new()
	if not tmpDirectory.dir_exists("res://tmp"):
		tmpDirectory.make_dir("res://tmp")
	var stream = read_file_in_gzp(texture_file)
	if stream.data_array.size() == 0:
		push_error("No data extracted for texture " + texture_file)
		return null
	var tmpFile = File.new()
	if tmpFile.open("res://tmp/"+texture_file, File.WRITE) != OK:
		push_error("Could not write res://tmp/" + texture_file)
		return null
	tmpFile.store_buffer(stream.data_array)
	tmpFile.close()
	var img = Image.new()
	if img.load("res://tmp/"+texture_file) != OK:
		push_error("Could not decode texture " + texture_file)
		return null
	var tex = ImageTexture.new()
	tex.create_from_image(img)
	# Temp file is intentionally kept (removal was commented out upstream):
	# tmpDirectory.remove("res://tmp/"+texture_file)
	return tex
|
|
|
|
func load_model_mesh(model) -> MeshInstance:
	# Build a renderable MeshInstance from a parsed Model: one mesh surface
	# per part, textured when the part has a texture, vertex-colored
	# otherwise. NOTE(review): reads point_uv unconditionally, which is only
	# populated when model.u1 == 7 (see load_model) — confirm all loaded
	# models satisfy that, or this will index an empty array.
	var meshinstance = MeshInstance.new()
	var mesh = Mesh.new()
	for i in range(model.parts):
		var st = SurfaceTool.new()
		st.begin(Mesh.PRIMITIVE_TRIANGLES)
		var mat = SpatialMaterial.new()
		# Render double-sided; source winding order is not guaranteed.
		mat.params_cull_mode = SpatialMaterial.CULL_DISABLED
		if model.tex[i]:
			mat.albedo_texture = model.tex[i]
		else:
			mat.vertex_color_use_as_albedo = true
		st.set_material(mat)

		# NOTE(review): unclear invariant — basepoint holds one entry per
		# base vertex, not per triangle, so "% 3 == 0" looks accidental
		# (possibly meant for point_c). Confirm before relying on it.
		assert(len(model.basepoint) % 3 == 0)

		for j in range(len(model.part[i].triangle)):
			for k in range(3):
				# l: per-corner index into the point_* arrays.
				var l = model.part[i].triangle[j][k]
				st.add_uv(Vector2(model.point_uv[l][0], model.point_uv[l][1]))
				# Vertex color = per-point RGB plus the part's packed
				# diffuse word (low byte = R, then G, then B), both
				# scaled into 0..1. Sums may exceed 1.0 and clamp.
				st.add_color(Color(
					model.point_c[l*3+0] / 255.0 + (model.part[i].diffuse & 255) / 255.0,
					model.point_c[l*3+1] / 255.0 + ((model.part[i].diffuse >> 8) & 255) / 255.0,
					model.point_c[l*3+2] / 255.0 + ((model.part[i].diffuse >> 16) & 255) / 255.0))
				# Remap point index to its base vertex; Y and Z are
				# swapped to convert to Godot's axis convention.
				l = model.point_1[l]
				st.add_vertex(Vector3(
					model.basepoint[l][0],
					model.basepoint[l][2],
					model.basepoint[l][1]))
		# Append this part as a new surface on the accumulating mesh.
		mesh = st.commit(mesh)

	meshinstance.mesh = mesh
	return meshinstance
|
|
|
|
func _decompress(compressed_bytes: PoolByteArray, original_size: int) -> PoolByteArray:
	# LZSS decompression as used by GZP entries (compression == 1).
	# Stream layout: a flag byte yields 8 items; flag bit 1 = literal byte,
	# flag bit 0 = back-reference of [12-bit window position, 4-bit length
	# with +3 bias] into a 4 KiB sliding window.
	# Returns exactly original_size bytes.
	var i = 0          # read cursor into compressed_bytes
	var j = 0          # write cursor into res
	var dec_byte = 0   # current flag byte
	var dec_bits = 8   # flag bits consumed so far; 8 forces a reload
	var buff_start = 0xFEE  # initial window write offset (standard LZSS)

	var res = PoolByteArray()
	res.resize(original_size)
	if (original_size == 0):
		return res

	while j < original_size:
		if dec_bits == 8:
			# Fetch the flag byte controlling the next 8 items.
			dec_byte = compressed_bytes[i]
			i+=1
			dec_bits = 0
		if (dec_byte >> dec_bits & 1) == 0:
			# Back-reference: position = low byte + high nibble of the
			# second byte (12 bits). The arithmetic rebases the circular
			# window position so dec_pos indexes res directly; negative
			# dec_pos falls before any written output and reads the
			# window's fill byte instead.
			var dec_pos = ((compressed_bytes[i] + ((compressed_bytes[i + 1] & 0xF0) << 4) - buff_start - j) & 0xFFF) - 0x1000 + j
			var dec_len = (compressed_bytes[i + 1] & 0xF) + 3
			i+=2
			# NOTE(review): assumes a well-formed stream — no bounds check
			# that j stays < original_size during this copy.
			while dec_len > 0:
				if dec_pos >= 0:
					res[j] = res[dec_pos]
				else:
					res[j] = 32  # LZSS windows are conventionally space-filled
				j+=1
				dec_pos += 1
				dec_len -= 1
		else:
			# Literal: copy one byte through unchanged.
			res[j] = compressed_bytes[i]
			i+=1
			j+=1
		dec_bits += 1
	return res
|