Mirror of https://git.code.sf.net/p/quake/quakeforge (synced 2025-01-19 07:20:50 +00:00)
Revamp entity class loading for Blender integration.
EntityClass building is slightly cleaner, and the directory scanner is now part of the new EntityClassDict class, which also supports reading and writing plists (for persistent storage in Blender).
parent b3e5083f60
commit 3cad0f978b
1 changed file with 104 additions and 49 deletions
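For illustration only (not part of this commit), a minimal sketch of how the new EntityClassDict might be driven from the Blender addon. The package name io_qfmap and the QuakeC source path are assumptions; only the class and its methods come from the diff below.

# Rough usage sketch; "io_qfmap" and the source path are assumed, not from this commit.
from io_qfmap.entityclass import EntityClassDict

ecd = EntityClassDict()
ecd.from_source_tree("/path/to/quakec/progs")  # scan .qc files for /*QUAKED comments
plist_text = ecd.to_plist()                    # serialize for persistent storage in Blender

# Later (e.g. after reopening the .blend file), rebuild from the stored plist text.
restored = EntityClassDict()
restored.from_plist(plist_text)
for name, ec in restored.entity_classes.items():
    print(name, ec.color, ec.size, ec.flagnames)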
@@ -1,20 +1,60 @@
 # vim:ts=4:et
+
+import os
 from .script import Script
+from .qfplist import pldata
 
 MAX_FLAGS = 8
 
 class EntityClass:
-    def __init__(self, text, filename, line = 0):
+    def __init__(self, name, color, size, flagnames, comment):
+        self.name = name
+        self.color = color
+        self.size = size
+        self.flagnames = flagnames
+        self.comment = comment
+    @classmethod
+    def from_quaked(cls, text, filename, line = 0):
         script = Script(filename, text)
         if line:
             script.line = line
         script.getToken() # skip over the leading '/*QUAKED'
-        self.name = script.getToken()
-        self.color = self.parse_vector(script)
-        self.size = self.parse_size(script)
-        self.flagnames = self.parse_flags(script)
-        self.comment = self.extract_comment(script)
-    def parse_vector(self, script):
+        name = script.getToken()
+        color = cls.parse_vector(script)
+        size = cls.parse_size(script)
+        flagnames = cls.parse_flags(script)
+        comment = cls.extract_comment(script)
+        return cls(name, color, size, flagnames, comment)
+    @classmethod
+    def from_dictionary(cls, name, d):
+        if "color" in d:
+            color = d["color"]
+            color = float(color[0]), float(color[1]), float(color[2])
+        else:
+            color = (0.0, 0.0, 0.0)
+        if "size" in d:
+            mins, maxs = d["size"]
+            size = ((float(mins[0]), float(mins[1]), float(mins[2])),
+                    (float(maxs[0]), float(maxs[1]), float(maxs[2])))
+        else:
+            size = None
+        if "flagnames" in d:
+            flagnames = list(d["flagnames"])
+        else:
+            flagnames = []
+        if "comment" in d:
+            comment = d["comment"]
+        else:
+            comment = ""
+        return cls(name, color, size, flagnames, comment)
+    def to_dictionary(self):
+        d = {"color":self.color, "flagnames":self.flagnames,
+             "comment":self.comment}
+        if self.size:
+            d["size"] = self.size
+        return d
+    @classmethod
+    def parse_vector(cls, script):
         if script.getToken() != "(":
             raise SyntaxError
         v = (float(script.getToken()), float(script.getToken()),
@@ -22,12 +62,14 @@ class EntityClass:
         if script.getToken() != ")":
             raise SyntaxError
         return v
-    def parse_size(self, script):
+    @classmethod
+    def parse_size(cls, script):
         if script.getToken() == "?":
             return None # use brush size
         script.ungetToken()
-        return self.parse_vector(script), self.parse_vector(script)
-    def parse_flags(self, script):
+        return cls.parse_vector(script), cls.parse_vector(script)
+    @classmethod
+    def parse_flags(cls, script):
         flagnames = []
         while script.tokenAvailable():
             #any remaining words on the line are flag names, but only MAX_FLAGS
@@ -36,7 +78,8 @@ class EntityClass:
             if len(flagnames) < MAX_FLAGS:
                 flagnames.append(script.token)
         return tuple(flagnames)
-    def extract_comment(selk, script):
+    @classmethod
+    def extract_comment(cls, script):
         if not script.tokenAvailable(True):
             return ""
         start = pos = script.pos
@@ -50,44 +93,56 @@ class EntityClass:
         script.pos = pos
         return comment
 
-import os
-
-def scan_source(fname, entity_classes):
-    text = open(fname, "rt").read()
-    line = 1
-    pos = 0
-    while pos < len(text):
-        if text[pos:pos + 8] == "/*QUAKED":
-            start = pos
-            start_line = line
-            while pos < len(text) and text[pos:pos + 2] != "*/":
-                if text[pos] == "\n":
-                    line += 1
-                pos += 1
-            if pos < len(text):
-                pos += 2
-            ec = EntityClass(text[start:pos], fname, start_line)
-            entity_classes[ec.name] = ec
-            print(ec.name)
-        else:
-            if text[pos] == "\n":
-                line += 1
-            pos += 1
-
-def scan_directory(path, entity_classes):
-    files = os.listdir(path)
-    files.sort()
-    for f in files:
-        if f[0] in [".", "_"]:
-            continue
-        if os.path.isdir(os.path.join(path, f)):
-            scan_directory(os.path.join(path, f), entity_classes)
-        else:
-            if f[-3:] == ".qc":
-                scan_source(os.path.join(path, f), entity_classes)
-
-def build_entityclasses(path):
-    entity_classes = {}
-    scan_directory(path, entity_classes)
-    return entity_classes
-
+class EntityClassDict:
+    def __init__(self):
+        self.path = ""
+        self.entity_classes = {}
+    def scan_source(self, fname):
+        text = open(fname, "rt").read()
+        line = 1
+        pos = 0
+        while pos < len(text):
+            if text[pos:pos + 8] == "/*QUAKED":
+                start = pos
+                start_line = line
+                while pos < len(text) and text[pos:pos + 2] != "*/":
+                    if text[pos] == "\n":
+                        line += 1
+                    pos += 1
+                if pos < len(text):
+                    pos += 2
+                ec = EntityClass.from_quaked(text[start:pos], fname,
+                                             start_line)
+                self.entity_classes[ec.name] = ec
+                print(ec.name)
+            else:
+                if text[pos] == "\n":
+                    line += 1
+                pos += 1
+    def scan_directory(self, path):
+        files = os.listdir(path)
+        files.sort()
+        for f in files:
+            if f[0] in [".", "_"]:
+                continue
+            if os.path.isdir(os.path.join(path, f)):
+                self.scan_directory(os.path.join(path, f))
+            else:
+                if f[-3:] == ".qc":
+                    self.scan_source(os.path.join(path, f))
+    def from_source_tree(self, path):
+        self.path = path
+        self.entity_classes = {}
+        self.scan_directory(self.path)
+    def to_plist(self):
+        pl = pldata()
+        ec = {}
+        for k in self.entity_classes.keys():
+            ec[k] = self.entity_classes[k].to_dictionary()
+        return pl.write(ec)
+    def from_plist(self, plist):
+        pl = pldata(plist)
+        ec = pl.parse()
+        self.entity_classes = {}
+        for k in ec.keys():
+            self.entity_classes[k] = EntityClass.from_dictionary(k, ec[k])
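For reference, from_quaked() consumes the QUAKED comment convention used in QuakeC sources: the classname, a color vector, either a bounding box pair or "?", spawnflag names on the same line, and a free-form comment up to the closing "*/". A hedged sketch of parsing one such block; the entity definition, file name, package name, and expected values are all made up for illustration, not taken from this commit:

# Illustrative only: the QUAKED block and expected values below are invented.
from io_qfmap.entityclass import EntityClass

quaked = """/*QUAKED item_health (0.3 0.3 1) (0 0 0) (32 32 32) ROTTEN MEGAHEALTH
Health box: ROTTEN gives 15 points, MEGAHEALTH gives 100.
*/"""

ec = EntityClass.from_quaked(quaked, "items.qc")
print(ec.name)       # expected: item_health
print(ec.color)      # expected: (0.3, 0.3, 1.0)
print(ec.size)       # expected: ((0.0, 0.0, 0.0), (32.0, 32.0, 32.0))
print(ec.flagnames)  # expected: ("ROTTEN", "MEGAHEALTH")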