[qfmap] Parse field info from quaked comments

Id's QUAKED comments are a little inconsistent, but for the most part
usable info can be extracted. Arcane Dimensions' comments are not yet
supported, but they are extremely consistent (just some issues with
hyphen counts in the separators), so parsing out usable info from them
will be fairly easy. The hard part will be presenting it.
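
For reference, the parser targets the field descriptions found in id's
QUAKED comments in the .qc sources. A trimmed example in the style of
doors.qc (paraphrased from memory, so the exact wording and spacing are
only illustrative):

    /*QUAKED func_door (0 .5 .8) ? START_OPEN x DOOR_DONT_LINK GOLD_KEY SILVER_KEY TOGGLE
    if two doors touch, they are assumed to be connected and operate as a unit.

    "angle"     determines the opening direction
    "speed"     movement speed (100 default)
    "wait"      wait before returning (3 default, -1 = never return)
    "sounds"
    0)  no sound
    1)  stone
    2)  base
    3)  stone chain
    4)  screechy metal
    */

A line that starts with a quoted name (or, AD-style, a name followed by
"=") becomes an entity field; a parenthesized "(N default)" is picked up
as that field's default; numbered lines following a "sounds" field are
collected as its sound list; everything else becomes the class comment.
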
Bill Currie 2022-05-27 20:06:33 +09:00
parent 2e14300a11
commit 6e8a3b8153
4 changed files with 162 additions and 38 deletions


@@ -253,7 +253,7 @@ class pldata:
         elif type(item) in [int, float]:
             self.write_string(str(item))
         else:
-            raise PListError(0, "unsupported type")
+            raise PListError(0, f"unsupported type {type(item)}")
     def write(self, item):
         self.data = []
         self.write_item(item, 0)


@@ -25,6 +25,9 @@ from bpy.props import BoolProperty, FloatProperty, StringProperty, EnumProperty
 from bpy.props import BoolVectorProperty, CollectionProperty, PointerProperty
 from bpy.props import FloatVectorProperty, IntProperty
 from mathutils import Vector
+from math import ceil
+from textwrap import TextWrapper
 
 from .entityclass import EntityClass
@@ -158,23 +161,6 @@ class QFEntpropRemove(bpy.types.Operator):
         qfentity.fields.remove(qfentity.field_idx)
         return {'FINISHED'}
 
-def reflow_text(text, max_width):
-    lines = []
-    for text_line in text.split("\n"):
-        if not text_line:
-            continue
-        words = text_line.split(" ")
-        flowed_line = ""
-        while words:
-            if len(flowed_line) + len(words[0]) > max_width:
-                lines.append(flowed_line)
-                flowed_line = ""
-            flowed_line += (" " if flowed_line else "") + words[0]
-            del words[0]
-        if flowed_line:
-            lines.append(flowed_line)
-    return lines
-
 class OBJECT_PT_EntityPanel(bpy.types.Panel):
     bl_space_type = 'PROPERTIES'
     bl_region_type = 'WINDOW'
@@ -198,9 +184,22 @@ class OBJECT_PT_EntityPanel(bpy.types.Panel):
         row = layout.row()
         row.prop(qfentity, "classname")
         box=layout.box()
-        lines = reflow_text(ec.comment, 40)
-        for l in lines:
-            box.label(text=l)
+        width = int(ceil(context.region.width / 6))
+        mainwrap = TextWrapper(width = width)
+        subwrap = TextWrapper(width = width - 8)
+        for c in ec.comment:
+            clines = mainwrap.wrap(c)
+            for l in clines:
+                box.label(text=l)
+        for f in ec.fields.values():
+            print(f.name)
+            flines = subwrap.wrap(f.comment)
+            box.label(text=f"{f.name}")
+            for l in flines:
+                box.label(text=f" {l}")
+            if hasattr(f, "sounds"):
+                for s in f.sounds:
+                    box.label(text=f" {s[0]} {s[1]}")
         row = layout.row()
         for c in range(3):
             col = row.column()


@@ -20,9 +20,14 @@
 # <pep8 compliant>
 
 import os
-from .script import Script
-from .qfplist import pldata
-from . import quakechr
+try:
+    from .script import Script
+    from .qfplist import pldata
+    from . import quakechr
+except ImportError:
+    from script import Script
+    from qfplist import pldata
+    import quakechr
 
 MAX_FLAGS = 8
@@ -34,16 +39,46 @@ class EntityClassError(Exception):
     def entclass_error(self, msg):
         raise EntityClassError(self.filename, self.line, msg)
 
+class EntityField:
+    def __init__(self, name, default, comment):
+        self.name = name
+        self.default = default
+        self.comment = comment
+    def to_dictionary(self):
+        d = {}
+        if self.default != None:
+            d["default"] = self.default
+        if self.comment:
+            d["comment"] = self.comment
+        if hasattr(self, "sounds"):
+            d["sounds"] = self.sounds
+        return d
+    @classmethod
+    def from_dictionary(cls, name, d):
+        if "default" in d:
+            default = d["default"]
+        else:
+            default = None
+        if "comment" in d:
+            comment = d["comment"]
+        else:
+            comment = ""
+        field = cls(name, default, comment)
+        if "sounds" in d:
+            field.sounds = d["sounds"]
+        return field
+
 class EntityClass:
-    def __init__(self, name, color, size, flagnames, comment):
+    def __init__(self, name, color, size, flagnames, comment, fields):
         self.name = name
         self.color = color
         self.size = size
         self.flagnames = flagnames
         self.comment = comment
+        self.fields = fields
     @classmethod
     def null(cls):
-        return cls('', (1, 1, 1), None, (), "")
+        return cls('', (1, 1, 1), None, (), "", {})
     @classmethod
     def from_quaked(cls, text, filename, line = 0):
         script = Script(filename, text)
@@ -59,8 +94,49 @@ class EntityClass:
         else:
             size = None
             flagnames = ()
-        comment = cls.extract_comment(script)
-        return cls(name, color, size, flagnames, comment)
+        comment = []
+        fields = {}
+        script.quotes = False
+        script.single = ""
+        while script.tokenAvailable(True):
+            line = []
+            while script.tokenAvailable():
+                script.getToken()
+                if script.token[-2:] == "*/":
+                    break;
+                line.append(script.token)
+            if line:
+                if ((line[0][0] == '"' and line[0][-1] == '"')
+                    or (len(line) > 1 and line[1] == '=')):
+                    if line[0][0] == '"':
+                        fname = line[0][1:-1]
+                        line = line[1:]
+                    else:
+                        fname = line[0]
+                        line = line[2:]
+                    default = None
+                    for i, t in enumerate(line[:-1]):
+                        if t[0] == '(' and line[i + 1] == "default)":
+                            default = t[1:]
+                            break
+                    line = " ".join(line)
+                    fields[fname] = EntityField(fname, default, line)
+                    line = None
+                elif "sounds" in fields:
+                    sounds = fields["sounds"]
+                    if not hasattr(sounds, "sounds"):
+                        sounds.sounds = []
+                    if line[0][-1] == ')':
+                        line[0] = line[0][:-1]
+                    sounds.sounds.append((line[0], " ".join(line[1:])))
+                    line = None
+                else:
+                    line = " ".join(line)
+            if line:
+                comment.append(line)
+            if script.token[-2:] == "*/":
+                break;
+        return cls(name, color, size, flagnames, comment, fields)
     @classmethod
     def from_dictionary(cls, name, d):
         if "color" in d:
@@ -81,11 +157,21 @@ class EntityClass:
         if "comment" in d:
             comment = d["comment"]
         else:
-            comment = ""
-        return cls(name, color, size, flagnames, comment)
+            comment = []
+        if "fields" in d:
+            field_dict = d["fields"]
+            fields = {}
+            for f in field_dict:
+                fields[f] = EntityField.from_dictionary(f, field_dict[f])
+        else:
+            fields = {}
+        return cls(name, color, size, flagnames, comment, fields)
     def to_dictionary(self):
+        fields = {}
+        for f in self.fields:
+            fields[f] = self.fields[f].to_dictionary()
         d = {"color":self.color, "flagnames":self.flagnames,
-             "comment":self.comment}
+             "comment":self.comment, "fields":fields}
         if self.size:
             d["size"] = self.size
         return d
@@ -93,8 +179,11 @@ class EntityClass:
     def parse_vector(cls, script):
         if script.getToken() != "(":
             script.error("Missing (")
-        v = (float(script.getToken()), float(script.getToken()),
-             float(script.getToken()))
+        s = script.getToken(), script.getToken(), script.getToken()
+        try:
+            v = (float(s[0]), float(s[1]), float(s[2]))
+        except ValueError:
+            v = s
         if script.getToken() != ")":
             script.error("Missing )")
         return v
@@ -118,7 +207,6 @@ class EntityClass:
             if len(flagnames) < MAX_FLAGS:
                 flagnames.append(script.token)
         return tuple(flagnames)
-    @classmethod
     def extract_comment(cls, script):
         if not script.tokenAvailable(True):
             return ""
@@ -203,3 +291,35 @@ class EntityClassDict:
         self.entity_classes = {}
         for k in ec.keys():
             self.entity_classes[k] = EntityClass.from_dictionary(k, ec[k])
+
+if __name__ == "__main__":
+    import sys
+    from pprint import pprint
+    from textwrap import TextWrapper
+
+    mainwrap = TextWrapper(width = 70)
+    fieldwrap = TextWrapper(width = 50)
+    ecd = EntityClassDict()
+    for fname in sys.argv[1:]:
+        ecd.scan_source(fname)
+    text = ecd.to_plist()
+    print(text)
+    ecd.from_plist(text)
+    for ec in ecd.entity_classes.values():
+        print(f"{ec.name}: {ec.color} {ec.size} {ec.flagnames}")
+        for c in ec.comment:
+            mlines = mainwrap.wrap(c)
+            for m in mlines:
+                print(f" {m}")
+            print()
+        for f in ec.fields.values():
+            print(f" {f.name}: {f.default}")
+            flines = fieldwrap.wrap(f.comment)
+            for l in flines:
+                print(f" {l}")
+            if f.name == "sounds":
+                for s in f.sounds:
+                    print(f" {s[0]} {s[1]}")
+            print()
+        print()


@@ -26,17 +26,19 @@ class ScriptError(Exception):
 
 class Script:
     def __init__(self, filename, text, single="{}()':", quotes=True):
+        self.filename = filename
         if text[0:3] == "\xef\xbb\xbf":
             text = text[3:]
        elif text[0] == u"\ufeff":
             text = text[1:]
+        self.token = ""
+        self.unget = False
         self.text = text
-        self.filename = filename
+        self.pos = 0
+        self.line = 1
+        self.no_quote_lines = False
         self.single = single
         self.quotes = quotes
-        self.pos = 0
-        self.line = 1
-        self.unget = False
     def error(self, msg):
         raise ScriptError(self.filename, self.line, msg)
     def tokenAvailable(self, crossline=False):
@@ -102,6 +104,9 @@ class Script:
                     self.error("EOF inside quoted string")
                     return None
                 if self.text[self.pos] == "\n":
+                    if self.no_quote_lines:
+                        self.error("EOL inside quoted string")
+                        return None
                     self.line += 1
                 self.pos += 1
             self.token = self.text[start:self.pos]