Mirror of https://git.do.srb2.org/STJr/SRB2.git, synced 2024-11-25 13:51:43 +00:00
Refactor tokenizer
parent dcae99a9fb
commit 9dbf30e20d
9 changed files with 426 additions and 320 deletions
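This commit moves the tokenizer out of file-scope static state in m_misc.c and into a reusable, instance-based tokenizer_t (new files m_tokenizer.c and m_tokenizer.h). The old M_Tokenizer* entry points remain as thin wrappers around a single global instance, and the translation parser is switched to the new interface directly. As a minimal sketch of the new call pattern (the function name, input and loop body here are illustrative, not part of the commit):

#include "m_tokenizer.h"

// Open a tokenizer over some text, pull tokens until the input runs out,
// then free it. This mirrors how the trnslate lump parser uses the API below.
static void Example_ParseText(const char *text)
{
	tokenizer_t *sc = Tokenizer_Open(text, 1); // one token slot is enough here
	const char *tkn = sc->get(sc, 0);          // get() defaults to Tokenizer_Read

	while (tkn != NULL)
	{
		// ...do something with tkn...
		tkn = sc->get(sc, 0);
	}

	Tokenizer_Close(sc);
}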
@@ -35,6 +35,7 @@ add_executable(SRB2SDL2 MACOSX_BUNDLE WIN32
 	m_misc.c
 	m_perfstats.c
 	m_random.c
+	m_tokenizer.c
 	m_queue.c
 	info.c
 	p_ceilng.c
@@ -29,6 +29,7 @@ m_menu.c
 m_misc.c
 m_perfstats.c
 m_random.c
+m_tokenizer.c
 m_queue.c
 info.c
 p_ceilng.c
src/m_misc.c (248 changed lines)
@@ -31,6 +31,7 @@
 #include "doomdef.h"
 #include "g_game.h"
 #include "m_misc.h"
+#include "m_tokenizer.h"
 #include "hu_stuff.h"
 #include "st_stuff.h"
 #include "v_video.h"
@@ -1975,262 +1976,39 @@ void M_UnGetToken(void)
 	endPos = oldendPos;
 }
 
-#define NUMTOKENS 2
-static const char *tokenizerInput = NULL;
-static UINT32 tokenCapacity[NUMTOKENS] = {0};
-static char *tokenizerToken[NUMTOKENS] = {NULL};
-static UINT32 tokenizerStartPos = 0;
-static UINT32 tokenizerEndPos = 0;
-static UINT32 tokenizerInputLength = 0;
-static UINT8 tokenizerInComment = 0; // 0 = not in comment, 1 = // Single-line, 2 = /* Multi-line */
+static tokenizer_t *globalTokenizer = NULL;
 
 void M_TokenizerOpen(const char *inputString)
 {
-	size_t i;
-
-	tokenizerInput = inputString;
-	for (i = 0; i < NUMTOKENS; i++)
-	{
-		tokenCapacity[i] = 1024;
-		tokenizerToken[i] = (char*)Z_Malloc(tokenCapacity[i] * sizeof(char), PU_STATIC, NULL);
-	}
-	tokenizerInputLength = strlen(tokenizerInput);
+	globalTokenizer = Tokenizer_Open(inputString, 2);
 }
 
 void M_TokenizerClose(void)
 {
-	size_t i;
-
-	tokenizerInput = NULL;
-	for (i = 0; i < NUMTOKENS; i++)
-		Z_Free(tokenizerToken[i]);
-	tokenizerStartPos = 0;
-	tokenizerEndPos = 0;
-	tokenizerInComment = 0;
-}
-
-static void M_DetectComment(UINT32 *pos)
-{
-	if (tokenizerInComment)
-		return;
-
-	if (*pos >= tokenizerInputLength - 1)
-		return;
-
-	if (tokenizerInput[*pos] != '/')
-		return;
-
-	//Single-line comment start
-	if (tokenizerInput[*pos + 1] == '/')
-		tokenizerInComment = 1;
-	//Multi-line comment start
-	else if (tokenizerInput[*pos + 1] == '*')
-		tokenizerInComment = 2;
-}
-
-static void M_ReadTokenString(UINT32 i)
-{
-	UINT32 tokenLength = tokenizerEndPos - tokenizerStartPos;
-	if (tokenLength + 1 > tokenCapacity[i])
-	{
-		tokenCapacity[i] = tokenLength + 1;
-		// Assign the memory. Don't forget an extra byte for the end of the string!
-		tokenizerToken[i] = (char *)Z_Malloc(tokenCapacity[i] * sizeof(char), PU_STATIC, NULL);
-	}
-	// Copy the string.
-	M_Memcpy(tokenizerToken[i], tokenizerInput + tokenizerStartPos, (size_t)tokenLength);
-	// Make the final character NUL.
-	tokenizerToken[i][tokenLength] = '\0';
+	Tokenizer_Close(globalTokenizer);
+	globalTokenizer = NULL;
 }
 
 const char *M_TokenizerRead(UINT32 i)
 {
-	if (!tokenizerInput)
+	if (!globalTokenizer)
 		return NULL;
 
-	tokenizerStartPos = tokenizerEndPos;
-
-	// Try to detect comments now, in case we're pointing right at one
-	M_DetectComment(&tokenizerStartPos);
-
-	// Find the first non-whitespace char, or else the end of the string trying
-	while ((tokenizerInput[tokenizerStartPos] == ' '
-			|| tokenizerInput[tokenizerStartPos] == '\t'
-			|| tokenizerInput[tokenizerStartPos] == '\r'
-			|| tokenizerInput[tokenizerStartPos] == '\n'
-			|| tokenizerInput[tokenizerStartPos] == '\0'
-			|| tokenizerInput[tokenizerStartPos] == '=' || tokenizerInput[tokenizerStartPos] == ';' // UDMF TEXTMAP.
-			|| tokenizerInComment != 0)
-			&& tokenizerStartPos < tokenizerInputLength)
-	{
-		// Try to detect comment endings now
-		if (tokenizerInComment == 1 && tokenizerInput[tokenizerStartPos] == '\n')
-			tokenizerInComment = 0; // End of line for a single-line comment
-		else if (tokenizerInComment == 2
-			&& tokenizerStartPos < tokenizerInputLength - 1
-			&& tokenizerInput[tokenizerStartPos] == '*'
-			&& tokenizerInput[tokenizerStartPos+1] == '/')
-		{
-			// End of multi-line comment
-			tokenizerInComment = 0;
-			tokenizerStartPos++; // Make damn well sure we're out of the comment ending at the end of it all
-		}
-
-		tokenizerStartPos++;
-		M_DetectComment(&tokenizerStartPos);
-	}
-
-	// If the end of the string is reached, no token is to be read
-	if (tokenizerStartPos == tokenizerInputLength) {
-		tokenizerEndPos = tokenizerInputLength;
-		return NULL;
-	}
-	// Else, if it's one of these three symbols, capture only this one character
-	else if (tokenizerInput[tokenizerStartPos] == ','
-		|| tokenizerInput[tokenizerStartPos] == '{'
-		|| tokenizerInput[tokenizerStartPos] == '}')
-	{
-		tokenizerEndPos = tokenizerStartPos + 1;
-		tokenizerToken[i][0] = tokenizerInput[tokenizerStartPos];
-		tokenizerToken[i][1] = '\0';
-		return tokenizerToken[i];
-	}
-	// Return entire string within quotes, except without the quotes.
-	else if (tokenizerInput[tokenizerStartPos] == '"')
-	{
-		tokenizerEndPos = ++tokenizerStartPos;
-		while (tokenizerInput[tokenizerEndPos] != '"' && tokenizerEndPos < tokenizerInputLength)
-			tokenizerEndPos++;
-
-		M_ReadTokenString(i);
-		tokenizerEndPos++;
-		return tokenizerToken[i];
-	}
-
-	// Now find the end of the token. This includes several additional characters that are okay to capture as one character, but not trailing at the end of another token.
-	tokenizerEndPos = tokenizerStartPos + 1;
-	while ((tokenizerInput[tokenizerEndPos] != ' '
-			&& tokenizerInput[tokenizerEndPos] != '\t'
-			&& tokenizerInput[tokenizerEndPos] != '\r'
-			&& tokenizerInput[tokenizerEndPos] != '\n'
-			&& tokenizerInput[tokenizerEndPos] != ','
-			&& tokenizerInput[tokenizerEndPos] != '{'
-			&& tokenizerInput[tokenizerEndPos] != '}'
-			&& tokenizerInput[tokenizerEndPos] != '=' && tokenizerInput[tokenizerEndPos] != ';' // UDMF TEXTMAP.
-			&& tokenizerInComment == 0)
-			&& tokenizerEndPos < tokenizerInputLength)
-	{
-		tokenizerEndPos++;
-		// Try to detect comment starts now; if it's in a comment, we don't want it in this token
-		M_DetectComment(&tokenizerEndPos);
-	}
-
-	M_ReadTokenString(i);
-	return tokenizerToken[i];
-}
-
-const char *M_TokenizerReadZDoom(UINT32 i)
-{
-	if (!tokenizerInput)
-		return NULL;
-
-	tokenizerStartPos = tokenizerEndPos;
-
-	// Try to detect comments now, in case we're pointing right at one
-	M_DetectComment(&tokenizerStartPos);
-
-	// Find the first non-whitespace char, or else the end of the string trying
-	while ((tokenizerInput[tokenizerStartPos] == ' '
-			|| tokenizerInput[tokenizerStartPos] == '\t'
-			|| tokenizerInput[tokenizerStartPos] == '\r'
-			|| tokenizerInput[tokenizerStartPos] == '\n'
-			|| tokenizerInput[tokenizerStartPos] == '\0'
-			|| tokenizerInComment != 0)
-			&& tokenizerStartPos < tokenizerInputLength)
-	{
-		// Try to detect comment endings now
-		if (tokenizerInComment == 1 && tokenizerInput[tokenizerStartPos] == '\n')
-			tokenizerInComment = 0; // End of line for a single-line comment
-		else if (tokenizerInComment == 2
-			&& tokenizerStartPos < tokenizerInputLength - 1
-			&& tokenizerInput[tokenizerStartPos] == '*'
-			&& tokenizerInput[tokenizerStartPos+1] == '/')
-		{
-			// End of multi-line comment
-			tokenizerInComment = 0;
-			tokenizerStartPos++; // Make damn well sure we're out of the comment ending at the end of it all
-		}
-
-		tokenizerStartPos++;
-		M_DetectComment(&tokenizerStartPos);
-	}
-
-	// If the end of the string is reached, no token is to be read
-	if (tokenizerStartPos == tokenizerInputLength) {
-		tokenizerEndPos = tokenizerInputLength;
-		return NULL;
-	}
-	// Else, if it's one of these three symbols, capture only this one character
-	else if (tokenizerInput[tokenizerStartPos] == ','
-		|| tokenizerInput[tokenizerStartPos] == '{'
-		|| tokenizerInput[tokenizerStartPos] == '}'
-		|| tokenizerInput[tokenizerStartPos] == '['
-		|| tokenizerInput[tokenizerStartPos] == ']'
-		|| tokenizerInput[tokenizerStartPos] == '='
-		|| tokenizerInput[tokenizerStartPos] == ':'
-		|| tokenizerInput[tokenizerStartPos] == '%')
-	{
-		tokenizerEndPos = tokenizerStartPos + 1;
-		tokenizerToken[i][0] = tokenizerInput[tokenizerStartPos];
-		tokenizerToken[i][1] = '\0';
-		return tokenizerToken[i];
-	}
-	// Return entire string within quotes, except without the quotes.
-	else if (tokenizerInput[tokenizerStartPos] == '"')
-	{
-		tokenizerEndPos = ++tokenizerStartPos;
-		while (tokenizerInput[tokenizerEndPos] != '"' && tokenizerEndPos < tokenizerInputLength)
-			tokenizerEndPos++;
-
-		M_ReadTokenString(i);
-		tokenizerEndPos++;
-		return tokenizerToken[i];
-	}
-
-	// Now find the end of the token. This includes several additional characters that are okay to capture as one character, but not trailing at the end of another token.
-	tokenizerEndPos = tokenizerStartPos + 1;
-	while ((tokenizerInput[tokenizerEndPos] != ' '
-			&& tokenizerInput[tokenizerEndPos] != '\t'
-			&& tokenizerInput[tokenizerEndPos] != '\r'
-			&& tokenizerInput[tokenizerEndPos] != '\n'
-			&& tokenizerInput[tokenizerEndPos] != ','
-			&& tokenizerInput[tokenizerEndPos] != '{'
-			&& tokenizerInput[tokenizerEndPos] != '}'
-			&& tokenizerInput[tokenizerEndPos] != '['
-			&& tokenizerInput[tokenizerEndPos] != ']'
-			&& tokenizerInput[tokenizerEndPos] != '='
-			&& tokenizerInput[tokenizerEndPos] != ':'
-			&& tokenizerInput[tokenizerEndPos] != '%'
-			&& tokenizerInComment == 0)
-			&& tokenizerEndPos < tokenizerInputLength)
-	{
-		tokenizerEndPos++;
-		// Try to detect comment starts now; if it's in a comment, we don't want it in this token
-		M_DetectComment(&tokenizerEndPos);
-	}
-
-	M_ReadTokenString(i);
-	return tokenizerToken[i];
+	return Tokenizer_SRB2Read(globalTokenizer, i);
 }
 
 UINT32 M_TokenizerGetEndPos(void)
 {
-	return tokenizerEndPos;
+	if (!globalTokenizer)
+		return 0;
+
+	return Tokenizer_GetEndPos(globalTokenizer);
 }
 
 void M_TokenizerSetEndPos(UINT32 newPos)
 {
-	tokenizerEndPos = newPos;
+	if (globalTokenizer)
+		Tokenizer_SetEndPos(globalTokenizer, newPos);
 }
 
 /** Count bits in a number.
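The wrapper keeps the old two-slot behaviour: M_TokenizerOpen asks Tokenizer_Open for two token slots, matching the removed NUMTOKENS. Each slot index owns its own buffer, so a caller can hold on to one token while reading the next; a hypothetical illustration (not code from the commit):

// Slot 0 keeps the field name valid while slot 1 reads ahead for the value.
// Both pointers refer to per-slot buffers owned by the tokenizer.
const char *field = M_TokenizerRead(0);
const char *value = M_TokenizerRead(1);
CONS_Printf("%s -> %s\n", field, value);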
src/m_tokenizer.c (new file, 278 lines)
@@ -0,0 +1,278 @@
// SONIC ROBO BLAST 2
//-----------------------------------------------------------------------------
// Copyright (C) 2013-2023 by Sonic Team Junior.
//
// This program is free software distributed under the
// terms of the GNU General Public License, version 2.
// See the 'LICENSE' file for more details.
//-----------------------------------------------------------------------------
/// \file m_tokenizer.c
/// \brief Tokenizer

#include "m_tokenizer.h"
#include "z_zone.h"

tokenizer_t *Tokenizer_Open(const char *inputString, unsigned numTokens)
{
	tokenizer_t *tokenizer = Z_Malloc(sizeof(tokenizer_t), PU_STATIC, NULL);

	tokenizer->input = inputString;
	tokenizer->startPos = 0;
	tokenizer->endPos = 0;
	tokenizer->inputLength = 0;
	tokenizer->inComment = 0;
	tokenizer->get = Tokenizer_Read;

	if (numTokens < 1)
		numTokens = 1;

	tokenizer->numTokens = numTokens;
	tokenizer->capacity = Z_Malloc(sizeof(UINT32) * numTokens, PU_STATIC, NULL);
	tokenizer->token = Z_Malloc(sizeof(char*) * numTokens, PU_STATIC, NULL);

	for (size_t i = 0; i < numTokens; i++)
	{
		tokenizer->capacity[i] = 1024;
		tokenizer->token[i] = (char*)Z_Malloc(tokenizer->capacity[i] * sizeof(char), PU_STATIC, NULL);
	}

	tokenizer->inputLength = strlen(tokenizer->input);

	return tokenizer;
}

void Tokenizer_Close(tokenizer_t *tokenizer)
{
	if (!tokenizer)
		return;

	for (size_t i = 0; i < tokenizer->numTokens; i++)
		Z_Free(tokenizer->token[i]);
	Z_Free(tokenizer->capacity);
	Z_Free(tokenizer->token);
	Z_Free(tokenizer);
}

static void Tokenizer_DetectComment(tokenizer_t *tokenizer, UINT32 *pos)
{
	if (tokenizer->inComment)
		return;

	if (*pos >= tokenizer->inputLength - 1)
		return;

	if (tokenizer->input[*pos] != '/')
		return;

	// Single-line comment start
	if (tokenizer->input[*pos + 1] == '/')
		tokenizer->inComment = 1;
	// Multi-line comment start
	else if (tokenizer->input[*pos + 1] == '*')
		tokenizer->inComment = 2;
}

static void Tokenizer_ReadTokenString(tokenizer_t *tokenizer, UINT32 i)
{
	UINT32 tokenLength = tokenizer->endPos - tokenizer->startPos;
	if (tokenLength + 1 > tokenizer->capacity[i])
	{
		tokenizer->capacity[i] = tokenLength + 1;
		// Assign the memory. Don't forget an extra byte for the end of the string!
		tokenizer->token[i] = (char *)Z_Malloc(tokenizer->capacity[i] * sizeof(char), PU_STATIC, NULL);
	}
	// Copy the string.
	M_Memcpy(tokenizer->token[i], tokenizer->input + tokenizer->startPos, (size_t)tokenLength);
	// Make the final character NUL.
	tokenizer->token[i][tokenLength] = '\0';
}

const char *Tokenizer_Read(tokenizer_t *tokenizer, UINT32 i)
{
	if (!tokenizer->input)
		return NULL;

	tokenizer->startPos = tokenizer->endPos;

	// Try to detect comments now, in case we're pointing right at one
	Tokenizer_DetectComment(tokenizer, &tokenizer->startPos);

	// Find the first non-whitespace char, or else the end of the string trying
	while ((tokenizer->input[tokenizer->startPos] == ' '
			|| tokenizer->input[tokenizer->startPos] == '\t'
			|| tokenizer->input[tokenizer->startPos] == '\r'
			|| tokenizer->input[tokenizer->startPos] == '\n'
			|| tokenizer->input[tokenizer->startPos] == '\0'
			|| tokenizer->inComment != 0)
			&& tokenizer->startPos < tokenizer->inputLength)
	{
		// Try to detect comment endings now
		if (tokenizer->inComment == 1 && tokenizer->input[tokenizer->startPos] == '\n')
			tokenizer->inComment = 0; // End of line for a single-line comment
		else if (tokenizer->inComment == 2
			&& tokenizer->startPos < tokenizer->inputLength - 1
			&& tokenizer->input[tokenizer->startPos] == '*'
			&& tokenizer->input[tokenizer->startPos+1] == '/')
		{
			// End of multi-line comment
			tokenizer->inComment = 0;
			tokenizer->startPos++; // Make damn well sure we're out of the comment ending at the end of it all
		}

		tokenizer->startPos++;
		Tokenizer_DetectComment(tokenizer, &tokenizer->startPos);
	}

	// If the end of the string is reached, no token is to be read
	if (tokenizer->startPos == tokenizer->inputLength) {
		tokenizer->endPos = tokenizer->inputLength;
		return NULL;
	}
	// Else, if it's one of these three symbols, capture only this one character
	else if (tokenizer->input[tokenizer->startPos] == ','
		|| tokenizer->input[tokenizer->startPos] == '{'
		|| tokenizer->input[tokenizer->startPos] == '}'
		|| tokenizer->input[tokenizer->startPos] == '['
		|| tokenizer->input[tokenizer->startPos] == ']'
		|| tokenizer->input[tokenizer->startPos] == '='
		|| tokenizer->input[tokenizer->startPos] == ':'
		|| tokenizer->input[tokenizer->startPos] == '%')
	{
		tokenizer->endPos = tokenizer->startPos + 1;
		tokenizer->token[i][0] = tokenizer->input[tokenizer->startPos];
		tokenizer->token[i][1] = '\0';
		return tokenizer->token[i];
	}
	// Return entire string within quotes, except without the quotes.
	else if (tokenizer->input[tokenizer->startPos] == '"')
	{
		tokenizer->endPos = ++tokenizer->startPos;
		while (tokenizer->input[tokenizer->endPos] != '"' && tokenizer->endPos < tokenizer->inputLength)
			tokenizer->endPos++;

		Tokenizer_ReadTokenString(tokenizer, i);
		tokenizer->endPos++;
		return tokenizer->token[i];
	}

	// Now find the end of the token. This includes several additional characters that are okay to capture as one character, but not trailing at the end of another token.
	tokenizer->endPos = tokenizer->startPos + 1;
	while ((tokenizer->input[tokenizer->endPos] != ' '
			&& tokenizer->input[tokenizer->endPos] != '\t'
			&& tokenizer->input[tokenizer->endPos] != '\r'
			&& tokenizer->input[tokenizer->endPos] != '\n'
			&& tokenizer->input[tokenizer->endPos] != ','
			&& tokenizer->input[tokenizer->endPos] != '{'
			&& tokenizer->input[tokenizer->endPos] != '}'
			&& tokenizer->input[tokenizer->endPos] != '['
			&& tokenizer->input[tokenizer->endPos] != ']'
			&& tokenizer->input[tokenizer->endPos] != '='
			&& tokenizer->input[tokenizer->endPos] != ':'
			&& tokenizer->input[tokenizer->endPos] != '%'
			&& tokenizer->inComment == 0)
			&& tokenizer->endPos < tokenizer->inputLength)
	{
		tokenizer->endPos++;
		// Try to detect comment starts now; if it's in a comment, we don't want it in this token
		Tokenizer_DetectComment(tokenizer, &tokenizer->endPos);
	}

	Tokenizer_ReadTokenString(tokenizer, i);
	return tokenizer->token[i];
}

const char *Tokenizer_SRB2Read(tokenizer_t *tokenizer, UINT32 i)
{
	if (!tokenizer->input)
		return NULL;

	tokenizer->startPos = tokenizer->endPos;

	// Try to detect comments now, in case we're pointing right at one
	Tokenizer_DetectComment(tokenizer, &tokenizer->startPos);

	// Find the first non-whitespace char, or else the end of the string trying
	while ((tokenizer->input[tokenizer->startPos] == ' '
			|| tokenizer->input[tokenizer->startPos] == '\t'
			|| tokenizer->input[tokenizer->startPos] == '\r'
			|| tokenizer->input[tokenizer->startPos] == '\n'
			|| tokenizer->input[tokenizer->startPos] == '\0'
			|| tokenizer->input[tokenizer->startPos] == '=' || tokenizer->input[tokenizer->startPos] == ';' // UDMF TEXTMAP.
			|| tokenizer->inComment != 0)
			&& tokenizer->startPos < tokenizer->inputLength)
	{
		// Try to detect comment endings now
		if (tokenizer->inComment == 1 && tokenizer->input[tokenizer->startPos] == '\n')
			tokenizer->inComment = 0; // End of line for a single-line comment
		else if (tokenizer->inComment == 2
			&& tokenizer->startPos < tokenizer->inputLength - 1
			&& tokenizer->input[tokenizer->startPos] == '*'
			&& tokenizer->input[tokenizer->startPos+1] == '/')
		{
			// End of multi-line comment
			tokenizer->inComment = 0;
			tokenizer->startPos++; // Make damn well sure we're out of the comment ending at the end of it all
		}

		tokenizer->startPos++;
		Tokenizer_DetectComment(tokenizer, &tokenizer->startPos);
	}

	// If the end of the string is reached, no token is to be read
	if (tokenizer->startPos == tokenizer->inputLength) {
		tokenizer->endPos = tokenizer->inputLength;
		return NULL;
	}
	// Else, if it's one of these three symbols, capture only this one character
	else if (tokenizer->input[tokenizer->startPos] == ','
		|| tokenizer->input[tokenizer->startPos] == '{'
		|| tokenizer->input[tokenizer->startPos] == '}')
	{
		tokenizer->endPos = tokenizer->startPos + 1;
		tokenizer->token[i][0] = tokenizer->input[tokenizer->startPos];
		tokenizer->token[i][1] = '\0';
		return tokenizer->token[i];
	}
	// Return entire string within quotes, except without the quotes.
	else if (tokenizer->input[tokenizer->startPos] == '"')
	{
		tokenizer->endPos = ++tokenizer->startPos;
		while (tokenizer->input[tokenizer->endPos] != '"' && tokenizer->endPos < tokenizer->inputLength)
			tokenizer->endPos++;

		Tokenizer_ReadTokenString(tokenizer, i);
		tokenizer->endPos++;
		return tokenizer->token[i];
	}

	// Now find the end of the token. This includes several additional characters that are okay to capture as one character, but not trailing at the end of another token.
	tokenizer->endPos = tokenizer->startPos + 1;
	while ((tokenizer->input[tokenizer->endPos] != ' '
			&& tokenizer->input[tokenizer->endPos] != '\t'
			&& tokenizer->input[tokenizer->endPos] != '\r'
			&& tokenizer->input[tokenizer->endPos] != '\n'
			&& tokenizer->input[tokenizer->endPos] != ','
			&& tokenizer->input[tokenizer->endPos] != '{'
			&& tokenizer->input[tokenizer->endPos] != '}'
			&& tokenizer->input[tokenizer->endPos] != '=' && tokenizer->input[tokenizer->endPos] != ';' // UDMF TEXTMAP.
			&& tokenizer->inComment == 0)
			&& tokenizer->endPos < tokenizer->inputLength)
	{
		tokenizer->endPos++;
		// Try to detect comment starts now; if it's in a comment, we don't want it in this token
		Tokenizer_DetectComment(tokenizer, &tokenizer->endPos);
	}

	Tokenizer_ReadTokenString(tokenizer, i);
	return tokenizer->token[i];
}

UINT32 Tokenizer_GetEndPos(tokenizer_t *tokenizer)
{
	return tokenizer->endPos;
}

void Tokenizer_SetEndPos(tokenizer_t *tokenizer, UINT32 newPos)
{
	tokenizer->endPos = newPos;
}
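The new file provides two read modes that share the same whitespace and comment handling but differ in which characters form single-character tokens: Tokenizer_Read (the default for the get pointer) returns '[', ']', '=', ':' and '%' as their own tokens, while Tokenizer_SRB2Read skips '=' and ';' like whitespace for UDMF TEXTMAP input. A rough illustration, not taken from the commit:

tokenizer_t *sc = Tokenizer_Open("angle = 90 // trailing comment", 1);
const char *tkn;

// Tokenizer_Read: '=' comes back as a token of its own and the single-line
// comment is skipped. Expected sequence: "angle", "=", "90", then NULL.
while ((tkn = Tokenizer_Read(sc, 0)) != NULL)
	CONS_Printf("%s\n", tkn);

Tokenizer_Close(sc);

// Tokenizer_SRB2Read on the same input treats '=' and ';' as separators,
// so the sequence would be just "angle", "90".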
src/m_tokenizer.h (new file, 38 lines)
@@ -0,0 +1,38 @@
// SONIC ROBO BLAST 2
//-----------------------------------------------------------------------------
// Copyright (C) 2013-2023 by Sonic Team Junior.
//
// This program is free software distributed under the
// terms of the GNU General Public License, version 2.
// See the 'LICENSE' file for more details.
//-----------------------------------------------------------------------------
/// \file m_tokenizer.h
/// \brief Tokenizer

#ifndef __M_TOKENIZER__
#define __M_TOKENIZER__

#include "doomdef.h"

typedef struct Tokenizer
{
	const char *input;
	unsigned numTokens;
	UINT32 *capacity;
	char **token;
	UINT32 startPos;
	UINT32 endPos;
	UINT32 inputLength;
	UINT8 inComment; // 0 = not in comment, 1 = // Single-line, 2 = /* Multi-line */
	const char *(*get)(struct Tokenizer*, UINT32);
} tokenizer_t;

tokenizer_t *Tokenizer_Open(const char *inputString, unsigned numTokens);
void Tokenizer_Close(tokenizer_t *tokenizer);

const char *Tokenizer_Read(tokenizer_t *tokenizer, UINT32 i);
const char *Tokenizer_SRB2Read(tokenizer_t *tokenizer, UINT32 i);
UINT32 Tokenizer_GetEndPos(tokenizer_t *tokenizer);
void Tokenizer_SetEndPos(tokenizer_t *tokenizer, UINT32 newPos);

#endif
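The get member is a function pointer so shared parsing code can stay read-mode agnostic: Tokenizer_Open points it at Tokenizer_Read, and the translation parser below only ever calls sc->get(sc, 0). Nothing in this commit reassigns it, but since the field is plain data a caller could presumably select the SRB2/UDMF behaviour instead; a hypothetical sketch:

const char *text = "floorheight = 0;"; // illustrative input
tokenizer_t *sc = Tokenizer_Open(text, 1);

sc->get = Tokenizer_SRB2Read; // hypothetical: switch the read mode up front

const char *tkn = sc->get(sc, 0); // now reads with the SRB2/UDMF rules
Tokenizer_Close(sc);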
@@ -14,6 +14,7 @@
 #include "v_video.h" // pMasterPalette
 #include "z_zone.h"
 #include "w_wad.h"
+#include "m_tokenizer.h"
 
 #include <errno.h>
 
@@ -294,9 +295,9 @@ boolean PaletteRemap_AddTint(remaptable_t *tr, int start, int end, int r, int g,
 	return true;
 }
 
-static boolean ExpectToken(const char *expect)
+static boolean ExpectToken(tokenizer_t *sc, const char *expect)
 {
-	return strcmp(M_TokenizerReadZDoom(0), expect) == 0;
+	return strcmp(sc->get(sc, 0), expect) == 0;
 }
 
 static boolean StringToNumber(const char *tkn, int *out)
@@ -321,14 +322,14 @@ static boolean StringToNumber(const char *tkn, int *out)
 	return true;
 }
 
-static boolean ParseNumber(int *out)
+static boolean ParseNumber(tokenizer_t *sc, int *out)
 {
-	return StringToNumber(M_TokenizerReadZDoom(0), out);
+	return StringToNumber(sc->get(sc, 0), out);
 }
 
-static boolean ParseDecimal(double *out)
+static boolean ParseDecimal(tokenizer_t *sc, double *out)
 {
-	const char *tkn = M_TokenizerReadZDoom(0);
+	const char *tkn = sc->get(sc, 0);
 
 	char *endPos = NULL;
 
@@ -362,26 +363,24 @@ static struct PaletteRemapParseResult *ThrowError(const char *format, ...)
 	return err;
 }
 
-struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char *translation)
+static struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, tokenizer_t *sc)
 {
 	int start, end;
 
-	M_TokenizerOpen(translation);
-
-	if (!ParseNumber(&start))
+	if (!ParseNumber(sc, &start))
 		return ThrowError("expected a number for start range");
-	if (!ExpectToken(":"))
+	if (!ExpectToken(sc, ":"))
 		return ThrowError("expected ':'");
-	if (!ParseNumber(&end))
+	if (!ParseNumber(sc, &end))
 		return ThrowError("expected a number for end range");
 
 	if (start < 0 || start > 255 || end < 0 || end > 255)
 		return ThrowError("palette indices out of range");
 
-	if (!ExpectToken("="))
+	if (!ExpectToken(sc, "="))
 		return ThrowError("expected '='");
 
-	const char *tkn = M_TokenizerReadZDoom(0);
+	const char *tkn = sc->get(sc, 0);
 	if (strcmp(tkn, "[") == 0)
 	{
 		// translation using RGB values
@@ -389,42 +388,42 @@ struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char
 		int r2, g2, b2;
 
 		// start
-		if (!ParseNumber(&r1))
+		if (!ParseNumber(sc, &r1))
 			return ThrowError("expected a number for starting red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseNumber(&g1))
+		if (!ParseNumber(sc, &g1))
 			return ThrowError("expected a number for starting green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseNumber(&b1))
+		if (!ParseNumber(sc, &b1))
 			return ThrowError("expected a number for starting blue");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
-		if (!ExpectToken(":"))
+		if (!ExpectToken(sc, ":"))
 			return ThrowError("expected ':'");
-		if (!ExpectToken("["))
+		if (!ExpectToken(sc, "["))
 			return ThrowError("expected '[");
 
 		// end
-		if (!ParseNumber(&r2))
+		if (!ParseNumber(sc, &r2))
 			return ThrowError("expected a number for ending red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseNumber(&g2))
+		if (!ParseNumber(sc, &g2))
 			return ThrowError("expected a number for ending green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseNumber(&b2))
+		if (!ParseNumber(sc, &b2))
 			return ThrowError("expected a number for ending blue");
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
 
 		PaletteRemap_AddColorRange(tr, start, end, r1, g1, b1, r2, g2, b2);
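For context, this branch walks a ZDoom-flavoured RGB range of the general shape start:end=[r,g,b]:[r,g,b]. A hypothetical input, with the token stream Tokenizer_Read would hand the checks above:

// Hypothetical translation range: palette indices 0..127 toward a red ramp.
const char *translation = "0:127=[0,0,0]:[255,0,0]";
// Tokenizer_Read yields: "0", ":", "127", "=", "[", "0", ",", "0", ",", "0",
// "]", ":", "[", "255", ",", "0", ",", "0", "]" and then NULL.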
@@ -435,45 +434,44 @@ struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char
 		double r1, g1, b1;
 		double r2, g2, b2;
 
-		if (!ExpectToken("["))
+		if (!ExpectToken(sc, "["))
 			return ThrowError("expected '[");
 
 		// start
-		if (!ParseDecimal(&r1))
+		if (!ParseDecimal(sc, &r1))
 			return ThrowError("expected a number for starting red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseDecimal(&g1))
+		if (!ParseDecimal(sc, &g1))
 			return ThrowError("expected a number for starting green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseDecimal(&b1))
+		if (!ParseDecimal(sc, &b1))
 			return ThrowError("expected a number for starting blue");
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
 
-		if (!ExpectToken(":"))
+		if (!ExpectToken(sc, ":"))
 			return ThrowError("expected ':'");
 
-		if (!ExpectToken("["))
+		if (!ExpectToken(sc, "["))
 			return ThrowError("expected '[");
 
 		// end
-		if (!ParseDecimal(&r2))
+		if (!ParseDecimal(sc, &r2))
 			return ThrowError("expected a number for ending red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseDecimal(&g2))
+		if (!ParseDecimal(sc, &g2))
 			return ThrowError("expected a number for ending green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
 
-		if (!ParseDecimal(&b2))
+		if (!ParseDecimal(sc, &b2))
 			return ThrowError("expected a number for ending blue");
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
 
 		PaletteRemap_AddDesaturation(tr, start, end, r1, g1, b1, r2, g2, b2);
@@ -483,19 +481,19 @@ struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char
 		// Colourise translation
 		int r, g, b;
 
-		if (!ExpectToken("["))
+		if (!ExpectToken(sc, "["))
 			return ThrowError("expected '[");
-		if (!ParseNumber(&r))
+		if (!ParseNumber(sc, &r))
 			return ThrowError("expected a number for red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
-		if (!ParseNumber(&g))
+		if (!ParseNumber(sc, &g))
 			return ThrowError("expected a number for green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
-		if (!ParseNumber(&b))
+		if (!ParseNumber(sc, &b))
 			return ThrowError("expected a number for blue");
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
 
 		PaletteRemap_AddColourisation(tr, start, end, r, g, b);
@@ -505,23 +503,23 @@ struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char
 		// Tint translation
 		int a, r, g, b;
 
-		if (!ExpectToken("["))
+		if (!ExpectToken(sc, "["))
 			return ThrowError("expected '[");
-		if (!ParseNumber(&a))
+		if (!ParseNumber(sc, &a))
 			return ThrowError("expected a number for amount");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
-		if (!ParseNumber(&r))
+		if (!ParseNumber(sc, &r))
 			return ThrowError("expected a number for red");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
-		if (!ParseNumber(&g))
+		if (!ParseNumber(sc, &g))
 			return ThrowError("expected a number for green");
-		if (!ExpectToken(","))
+		if (!ExpectToken(sc, ","))
 			return ThrowError("expected ','");
-		if (!ParseNumber(&b))
+		if (!ParseNumber(sc, &b))
 			return ThrowError("expected a number for blue");
-		if (!ExpectToken("]"))
+		if (!ExpectToken(sc, "]"))
 			return ThrowError("expected ']'");
 
 		PaletteRemap_AddTint(tr, start, end, r, g, b, a);
@@ -532,19 +530,25 @@ struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char
 
 		if (!StringToNumber(tkn, &pal1))
 			return ThrowError("expected a number for starting index");
-		if (!ExpectToken(":"))
+		if (!ExpectToken(sc, ":"))
 			return ThrowError("expected ':'");
-		if (!ParseNumber(&pal2))
+		if (!ParseNumber(sc, &pal2))
 			return ThrowError("expected a number for ending index");
 
 		PaletteRemap_AddIndexRange(tr, start, end, pal1, pal2);
 	}
 
-	M_TokenizerClose();
 
 	return NULL;
 }
 
+struct PaletteRemapParseResult *PaletteRemap_ParseTranslation(remaptable_t *tr, const char *translation)
+{
+	tokenizer_t *sc = Tokenizer_Open(translation, 1);
+	struct PaletteRemapParseResult *error = PaletteRemap_ParseString(tr, sc);
+	Tokenizer_Close(sc);
+	return error;
+}
+
 static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 {
 	char *lumpData = (char *)W_CacheLumpNumPwad(wadNum, lumpnum, PU_STATIC);
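PaletteRemap_ParseTranslation becomes the public entry point and owns the tokenizer's lifetime, so callers no longer pair M_TokenizerOpen with M_TokenizerClose themselves. A hedged usage sketch (the remap table and input string are illustrative):

// tr is assumed to be a remap table prepared by the caller.
struct PaletteRemapParseResult *error = PaletteRemap_ParseTranslation(tr, "0:127=128:255");
if (error)
	CONS_Alert(CONS_ERROR, "Error parsing translation: %s\n", error->error);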
@@ -554,19 +558,18 @@ static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 	text[lumpLength] = '\0';
 	Z_Free(lumpData);
 
-	char *p = text;
-	char *tkn = M_GetToken(p);
+	tokenizer_t *sc = Tokenizer_Open(text, 1);
+	const char *tkn = sc->get(sc, 0);
 	while (tkn != NULL)
 	{
 		remaptable_t *tr = NULL;
 
-		char *name = tkn;
+		char *name = Z_StrDup(tkn);
 
-		tkn = M_GetToken(NULL);
+		tkn = sc->get(sc, 0);
 		if (strcmp(tkn, ":") == 0)
 		{
-			Z_Free(tkn);
-			tkn = M_GetToken(NULL);
+			tkn = sc->get(sc, 0);
 
 			remaptable_t *tbl = R_GetTranslationByID(R_FindCustomTranslation(tkn));
 			if (tbl)
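One subtle change in this hunk: name is now Z_StrDup(tkn) instead of keeping the raw token pointer, because the tokenizer reuses its per-slot buffer on every sc->get call, whereas M_GetToken handed out freshly allocated strings (hence the removed Z_Free(tkn) calls). A hypothetical illustration of the hazard:

// Both reads use slot 0, so the second call reuses the same buffer and
// "first" no longer points at the first token's text.
const char *first = sc->get(sc, 0);
const char *second = sc->get(sc, 0);
// Copy a token if it has to outlive the next read:
char *kept = Z_StrDup(first);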
@@ -577,8 +580,7 @@ static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 				goto fail;
 			}
 
-			Z_Free(tkn);
-			tkn = M_GetToken(NULL);
+			tkn = sc->get(sc, 0);
 		}
 		else
 		{
@@ -586,18 +588,15 @@ static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 			PaletteRemap_SetIdentity(tr);
 		}
 
 #if 0
-		tkn = M_GetToken(NULL);
 		if (strcmp(tkn, "=") != 0)
 		{
 			CONS_Alert(CONS_ERROR, "Error parsing translation '%s': Expected '=', got '%s'\n", name, tkn);
 			goto fail;
 		}
-		Z_Free(tkn);
 #endif
+		tkn = sc->get(sc, 0);
 
 		do {
-			struct PaletteRemapParseResult *error = PaletteRemap_ParseString(tr, tkn);
+			struct PaletteRemapParseResult *error = PaletteRemap_ParseTranslation(tr, tkn);
 			if (error)
 			{
 				CONS_Alert(CONS_ERROR, "Error parsing translation '%s': %s\n", name, error->error);
@@ -605,16 +604,14 @@ static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 				goto fail;
 			}
 
-			Z_Free(tkn);
-			tkn = M_GetToken(NULL);
+			tkn = sc->get(sc, 0);
 			if (!tkn)
 				break;
 
 			if (strcmp(tkn, ",") != 0)
 				break;
 
-			Z_Free(tkn);
-			tkn = M_GetToken(NULL);
+			tkn = sc->get(sc, 0);
 		} while (true);
 
 		// add it
@@ -624,8 +621,8 @@ static void P_ParseTrnslate(INT32 wadNum, UINT16 lumpnum)
 	}
 
 fail:
-	Z_Free(tkn);
-	Z_Free((void *)text);
+	Tokenizer_Close(sc);
+	Z_Free(text);
 }
 
 void R_LoadTrnslateLumps(void)
@@ -9,6 +9,9 @@
 /// \file r_translation.h
 /// \brief Translations
 
+#ifndef __R_TRANSLATION__
+#define __R_TRANSLATION__
+
 #include "doomdef.h"
 
 typedef struct
@@ -36,10 +39,12 @@ struct PaletteRemapParseResult
 	char error[4096];
 };
 
-struct PaletteRemapParseResult *PaletteRemap_ParseString(remaptable_t *tr, char *translation);
+struct PaletteRemapParseResult *PaletteRemap_ParseTranslation(remaptable_t *tr, const char *translation);
 
 int R_FindCustomTranslation(const char *name);
 void R_AddCustomTranslation(const char *name, int trnum);
 remaptable_t *R_GetTranslationByID(int id);
 
 void R_LoadTrnslateLumps(void);
+
+#endif
@@ -292,6 +292,7 @@
     <ClInclude Include="..\m_fixed.h" />
     <ClInclude Include="..\m_menu.h" />
     <ClInclude Include="..\m_misc.h" />
+    <ClInclude Include="..\m_tokenizer.h" />
     <ClInclude Include="..\m_perfstats.h" />
     <ClInclude Include="..\m_queue.h" />
     <ClInclude Include="..\m_random.h" />
@@ -469,6 +470,7 @@
     <ClCompile Include="..\m_fixed.c" />
     <ClCompile Include="..\m_menu.c" />
     <ClCompile Include="..\m_misc.c" />
+    <ClCompile Include="..\m_tokenizer.c" />
     <ClCompile Include="..\m_perfstats.c" />
     <ClCompile Include="..\m_queue.c" />
     <ClCompile Include="..\m_random.c" />
@@ -339,6 +339,9 @@
     <ClInclude Include="..\m_misc.h">
       <Filter>M_Misc</Filter>
     </ClInclude>
+    <ClInclude Include="..\m_tokenizer.h">
+      <Filter>M_Misc</Filter>
+    </ClInclude>
     <ClInclude Include="..\m_perfstats.h">
       <Filter>M_Misc</Filter>
     </ClInclude>
@@ -834,6 +837,9 @@
     <ClCompile Include="..\m_misc.c">
       <Filter>M_Misc</Filter>
     </ClCompile>
+    <ClCompile Include="..\m_tokenizer.c">
+      <Filter>M_Misc</Filter>
+    </ClCompile>
     <ClCompile Include="..\m_perfstats.c">
       <Filter>M_Misc</Filter>
     </ClCompile>