/*
 * Copyright (C) 2012
 *     Wolfgang Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished to do
 * so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <ctype.h> /* isalpha/isdigit/isspace used below */

#include "gmqcc.h"
#include "lexer.h"

/*
 * List of Keywords
 */

/* original */
static const char *keywords_qc[] = {
    "for", "do", "while",
    "if", "else",
    "local",
    "return",
    "const"
};
static size_t num_keywords_qc = sizeof(keywords_qc) / sizeof(keywords_qc[0]);

/* For fte/gmqcc */
static const char *keywords_fg[] = {
    "switch", "case", "default",
    "struct", "union",
    "break", "continue",
    "typedef",
    "goto",

    "__builtin_debug_printtype"
};
static size_t num_keywords_fg = sizeof(keywords_fg) / sizeof(keywords_fg[0]);

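/* keywords_fg extends the plain QC keyword set for the fte/gmqcc dialects; it
 * is only consulted in lex_do() when opts.standard != COMPILER_QCC.
 */
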
/*
 * Lexer code
 */

char **lex_filenames;

void lexerror(lex_file *lex, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    if (lex)
        con_vprintmsg(LVL_ERROR, lex->name, lex->sline, "parse error", fmt, ap);
    else
        con_vprintmsg(LVL_ERROR, "", 0, "parse error", fmt, ap);
    va_end(ap);
}

bool lexwarn(lex_file *lex, int warntype, const char *fmt, ...)
{
    va_list ap;
    int lvl = LVL_WARNING;

    if (!OPTS_WARN(warntype))
        return false;

    if (opts.werror)
        lvl = LVL_ERROR;

    va_start(ap, fmt);
    con_vprintmsg(lvl, lex->name, lex->sline, (opts.werror ? "error" : "warning"), fmt, ap);
    va_end(ap);

    return opts.werror;
}

#if 0
token* token_new()
{
    token *tok = (token*)mem_a(sizeof(token));
    if (!tok)
        return NULL;
    memset(tok, 0, sizeof(*tok));
    return tok;
}

void token_delete(token *self)
{
    if (self->next && self->next->prev == self)
        self->next->prev = self->prev;
    if (self->prev && self->prev->next == self)
        self->prev->next = self->next;
    MEM_VECTOR_CLEAR(self, value);
    mem_d(self);
}

token* token_copy(const token *cp)
{
    token* self = token_new();
    if (!self)
        return NULL;
    /* copy the value */
    self->value_alloc = cp->value_count + 1;
    self->value_count = cp->value_count;
    self->value = (char*)mem_a(self->value_alloc);
    if (!self->value) {
        mem_d(self);
        return NULL;
    }
    memcpy(self->value, cp->value, cp->value_count);
    self->value[self->value_alloc-1] = 0;

    /* rest */
    self->ctx = cp->ctx;
    self->ttype = cp->ttype;
    memcpy(&self->constval, &cp->constval, sizeof(self->constval));
    return self;
}

void token_delete_all(token *t)
{
    token *n;

    do {
        n = t->next;
        token_delete(t);
        t = n;
    } while(t);
}

token* token_copy_all(const token *cp)
{
    token *cur;
    token *out;

    out = cur = token_copy(cp);
    if (!out)
        return NULL;

    while (cp->next) {
        cp = cp->next;
        cur->next = token_copy(cp);
        if (!cur->next) {
            token_delete_all(out);
            return NULL;
        }
        cur->next->prev = cur;
        cur = cur->next;
    }

    return out;
}
#else
static void lex_token_new(lex_file *lex)
{
#if 0
    if (lex->tok)
        token_delete(lex->tok);
    lex->tok = token_new();
#else
    if (lex->tok.value)
        vec_shrinkto(lex->tok.value, 0);
    lex->tok.constval.t = 0;
    lex->tok.ctx.line = lex->sline;
    lex->tok.ctx.file = lex->name;
#endif
}
#endif

lex_file* lex_open(const char *file)
{
    lex_file *lex;
    FILE *in = util_fopen(file, "rb");

    if (!in) {
        lexerror(NULL, "open failed: '%s'\n", file);
        return NULL;
    }

    lex = (lex_file*)mem_a(sizeof(*lex));
    if (!lex) {
        fclose(in);
        lexerror(NULL, "out of memory\n");
        return NULL;
    }

    memset(lex, 0, sizeof(*lex));

    lex->file = in;
    lex->name = util_strdup(file);
    lex->line = 1; /* we start counting at 1 */

    lex->peekpos = 0;
    lex->eof = false;

    vec_push(lex_filenames, lex->name);
    return lex;
}

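/* Like lex_open(), but lexes from an in-memory buffer instead of a FILE*;
 * lex_fgetc() below picks the source based on lex->file vs lex->open_string.
 */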
lex_file* lex_open_string(const char *str, size_t len, const char *name)
{
    lex_file *lex;

    lex = (lex_file*)mem_a(sizeof(*lex));
    if (!lex) {
        lexerror(NULL, "out of memory\n");
        return NULL;
    }

    memset(lex, 0, sizeof(*lex));

    lex->file = NULL;
    lex->open_string        = str;
    lex->open_string_length = len;
    lex->open_string_pos    = 0;

    lex->name = util_strdup(name ? name : "<string-source>");
    lex->line = 1; /* we start counting at 1 */

    lex->peekpos = 0;
    lex->eof = false;

    vec_push(lex_filenames, lex->name);

    return lex;
}

void lex_cleanup(void)
{
    size_t i;
    for (i = 0; i < vec_size(lex_filenames); ++i)
        mem_d(lex_filenames[i]);
    vec_free(lex_filenames);
}

void lex_close(lex_file *lex)
{
    size_t i;
    for (i = 0; i < vec_size(lex->frames); ++i)
        mem_d(lex->frames[i].name);
    vec_free(lex->frames);

    if (lex->modelname)
        vec_free(lex->modelname);

    if (lex->file)
        fclose(lex->file);
#if 0
    if (lex->tok)
        token_delete(lex->tok);
#else
    vec_free(lex->tok.value);
#endif
    /* mem_d(lex->name); collected in lex_filenames */
    mem_d(lex);
}

static int lex_fgetc(lex_file *lex)
{
    if (lex->file)
        return fgetc(lex->file);
    if (lex->open_string) {
        if (lex->open_string_pos >= lex->open_string_length)
            return EOF;
        return lex->open_string[lex->open_string_pos++];
    }
    return EOF;
}

/* Get or put-back data
 * The following two functions do NOT understand what kind of data they
 * are working on.
 * They are merely wrapping get/put in order to count line numbers.
 */
static void lex_ungetch(lex_file *lex, int ch);
static int lex_try_trigraph(lex_file *lex, int old)
{
    int c2, c3;
    c2 = lex_fgetc(lex);
    if (c2 != '?') {
        lex_ungetch(lex, c2);
        return old;
    }

    c3 = lex_fgetc(lex);
    switch (c3) {
        case '=':  return '#';
        case '/':  return '\\';
        case '\'': return '^';
        case '(':  return '[';
        case ')':  return ']';
        case '!':  return '|';
        case '<':  return '{';
        case '>':  return '}';
        case '-':  return '~';
        default:
            lex_ungetch(lex, c3);
            lex_ungetch(lex, c2);
            return old;
    }
}

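/* lex_getch() below routes '?' through lex_try_trigraph() and '<', ':', '%'
 * through lex_try_digraph() (unless flags.nodigraphs is set), e.g.
 *   "??=" -> '#'   "??(" -> '['   "??<" -> '{'
 *   "<:"  -> '['   ":>"  -> ']'   "<%"  -> '{'   "%>" -> '}'   "%:" -> '#'
 */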
static int lex_try_digraph(lex_file *lex, int ch)
{
    int c2;
    c2 = lex_fgetc(lex);
    /* we just used fgetc() so count lines
     * need to offset a \n the ungetch would recognize
     */
    if (!lex->push_line && c2 == '\n')
        lex->line++;
    if (ch == '<' && c2 == ':')
        return '[';
    else if (ch == ':' && c2 == '>')
        return ']';
    else if (ch == '<' && c2 == '%')
        return '{';
    else if (ch == '%' && c2 == '>')
        return '}';
    else if (ch == '%' && c2 == ':')
        return '#';
    lex_ungetch(lex, c2);
    return ch;
}

static int lex_getch(lex_file *lex)
{
    int ch;

    if (lex->peekpos) {
        lex->peekpos--;
        if (!lex->push_line && lex->peek[lex->peekpos] == '\n')
            lex->line++;
        return lex->peek[lex->peekpos];
    }

    ch = lex_fgetc(lex);
    if (!lex->push_line && ch == '\n')
        lex->line++;
    else if (ch == '?')
        return lex_try_trigraph(lex, ch);
    else if (!lex->flags.nodigraphs && (ch == '<' || ch == ':' || ch == '%'))
        return lex_try_digraph(lex, ch);
    return ch;
}

static void lex_ungetch(lex_file *lex, int ch)
{
    lex->peek[lex->peekpos++] = ch;
    if (!lex->push_line && ch == '\n')
        lex->line--;
}

/* classify characters
 * some additions to the is*() functions of ctype.h
 */

/* Idents are alphanumeric, but they start with alpha or _ */
static bool isident_start(int ch)
{
    return isalpha(ch) || ch == '_';
}

static bool isident(int ch)
{
    return isident_start(ch) || isdigit(ch);
}

/* isxdigit_only is used when we already know it's not a digit
 * and want to see if it's a hex digit anyway.
 */
static bool isxdigit_only(int ch)
{
    return (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F');
}

/* Append a character to the token buffer */
static void lex_tokench(lex_file *lex, int ch)
{
    vec_push(lex->tok.value, ch);
}

/* Append a trailing null-byte */
static void lex_endtoken(lex_file *lex)
{
    vec_push(lex->tok.value, 0);
    vec_shrinkby(lex->tok.value, 1);
}

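/* Line-tracking directives are handled here. Recognized forms (anything else
 * is unrolled back into the input stream), with illustrative arguments:
 *   #pragma push(line)        #pragma pop(line)
 *   #pragma file(somefile.qc) #pragma line(42)
 */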
static bool lex_try_pragma(lex_file *lex)
{
    int ch;
    char *pragma  = NULL;
    char *command = NULL;
    char *param   = NULL;
    size_t line;

    if (lex->flags.preprocessing)
        return false;

    line = lex->line;

    ch = lex_getch(lex);
    if (ch != '#') {
        lex_ungetch(lex, ch);
        return false;
    }

    for (ch = lex_getch(lex); vec_size(pragma) < 8 && ch >= 'a' && ch <= 'z'; ch = lex_getch(lex))
        vec_push(pragma, ch);
    vec_push(pragma, 0);

    if (ch != ' ' || strcmp(pragma, "pragma")) {
        lex_ungetch(lex, ch);
        goto unroll;
    }

    for (ch = lex_getch(lex); vec_size(command) < 32 && ch >= 'a' && ch <= 'z'; ch = lex_getch(lex))
        vec_push(command, ch);
    vec_push(command, 0);

    if (ch != '(') {
        lex_ungetch(lex, ch);
        goto unroll;
    }

    for (ch = lex_getch(lex); vec_size(param) < 1024 && ch != ')' && ch != '\n'; ch = lex_getch(lex))
        vec_push(param, ch);
    vec_push(param, 0);

    if (ch != ')') {
        lex_ungetch(lex, ch);
        goto unroll;
    }

    if (!strcmp(command, "push")) {
        if (!strcmp(param, "line")) {
            lex->push_line++;
            if (lex->push_line == 1)
                --line;
        }
        else
            goto unroll;
    }
    else if (!strcmp(command, "pop")) {
        if (!strcmp(param, "line")) {
            if (lex->push_line)
                lex->push_line--;
            if (lex->push_line == 0)
                --line;
        }
        else
            goto unroll;
    }
    else if (!strcmp(command, "file")) {
        lex->name = util_strdup(param);
        vec_push(lex_filenames, lex->name);
    }
    else if (!strcmp(command, "line")) {
        line = strtol(param, NULL, 0)-1;
    }
    else
        goto unroll;

    lex->line = line;
    while (ch != '\n' && ch != EOF)
        ch = lex_getch(lex);
    return true;

unroll:
    if (command) {
        vec_pop(command);
        while (vec_size(command)) {
            lex_ungetch(lex, vec_last(command));
            vec_pop(command);
        }
        vec_free(command);
        lex_ungetch(lex, ' ');
    }
    if (param) {
        vec_pop(param);
        while (vec_size(param)) {
            lex_ungetch(lex, vec_last(param));
            vec_pop(param);
        }
        vec_free(param);
        lex_ungetch(lex, ' ');
    }
    if (pragma) {
        vec_pop(pragma);
        while (vec_size(pragma)) {
            lex_ungetch(lex, vec_last(pragma));
            vec_pop(pragma);
        }
        vec_free(pragma);
    }
    lex_ungetch(lex, '#');

    lex->line = line;
    return false;
}

/* Skip whitespace and comments and return the first
 * non-white character.
 * As this makes use of the above getch() ungetch() functions,
 * we don't need to care at all about line numbering anymore.
 *
 * In theory, this function should only be used at the beginning
 * of lexing, or when we *know* the next character is part of the token.
 * Otherwise, if the parser throws an error, the linenumber may not be
 * the line of the error, but the line of the next token AFTER the error.
 *
 * This is currently only problematic when using c-like string-continuation,
 * since comments and whitespaces are allowed between 2 such strings.
 * Example:
    printf( "line one\n"
    // A comment
            "A continuation of the previous string"
    // This line is skipped
          , foo);

 * In this case, if the parser decides it didn't actually want a string,
 * and uses lex->line to print an error, it will show the ', foo);' line's
 * linenumber.
 *
 * On the other hand, the parser is supposed to remember the line of the next
 * token's beginning. In this case we would want skipwhite() to be called
 * AFTER reading a token, so that the parser, before reading the NEXT token,
 * doesn't store the *comment's* linenumber, but the actual token's linenumber.
 *
 * THE SOLUTION
 * here is to store the line of the first character after skipping
 * the initial whitespace in lex->sline; this happens in lex_do.
 */
static int lex_skipwhite(lex_file *lex, bool hadwhite)
{
    int ch = 0;
    bool haswhite = hadwhite;

    do
    {
        ch = lex_getch(lex);
        while (ch != EOF && isspace(ch)) {
            if (ch == '\n') {
                if (lex_try_pragma(lex))
                    continue;
            }
            if (lex->flags.preprocessing) {
                if (ch == '\n') {
                    /* end-of-line */
                    /* see if there was whitespace first */
                    if (haswhite) { /* (vec_size(lex->tok.value)) { */
                        lex_ungetch(lex, ch);
                        lex_endtoken(lex);
                        return TOKEN_WHITE;
                    }
                    /* otherwise return EOL */
                    return TOKEN_EOL;
                }
                haswhite = true;
                lex_tokench(lex, ch);
            }
            ch = lex_getch(lex);
        }

        if (ch == '/') {
            ch = lex_getch(lex);
            if (ch == '/')
            {
                /* one line comment */
                ch = lex_getch(lex);

                if (lex->flags.preprocessing) {
                    haswhite = true;
                    /*
                    lex_tokench(lex, '/');
                    lex_tokench(lex, '/');
                    */
                    lex_tokench(lex, ' ');
                    lex_tokench(lex, ' ');
                }

                while (ch != EOF && ch != '\n') {
                    if (lex->flags.preprocessing)
                        lex_tokench(lex, ' '); /* ch); */
                    ch = lex_getch(lex);
                }
                if (lex->flags.preprocessing) {
                    lex_ungetch(lex, '\n');
                    lex_endtoken(lex);
                    return TOKEN_WHITE;
                }
                continue;
            }
            if (ch == '*')
            {
                /* multiline comment */
                if (lex->flags.preprocessing) {
                    haswhite = true;
                    /*
                    lex_tokench(lex, '/');
                    lex_tokench(lex, '*');
                    */
                    lex_tokench(lex, ' ');
                    lex_tokench(lex, ' ');
                }

                while (ch != EOF)
                {
                    ch = lex_getch(lex);
                    if (ch == '*') {
                        ch = lex_getch(lex);
                        if (ch == '/') {
                            if (lex->flags.preprocessing) {
                                /*
                                lex_tokench(lex, '*');
                                lex_tokench(lex, '/');
                                */
                                lex_tokench(lex, ' ');
                                lex_tokench(lex, ' ');
                            }
                            break;
                        }
                        lex_ungetch(lex, ch);
                    }
                    if (lex->flags.preprocessing) {
                        if (ch == '\n')
                            lex_tokench(lex, '\n');
                        else
                            lex_tokench(lex, ' '); /* ch); */
                    }
                }
                ch = ' '; /* cause TRUE in the isspace check */
                continue;
            }
            /* Otherwise roll back to the slash and break out of the loop */
            lex_ungetch(lex, ch);
            ch = '/';
            break;
        }
    } while (ch != EOF && isspace(ch));

    if (haswhite) {
        lex_endtoken(lex);
        lex_ungetch(lex, ch);
        return TOKEN_WHITE;
    }
    return ch;
}

/* Get a token */
static bool GMQCC_WARN lex_finish_ident(lex_file *lex)
{
    int ch;

    ch = lex_getch(lex);
    while (ch != EOF && isident(ch))
    {
        lex_tokench(lex, ch);
        ch = lex_getch(lex);
    }

    /* last ch was not an ident ch: */
    lex_ungetch(lex, ch);

    return true;
}

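/* Frame macro lists are collected below for lex_do()'s '$' handling; e.g.
 *   $frame idle1 idle2 idle3
 * (names are illustrative) assigns each name the next consecutive framevalue,
 * and later uses of those names lex as TOKEN_INTCONST.
 */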
/* read one ident for the frame list */
static int lex_parse_frame(lex_file *lex)
{
    int ch;

    lex_token_new(lex);

    ch = lex_getch(lex);
    while (ch != EOF && ch != '\n' && isspace(ch))
        ch = lex_getch(lex);

    if (ch == '\n')
        return 1;

    if (!isident_start(ch)) {
        lexerror(lex, "invalid framename, must start with one of a-z or _, got %c", ch);
        return -1;
    }

    lex_tokench(lex, ch);
    if (!lex_finish_ident(lex))
        return -1;
    lex_endtoken(lex);
    return 0;
}

/* read a list of $frames */
static bool lex_finish_frames(lex_file *lex)
{
    do {
        size_t i;
        int    rc;
        frame_macro m;

        rc = lex_parse_frame(lex);
        if (rc > 0) /* end of line */
            return true;
        if (rc < 0) /* error */
            return false;

        for (i = 0; i < vec_size(lex->frames); ++i) {
            if (!strcmp(lex->tok.value, lex->frames[i].name)) {
                lex->frames[i].value = lex->framevalue++;
                if (lexwarn(lex, WARN_FRAME_MACROS, "duplicate frame macro defined: `%s`", lex->tok.value))
                    return false;
                break;
            }
        }
        if (i < vec_size(lex->frames))
            continue;

        m.value = lex->framevalue++;
        m.name  = util_strdup(lex->tok.value);
        vec_shrinkto(lex->tok.value, 0);
        vec_push(lex->frames, m);
    } while (true);
}

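/* String/character constant bodies are collected below. Escapes handled:
 * the usual C ones (\\ \' \" \a \b \r \n \t \f \v), \xHH hex codes,
 * \{ddd} decimal codes up to 255, and the fteqcc-style glyph escapes
 * (\0..\9, \<, \-, \>, \[, \]) which map to raw engine charset values.
 */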
static int GMQCC_WARN lex_finish_string(lex_file *lex, int quote)
{
    int ch = 0;
    int nextch;

    while (ch != EOF)
    {
        ch = lex_getch(lex);
        if (ch == quote)
            return TOKEN_STRINGCONST;

        if (lex->flags.preprocessing && ch == '\\') {
            lex_tokench(lex, ch);
            ch = lex_getch(lex);
            if (ch == EOF) {
                lexerror(lex, "unexpected end of file");
                lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
                return (lex->tok.ttype = TOKEN_ERROR);
            }
            lex_tokench(lex, ch);
        }
        else if (ch == '\\') {
            ch = lex_getch(lex);
            if (ch == EOF) {
                lexerror(lex, "unexpected end of file");
                lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
                return (lex->tok.ttype = TOKEN_ERROR);
            }

            switch (ch) {
            case '\\': break;
            case '\'': break;
            case '"':  break;
            case 'a':  ch = '\a'; break;
            case 'b':  ch = '\b'; break;
            case 'r':  ch = '\r'; break;
            case 'n':  ch = '\n'; break;
            case 't':  ch = '\t'; break;
            case 'f':  ch = '\f'; break;
            case 'v':  ch = '\v'; break;
            case 'x':
            case 'X':
                /* same procedure as in fteqcc */
                ch = 0;
                nextch = lex_getch(lex);
                if (nextch >= '0' && nextch <= '9')
                    ch += nextch - '0';
                else if (nextch >= 'a' && nextch <= 'f')
                    ch += nextch - 'a' + 10;
                else if (nextch >= 'A' && nextch <= 'F')
                    ch += nextch - 'A' + 10;
                else {
                    lexerror(lex, "bad character code");
                    lex_ungetch(lex, nextch);
                    return (lex->tok.ttype = TOKEN_ERROR);
                }

                ch *= 0x10;
                nextch = lex_getch(lex);
                if (nextch >= '0' && nextch <= '9')
                    ch += nextch - '0';
                else if (nextch >= 'a' && nextch <= 'f')
                    ch += nextch - 'a' + 10;
                else if (nextch >= 'A' && nextch <= 'F')
                    ch += nextch - 'A' + 10;
                else {
                    lexerror(lex, "bad character code");
                    lex_ungetch(lex, nextch);
                    return (lex->tok.ttype = TOKEN_ERROR);
                }
                break;

            /* fteqcc support */
            case '0': case '1': case '2': case '3':
            case '4': case '5': case '6': case '7':
            case '8': case '9':
                ch = 18 + ch - '0';
                break;
            case '<':  ch = 29; break;
            case '-':  ch = 30; break;
            case '>':  ch = 31; break;
            case '[':  ch = 16; break;
            case ']':  ch = 17; break;
            case '{':
                ch = 0;
                for (nextch = lex_getch(lex); nextch != '}'; nextch = lex_getch(lex)) {
                    ch = ch * 10 + nextch - '0';
                    if (nextch < '0' || nextch > '9' || ch > 255) {
                        lexerror(lex, "bad character code");
                        return (lex->tok.ttype = TOKEN_ERROR);
                    }
                }
                break;
            case '\n': ch = '\n'; break;

            default:
                lexwarn(lex, WARN_UNKNOWN_CONTROL_SEQUENCE, "unrecognized control sequence: \\%c", ch);
                /* so we just add the character plus backslash no matter what it actually is */
                lex_tokench(lex, '\\');
            }
            /* add the character finally */
            lex_tokench(lex, ch);
        }
        else
            lex_tokench(lex, ch);
    }
    lexerror(lex, "unexpected end of file within string constant");
    lex_ungetch(lex, EOF); /* next token to be TOKEN_EOF */
    return (lex->tok.ttype = TOKEN_ERROR);
}

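/* Number scanning: accepts integers ("12"), hex with a 0x prefix ("0xff"),
 * and floats ("1.5", optionally with a trailing 'f' which is consumed).
 * A leading '.' (e.g. ".5") is routed here from lex_do(). The literal values
 * shown are only examples.
 */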
static int GMQCC_WARN lex_finish_digit(lex_file *lex, int lastch)
{
    bool ishex = false;

    int ch = lastch;

    /* parse a number... */
    if (ch == '.')
        lex->tok.ttype = TOKEN_FLOATCONST;
    else
        lex->tok.ttype = TOKEN_INTCONST;

    lex_tokench(lex, ch);

    ch = lex_getch(lex);
    if (ch != '.' && !isdigit(ch))
    {
        if (lastch != '0' || ch != 'x')
        {
            /* end of the number or EOF */
            lex_ungetch(lex, ch);
            lex_endtoken(lex);

            lex->tok.constval.i = lastch - '0';
            return lex->tok.ttype;
        }

        ishex = true;
    }

    /* EOF would have been caught above */

    if (ch != '.')
    {
        lex_tokench(lex, ch);
        ch = lex_getch(lex);
        while (isdigit(ch) || (ishex && isxdigit_only(ch)))
        {
            lex_tokench(lex, ch);
            ch = lex_getch(lex);
        }
    }
    /* NOT else, '.' can come from above as well */
    if (lex->tok.ttype != TOKEN_FLOATCONST && ch == '.' && !ishex)
    {
        /* Allow a decimal point in non-hex mode */
        lex->tok.ttype = TOKEN_FLOATCONST;
        lex_tokench(lex, ch);

        /* continue digits-only */
        ch = lex_getch(lex);
        while (isdigit(ch))
        {
            lex_tokench(lex, ch);
            ch = lex_getch(lex);
        }
    }
    /* put back the last character */
    /* but do not put back the trailing 'f' of a float */
    if (lex->tok.ttype == TOKEN_FLOATCONST && ch == 'f')
        ch = lex_getch(lex);

    /* generally we don't want words to follow numbers: */
    if (isident(ch)) {
        lexerror(lex, "unexpected trailing characters after number");
        return (lex->tok.ttype = TOKEN_ERROR);
    }
    lex_ungetch(lex, ch);

    lex_endtoken(lex);
    if (lex->tok.ttype == TOKEN_FLOATCONST)
        lex->tok.constval.f = strtod(lex->tok.value, NULL);
    else
        lex->tok.constval.i = strtol(lex->tok.value, NULL, 0);
    return lex->tok.ttype;
}

int lex_do(lex_file *lex)
{
    int ch, nextch, thirdch;
    bool hadwhite = false;

    lex_token_new(lex);
#if 0
    if (!lex->tok)
        return TOKEN_FATAL;
#endif

    while (true) {
        ch = lex_skipwhite(lex, hadwhite);
        hadwhite = true;
        if (!lex->flags.mergelines || ch != '\\')
            break;
        ch = lex_getch(lex);
        if (ch != '\n') {
            lex_ungetch(lex, ch);
            ch = '\\';
            break;
        }
        /* we reached a linemerge */
        lex_tokench(lex, '\n');
        continue;
    }

    if (lex->flags.preprocessing && (ch == TOKEN_WHITE || ch == TOKEN_EOL || ch == TOKEN_FATAL)) {
        return (lex->tok.ttype = ch);
    }

    lex->sline = lex->line;
    lex->tok.ctx.line = lex->sline;
    lex->tok.ctx.file = lex->name;

    if (lex->eof)
        return (lex->tok.ttype = TOKEN_FATAL);

    if (ch == EOF) {
        lex->eof = true;
        return (lex->tok.ttype = TOKEN_EOF);
    }

    /* modelgen / spritegen commands */
    if (ch == '$' && !lex->flags.preprocessing) {
        const char *v;
        size_t frame;

        ch = lex_getch(lex);
        if (!isident_start(ch)) {
            lexerror(lex, "hanging '$' modelgen/spritegen command line");
            return lex_do(lex);
        }
        lex_tokench(lex, ch);
        if (!lex_finish_ident(lex))
            return (lex->tok.ttype = TOKEN_ERROR);
        lex_endtoken(lex);
        /* skip the known commands */
        v = lex->tok.value;

        if (!strcmp(v, "frame") || !strcmp(v, "framesave"))
        {
            /* frame/framesave command works like an enum
             * similar to fteqcc we handle this in the lexer.
             * The reason for this is that it is sensitive to newlines,
             * which the parser is unaware of
             */
            if (!lex_finish_frames(lex))
                return (lex->tok.ttype = TOKEN_ERROR);
            return lex_do(lex);
        }

        if (!strcmp(v, "framevalue"))
        {
            ch = lex_getch(lex);
            while (ch != EOF && isspace(ch) && ch != '\n')
                ch = lex_getch(lex);

            if (!isdigit(ch)) {
                lexerror(lex, "$framevalue requires an integer parameter");
                return lex_do(lex);
            }

            lex_token_new(lex);
            lex->tok.ttype = lex_finish_digit(lex, ch);
            lex_endtoken(lex);
            if (lex->tok.ttype != TOKEN_INTCONST) {
                lexerror(lex, "$framevalue requires an integer parameter");
                return lex_do(lex);
            }
            lex->framevalue = lex->tok.constval.i;
            return lex_do(lex);
        }

        if (!strcmp(v, "framerestore"))
        {
            int rc;

            lex_token_new(lex);

            rc = lex_parse_frame(lex);

            if (rc > 0) {
                lexerror(lex, "$framerestore requires a framename parameter");
                return lex_do(lex);
            }
            if (rc < 0)
                return (lex->tok.ttype = TOKEN_FATAL);

            v = lex->tok.value;
            for (frame = 0; frame < vec_size(lex->frames); ++frame) {
                if (!strcmp(v, lex->frames[frame].name)) {
                    lex->framevalue = lex->frames[frame].value;
                    return lex_do(lex);
                }
            }
            lexerror(lex, "unknown framename `%s`", v);
            return lex_do(lex);
        }

        if (!strcmp(v, "modelname"))
        {
            int rc;

            lex_token_new(lex);

            rc = lex_parse_frame(lex);

            if (rc > 0) {
                lexerror(lex, "$modelname requires a parameter");
                return lex_do(lex);
            }
            if (rc < 0)
                return (lex->tok.ttype = TOKEN_FATAL);

            v = lex->tok.value;
            if (lex->modelname) {
                frame_macro m;
                m.value = lex->framevalue;
                m.name = lex->modelname;
                lex->modelname = NULL;
                vec_push(lex->frames, m);
            }
            lex->modelname = lex->tok.value;
            lex->tok.value = NULL;
            return lex_do(lex);
        }

        if (!strcmp(v, "flush"))
        {
            size_t fi;
            for (fi = 0; fi < vec_size(lex->frames); ++fi)
                mem_d(lex->frames[fi].name);
            vec_free(lex->frames);
            /* skip line (fteqcc does it too) */
            ch = lex_getch(lex);
            while (ch != EOF && ch != '\n')
                ch = lex_getch(lex);
            return lex_do(lex);
        }

        if (!strcmp(v, "cd") ||
            !strcmp(v, "origin") ||
            !strcmp(v, "base") ||
            !strcmp(v, "flags") ||
            !strcmp(v, "scale") ||
            !strcmp(v, "skin"))
        {
            /* skip line */
            ch = lex_getch(lex);
            while (ch != EOF && ch != '\n')
                ch = lex_getch(lex);
            return lex_do(lex);
        }

        for (frame = 0; frame < vec_size(lex->frames); ++frame) {
            if (!strcmp(v, lex->frames[frame].name)) {
                lex->tok.constval.i = lex->frames[frame].value;
                return (lex->tok.ttype = TOKEN_INTCONST);
            }
        }

        lexerror(lex, "invalid frame macro");
        return lex_do(lex);
    }

    /* single-character tokens */
    switch (ch)
    {
        case '[':
        case '(':
        case ':':
        case '?':
            lex_tokench(lex, ch);
            lex_endtoken(lex);
            if (lex->flags.noops)
                return (lex->tok.ttype = ch);
            else
                return (lex->tok.ttype = TOKEN_OPERATOR);
        case ')':
        case ';':
        case '{':
        case '}':
        case ']':

        case '#':
            lex_tokench(lex, ch);
            lex_endtoken(lex);
            return (lex->tok.ttype = ch);
        default:
            break;
    }

    if (ch == '.') {
        nextch = lex_getch(lex);
        /* digits starting with a dot */
        if (isdigit(nextch)) {
            lex_ungetch(lex, nextch);
            lex->tok.ttype = lex_finish_digit(lex, ch);
            lex_endtoken(lex);
            return lex->tok.ttype;
        }
        lex_ungetch(lex, nextch);
    }

    if (lex->flags.noops)
    {
        /* Detect characters early which are normally
         * operators OR PART of an operator.
         */
        switch (ch)
        {
            /*
            case '+':
            case '-':
            */
            case '*':
            case '/':
            case '<':
            case '>':
            case '=':
            case '&':
            case '|':
            case '^':
            case '~':
            case ',':
            case '!':
                lex_tokench(lex, ch);
                lex_endtoken(lex);
                return (lex->tok.ttype = ch);
            default:
                break;
        }

        if (ch == '.')
        {
            lex_tokench(lex, ch);
            /* peek ahead once */
            nextch = lex_getch(lex);
            if (nextch != '.') {
                lex_ungetch(lex, nextch);
                lex_endtoken(lex);
                return (lex->tok.ttype = ch);
            }
            /* peek ahead again */
            nextch = lex_getch(lex);
            if (nextch != '.') {
                lex_ungetch(lex, nextch);
                lex_ungetch(lex, '.');
                lex_endtoken(lex);
                return (lex->tok.ttype = ch);
            }
            /* fill the token to be "..." */
            lex_tokench(lex, ch);
            lex_tokench(lex, ch);
            lex_endtoken(lex);
            return (lex->tok.ttype = TOKEN_DOTS);
        }
    }

    if (ch == ',' || ch == '.') {
        lex_tokench(lex, ch);
        lex_endtoken(lex);
        return (lex->tok.ttype = TOKEN_OPERATOR);
    }

    if (ch == '+' || ch == '-' || /* ++, --, +=, -= and -> as well! */
        ch == '>' || ch == '<' || /* <<, >>, <=, >= */
        ch == '=' || ch == '!' || /* ==, != */
        ch == '&' || ch == '|')   /* &&, ||, &=, |= */
    {
        lex_tokench(lex, ch);

        nextch = lex_getch(lex);
        if (nextch == '=' || (nextch == ch && ch != '!')) {
            lex_tokench(lex, nextch);
        } else if (ch == '-' && nextch == '>') {
            lex_tokench(lex, nextch);
        } else if (ch == '&' && nextch == '~') {
            thirdch = lex_getch(lex);
            if (thirdch != '=') {
                lex_ungetch(lex, thirdch);
                lex_ungetch(lex, nextch);
            }
            else {
                lex_tokench(lex, nextch);
                lex_tokench(lex, thirdch);
            }
        } else
            lex_ungetch(lex, nextch);

        lex_endtoken(lex);
        return (lex->tok.ttype = TOKEN_OPERATOR);
    }

    /*
    if (ch == '^' || ch == '~' || ch == '!')
    {
        lex_tokench(lex, ch);
        lex_endtoken(lex);
        return (lex->tok.ttype = TOKEN_OPERATOR);
    }
    */

    if (ch == '*' || ch == '/') /* *=, /= */
    {
        lex_tokench(lex, ch);

        nextch = lex_getch(lex);
        if (nextch == '=') {
            lex_tokench(lex, nextch);
        } else
            lex_ungetch(lex, nextch);

        lex_endtoken(lex);
        return (lex->tok.ttype = TOKEN_OPERATOR);
    }

    if (isident_start(ch))
    {
        const char *v;

        lex_tokench(lex, ch);
        if (!lex_finish_ident(lex)) {
            /* error? */
            return (lex->tok.ttype = TOKEN_ERROR);
        }
        lex_endtoken(lex);
        lex->tok.ttype = TOKEN_IDENT;

        v = lex->tok.value;
        if (!strcmp(v, "void")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_VOID;
        } else if (!strcmp(v, "int")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_INTEGER;
        } else if (!strcmp(v, "float")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_FLOAT;
        } else if (!strcmp(v, "string")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_STRING;
        } else if (!strcmp(v, "entity")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_ENTITY;
        } else if (!strcmp(v, "vector")) {
            lex->tok.ttype = TOKEN_TYPENAME;
            lex->tok.constval.t = TYPE_VECTOR;
        } else {
            size_t kw;
            for (kw = 0; kw < num_keywords_qc; ++kw) {
                if (!strcmp(v, keywords_qc[kw]))
                    return (lex->tok.ttype = TOKEN_KEYWORD);
            }
            if (opts.standard != COMPILER_QCC) {
                for (kw = 0; kw < num_keywords_fg; ++kw) {
                    if (!strcmp(v, keywords_fg[kw]))
                        return (lex->tok.ttype = TOKEN_KEYWORD);
                }
            }
        }

        return lex->tok.ttype;
    }

    if (ch == '"')
    {
        lex->flags.nodigraphs = true;
        if (lex->flags.preprocessing)
            lex_tokench(lex, ch);
        lex->tok.ttype = lex_finish_string(lex, '"');
        if (lex->flags.preprocessing)
            lex_tokench(lex, ch);
        while (!lex->flags.preprocessing && lex->tok.ttype == TOKEN_STRINGCONST)
        {
            /* Allow c style "string" "continuation" */
            ch = lex_skipwhite(lex, false);
            if (ch != '"') {
                lex_ungetch(lex, ch);
                break;
            }

            lex->tok.ttype = lex_finish_string(lex, '"');
        }
        lex->flags.nodigraphs = false;
        lex_endtoken(lex);
        return lex->tok.ttype;
    }

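    /* e.g. '1 0 0' lexes as TOKEN_VECTORCONST while 'a' stays a character
     * constant (the literals shown are illustrative). */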
    if (ch == '\'')
    {
        /* we parse character constants like strings,
         * but return TOKEN_CHARCONST, or a vector type if it fits...
         * Likewise actual unescaping has to be done by the parser.
         * The difference is we don't allow 'char' 'continuation'.
         */
        if (lex->flags.preprocessing)
            lex_tokench(lex, ch);
        lex->tok.ttype = lex_finish_string(lex, '\'');
        if (lex->flags.preprocessing)
            lex_tokench(lex, ch);
        lex_endtoken(lex);

        lex->tok.ttype = TOKEN_CHARCONST;
        /* It's a vector if we can successfully scan 3 floats */
#ifdef WIN32
        if (sscanf_s(lex->tok.value, " %f %f %f ",
                     &lex->tok.constval.v.x, &lex->tok.constval.v.y, &lex->tok.constval.v.z) == 3)
#else
        if (sscanf(lex->tok.value, " %f %f %f ",
                   &lex->tok.constval.v.x, &lex->tok.constval.v.y, &lex->tok.constval.v.z) == 3)
#endif

        {
            lex->tok.ttype = TOKEN_VECTORCONST;
        }
        else
        {
            if (!lex->flags.preprocessing && strlen(lex->tok.value) > 1) {
                if (lexwarn(lex, WARN_MULTIBYTE_CHARACTER, "multibyte character: `%s`", lex->tok.value))
                    return (lex->tok.ttype = TOKEN_ERROR);
            }
            lex->tok.constval.i = lex->tok.value[0];
        }

        return lex->tok.ttype;
    }

    if (isdigit(ch))
    {
        lex->tok.ttype = lex_finish_digit(lex, ch);
        lex_endtoken(lex);
        return lex->tok.ttype;
    }

    if (lex->flags.preprocessing) {
        lex_tokench(lex, ch);
        lex_endtoken(lex);
        return (lex->tok.ttype = ch);
    }

    lexerror(lex, "unknown token");
    return (lex->tok.ttype = TOKEN_ERROR);
}