Tone down the current token list, in favor of adding them later.

Danila Fedorin 2018-02-17 13:32:55 -08:00
parent 841c2f15e5
commit a3ce8fbd9c
2 changed files with 3 additions and 35 deletions


@@ -54,21 +54,11 @@ struct libab_lexer_match_s {
 enum libab_lexer_token_e {
     TOKEN_CHAR = 0,
     TOKEN_ID,
-    TOKEN_TRUE,
-    TOKEN_FALSE,
     TOKEN_NUM,
-    TOKEN_STR,
-    TOKEN_CHAR_LIT,
-    TOKEN_KW_FUN,
-    TOKEN_KW_IF,
-    TOKEN_KW_ELSE,
-    TOKEN_KW_WHILE,
-    TOKEN_KW_DO,
-    TOKEN_KW_FOR,
-    TOKEN_KW_RETURN,
     TOKEN_OP_INFIX,
-    TOKEN_OP_POSTFIX,
     TOKEN_OP_PREFIX,
+    TOKEN_OP_POSTFIX,
+    TOKEN_FUN,
     TOKEN_LAST
 };


@@ -10,34 +10,12 @@ libab_result libab_lexer_init(libab_lexer* lexer) {
     const char* words[] = {
         ".",
         "[a-zA-Z][a-zA-Z0-9_]*",
-        "true",
-        "false",
-        "[0-9]+(\\.[0-9]*)?",
-        "\"[^\"]*\"",
-        "'[^']'",
-        "fun",
-        "if",
-        "else",
-        "while",
-        "do",
-        "for",
-        "return"
+        "[0-9]+(\\.[0-9]*)?"
     };
     libab_lexer_token tokens[] = {
         TOKEN_CHAR,
         TOKEN_ID,
         TOKEN_NUM,
-        TOKEN_TRUE,
-        TOKEN_FALSE,
-        TOKEN_STR,
-        TOKEN_CHAR_LIT,
-        TOKEN_KW_FUN,
-        TOKEN_KW_IF,
-        TOKEN_KW_ELSE,
-        TOKEN_KW_WHILE,
-        TOKEN_KW_DO,
-        TOKEN_KW_FOR,
-        TOKEN_KW_RETURN
     };
     const size_t count = sizeof(tokens)/sizeof(libab_lexer_token);
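
A note on the structure being edited: words and tokens are parallel arrays, so words[i] is the regular expression whose matches should be classified as tokens[i], and count is derived from the tokens array. That invariant is why every change above removes an entry from both arrays together. Below is a minimal sketch of how such a table is presumably consumed during initialization; register_pattern is a hypothetical stand-in, not part of the libabacus API, and the token values are illustrative.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for whatever libab_lexer_init actually does to
 * compile each regex and associate it with a token type; the real
 * libabacus call is not shown in this diff. */
static int register_pattern(const char* regex, int token_type) {
    printf("pattern %-24s -> token %d\n", regex, token_type);
    return 0;
}

/* Walk the parallel arrays in lockstep. words[i] and tokens[i] must
 * describe the same token, which is why the commit edits both arrays
 * in matching positions. */
static int register_all(const char** words, const int* tokens, size_t count) {
    for (size_t i = 0; i < count; i++) {
        int result = register_pattern(words[i], tokens[i]);
        if (result != 0) return result; /* stop at the first failure */
    }
    return 0;
}

int main(void) {
    /* The three patterns that survive this commit. */
    const char* words[] = { ".", "[a-zA-Z][a-zA-Z0-9_]*", "[0-9]+(\\.[0-9]*)?" };
    const int tokens[] = { 0 /* TOKEN_CHAR */, 1 /* TOKEN_ID */, 2 /* TOKEN_NUM */ };
    return register_all(words, tokens, sizeof(tokens) / sizeof(tokens[0]));
}

Deriving count from one array, as the real code does with sizeof(tokens)/sizeof(libab_lexer_token), keeps the loop bound correct as entries are added back later, though it silently trusts that both arrays stay the same length.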