diff --git a/include/lexer.h b/include/lexer.h
index aa6b389..de3b30b 100644
--- a/include/lexer.h
+++ b/include/lexer.h
@@ -54,21 +54,11 @@ struct libab_lexer_match_s {
 enum libab_lexer_token_e {
     TOKEN_CHAR = 0,
     TOKEN_ID,
-    TOKEN_TRUE,
-    TOKEN_FALSE,
     TOKEN_NUM,
-    TOKEN_STR,
-    TOKEN_CHAR_LIT,
-    TOKEN_KW_FUN,
-    TOKEN_KW_IF,
-    TOKEN_KW_ELSE,
-    TOKEN_KW_WHILE,
-    TOKEN_KW_DO,
-    TOKEN_KW_FOR,
-    TOKEN_KW_RETURN,
     TOKEN_OP_INFIX,
-    TOKEN_OP_POSTFIX,
     TOKEN_OP_PREFIX,
+    TOKEN_OP_POSTFIX,
+    TOKEN_FUN,
     TOKEN_LAST
 };
diff --git a/src/lexer.c b/src/lexer.c
index f6e0b34..23c71a1 100644
--- a/src/lexer.c
+++ b/src/lexer.c
@@ -10,34 +10,12 @@ libab_result libab_lexer_init(libab_lexer* lexer) {
     const char* words[] = {
         ".",
         "[a-zA-Z][a-zA-Z0-9_]*",
-        "true",
-        "false",
-        "[0-9]+(\\.[0-9]*)?",
-        "\"[^\"]*\"",
-        "'[^']'",
-        "fun",
-        "if",
-        "else",
-        "while",
-        "do",
-        "for",
-        "return"
+        "[0-9]+(\\.[0-9]*)?"
     };
     libab_lexer_token tokens[] = {
         TOKEN_CHAR,
         TOKEN_ID,
-        TOKEN_TRUE,
-        TOKEN_FALSE,
         TOKEN_NUM,
-        TOKEN_STR,
-        TOKEN_CHAR_LIT,
-        TOKEN_KW_FUN,
-        TOKEN_KW_IF,
-        TOKEN_KW_ELSE,
-        TOKEN_KW_WHILE,
-        TOKEN_KW_DO,
-        TOKEN_KW_FOR,
-        TOKEN_KW_RETURN
     };
     const size_t count = sizeof(tokens)/sizeof(libab_lexer_token);
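
Reviewer note: libab_lexer_init registers its patterns as two parallel arrays, so words[i] lexes to tokens[i]. After this change only the catch-all character, identifier, and number patterns remain, which means input such as "fun" or "true" now matches the identifier pattern and keyword handling presumably moves to a later stage. Below is a minimal standalone sketch of that parallel-array pattern, using only the patterns and token kinds visible in the diff; the enum name token_kind and the demo program itself are illustrative and not part of the libabacus API.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the trimmed token kinds kept by this change. */
enum token_kind {
    TOKEN_CHAR = 0,
    TOKEN_ID,
    TOKEN_NUM
};

int main(void) {
    /* Patterns and kinds are kept in lockstep: words[i] lexes to tokens[i]. */
    const char* words[] = {
        ".",                     /* catch-all single character    */
        "[a-zA-Z][a-zA-Z0-9_]*", /* identifier                    */
        "[0-9]+(\\.[0-9]*)?"     /* integer or decimal number     */
    };
    enum token_kind tokens[] = {
        TOKEN_CHAR,
        TOKEN_ID,
        TOKEN_NUM
    };
    /* Same sizeof idiom as lexer.c uses to derive the registration count. */
    const size_t count = sizeof(tokens) / sizeof(tokens[0]);
    size_t i;

    for (i = 0; i < count; i++) {
        printf("pattern %-24s -> token kind %d\n", words[i], (int)tokens[i]);
    }
    return 0;
}

Compiling and running the sketch prints each remaining pattern next to its token kind, mirroring how the arrays and count interact in libab_lexer_init above.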