Compare commits
187 Commits
margin-rew
...
donations
| Author | SHA1 | Date | |
|---|---|---|---|
| d5f478b3c6 | |||
| 0f96b93532 | |||
| 5449affbc8 | |||
| 2cf19900db | |||
| efe5d08430 | |||
| 994e9ed8d2 | |||
| 72af5cb7f0 | |||
| 308ee34025 | |||
| 9839befdf1 | |||
| d688df6c92 | |||
| 24eef25984 | |||
| 77ae0be899 | |||
| ca939da28e | |||
| 5d0920cb6d | |||
| d1ea7b5364 | |||
| ebdb986e2a | |||
| 4bb6695c2e | |||
| a6c5a42c1d | |||
| c44c718d06 | |||
| 5e4097453b | |||
| bfeae89ab5 | |||
| 755364c0df | |||
| dcb1e9a736 | |||
| c8543961af | |||
| cbad3b76eb | |||
| b3ff2fe135 | |||
| 6a6f25547e | |||
| 43dfee56cc | |||
| 6f9a2ce092 | |||
| 06014eade9 | |||
| 6f92a50c83 | |||
| 60eb50737d | |||
| 250746e686 | |||
| 3bac151b08 | |||
| c61d9ccb99 | |||
| 56ad03b833 | |||
| 2f9e6278ba | |||
| 17e0fbc6fb | |||
| 7ee7feadf3 | |||
| b36ea558a3 | |||
| 17d6a75465 | |||
| d5541bc985 | |||
| 98a46e9fd4 | |||
| 2e3074df00 | |||
| b3dc3e690b | |||
| b1943ede2f | |||
| 0467e4e12f | |||
| 8164624cee | |||
| e0451d026c | |||
| 1f1345477f | |||
| 44529e872f | |||
| a10996954e | |||
| 4d1dfb5f66 | |||
| f97b624688 | |||
| 8215c59122 | |||
| eb97bd9c3e | |||
| d2e100fe4b | |||
| de09a1f6bd | |||
| c40672e762 | |||
| 565d4a6955 | |||
| 8f0f2eb35e | |||
| 234b795157 | |||
| e317c56c99 | |||
| 29d12a9914 | |||
| b459e9cbfe | |||
| 52abe73ef7 | |||
| f0fe481bcf | |||
| 222446a937 | |||
| e7edd43034 | |||
| 2bc2c282e1 | |||
| 5cc92d3a9d | |||
| 4be8a25699 | |||
| d3421733e1 | |||
| 4c099a54e8 | |||
| 9f77f07ed2 | |||
| 04ab1a137c | |||
| 53744ac772 | |||
| 50a1c33adb | |||
| d153af5212 | |||
| a336b27b6c | |||
| 97eb4b6e3e | |||
| 430768eac5 | |||
| 5db864881a | |||
| d3b1047d37 | |||
| 98cac103c4 | |||
| 7226d66f67 | |||
| 8a352ed3ea | |||
| 02f8306c7b | |||
| cf6f353f20 | |||
| 7a631b3557 | |||
| 5e13047846 | |||
| c17d532802 | |||
| 55e4e61906 | |||
| f2f88ab9ca | |||
| ba418d357f | |||
| 0e3f16139d | |||
| 55486d511f | |||
| 6080094c41 | |||
| 6b8d3b0f8a | |||
| 725958137a | |||
| 1f6b4bef74 | |||
| fe1e0a6de0 | |||
| 1f3c42fc44 | |||
| 8bf67c7dc3 | |||
| 13214cee96 | |||
| 579c7bad92 | |||
| f00a6a7783 | |||
| 2a81fdd9fb | |||
| 17c59e595c | |||
| ad2576eae2 | |||
| 72d8179cc5 | |||
| dbabec0db6 | |||
| 76675fbc9b | |||
| ca395b5c09 | |||
| 1a05d5ff7a | |||
| 56f0dbd02f | |||
| 9fc0ff961d | |||
| 73441dc93b | |||
| df5f5eba1c | |||
| d950b8dc90 | |||
| 85394b185d | |||
| 86b49f9cc3 | |||
| 9769b3e396 | |||
| e337992410 | |||
| d5c3a44041 | |||
| eade42be49 | |||
| d0fac50cfd | |||
| dd4aa6fb9d | |||
| aa867b2e5f | |||
| 2fa2be4b9e | |||
| d5536467f6 | |||
| 67cb61c93f | |||
| 578d580683 | |||
| 789f277780 | |||
| 308ec615b9 | |||
| 0e40c9e216 | |||
| 5dbf75b5e4 | |||
| b921ddfc8d | |||
| bf3c81fe24 | |||
| 06cbd93f05 | |||
| 6c3780d9ea | |||
| 6f0667bb28 | |||
| 8368283a3e | |||
| 18ee3a1526 | |||
| b0e501f086 | |||
| 385ae59133 | |||
| 49469bdf12 | |||
| 020417e971 | |||
| eff0de5330 | |||
| b219f6855e | |||
| 068d0218b0 | |||
| 65215ccdd6 | |||
| 3e9f6a14f2 | |||
| 7623787b1c | |||
| e15daa8f6d | |||
| 298cf6599c | |||
| 841930a8ef | |||
| 9b37e496cb | |||
| 58e6ad9e79 | |||
| 3aa2a6783e | |||
| d64a0d1fcd | |||
| ba141031dd | |||
| ebdc63f5a0 | |||
| 5af0a09714 | |||
| 8a2bc2660c | |||
| e59b8cf403 | |||
| b078ef9a22 | |||
| fdaec6d5a9 | |||
| b631346379 | |||
| e9f2378b47 | |||
| 7d2f78d25c | |||
| 1f734a613c | |||
| a3c299b057 | |||
| 12aedfce92 | |||
| 65645346a2 | |||
| cb65e89e53 | |||
| 6a2fec8ef4 | |||
| aa59c90810 | |||
| 2b317930a0 | |||
| e7d56dd4bd | |||
| a4fedb276d | |||
| 277c0a2ce6 | |||
| ef3c61e9e6 | |||
| 1908126607 | |||
| 2d77f8489f | |||
| 0371651fdd | |||
| 01734d24f7 |
9
.gitmodules
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
[submodule "code/aoc-2020"]
|
||||
path = code/aoc-2020
|
||||
url = https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020.git
|
||||
[submodule "code/libabacus"]
|
||||
path = code/libabacus
|
||||
url = https://dev.danilafe.com/Experiments/libabacus
|
||||
[submodule "themes/vanilla"]
|
||||
path = themes/vanilla
|
||||
url = https://dev.danilafe.com/Web-Projects/vanilla-hugo.git
|
||||
36
assets/scss/donate.scss
Normal file
@@ -0,0 +1,36 @@
|
||||
@import "../../themes/vanilla/assets/scss/mixins.scss";
|
||||
|
||||
.donation-methods {
|
||||
padding: 0;
|
||||
border: none;
|
||||
border-spacing: 0 0.5rem;
|
||||
|
||||
td {
|
||||
padding: 0;
|
||||
overflow: hidden;
|
||||
|
||||
&:first-child {
|
||||
@include bordered-block;
|
||||
text-align: right;
|
||||
border-right: none;
|
||||
border-top-right-radius: 0;
|
||||
border-bottom-right-radius: 0;
|
||||
padding-left: 0.5em;
|
||||
padding-right: 0.5rem;
|
||||
}
|
||||
|
||||
&:last-child {
|
||||
@include bordered-block;
|
||||
border-top-left-radius: 0;
|
||||
border-bottom-left-radius: 0;
|
||||
}
|
||||
}
|
||||
|
||||
code {
|
||||
width: 100%;
|
||||
box-sizing: border-box;
|
||||
border: none;
|
||||
display: inline-block;
|
||||
padding: 0.25rem;
|
||||
}
|
||||
}
|
||||
11
assets/scss/gametheory.scss
Normal file
@@ -0,0 +1,11 @@
|
||||
@import "variables.scss";
|
||||
@import "mixins.scss";
|
||||
|
||||
.assumption-number {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.assumption {
|
||||
@include bordered-block;
|
||||
padding: 0.8rem;
|
||||
}
|
||||
1
code/aoc-2020
Submodule
53
code/compiler/13/CMakeLists.txt
Normal file
@@ -0,0 +1,53 @@
|
||||
cmake_minimum_required(VERSION 3.1)
|
||||
project(compiler)
|
||||
|
||||
# We want C++17 for std::optional
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
|
||||
# Find all the required packages
|
||||
find_package(BISON)
|
||||
find_package(FLEX)
|
||||
find_package(LLVM REQUIRED CONFIG)
|
||||
|
||||
# Set up the flex and bison targets
|
||||
bison_target(parser
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/parser.y
|
||||
${CMAKE_CURRENT_BINARY_DIR}/parser.cpp
|
||||
COMPILE_FLAGS "-d")
|
||||
flex_target(scanner
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/scanner.l
|
||||
${CMAKE_CURRENT_BINARY_DIR}/scanner.cpp)
|
||||
add_flex_bison_dependency(scanner parser)
|
||||
|
||||
# Find all the relevant LLVM components
|
||||
llvm_map_components_to_libnames(LLVM_LIBS core x86asmparser x86codegen)
|
||||
|
||||
# Create compiler executable
|
||||
add_executable(compiler
|
||||
definition.cpp definition.hpp
|
||||
parsed_type.cpp parsed_type.hpp
|
||||
ast.cpp ast.hpp
|
||||
llvm_context.cpp llvm_context.hpp
|
||||
type_env.cpp type_env.hpp
|
||||
env.cpp env.hpp
|
||||
type.cpp type.hpp
|
||||
error.cpp error.hpp
|
||||
binop.cpp binop.hpp
|
||||
instruction.cpp instruction.hpp
|
||||
graph.cpp graph.hpp
|
||||
global_scope.cpp global_scope.hpp
|
||||
parse_driver.cpp parse_driver.hpp
|
||||
mangler.cpp mangler.hpp
|
||||
compiler.cpp compiler.hpp
|
||||
${BISON_parser_OUTPUTS}
|
||||
${FLEX_scanner_OUTPUTS}
|
||||
main.cpp
|
||||
)
|
||||
|
||||
# Configure compiler executable
|
||||
target_include_directories(compiler PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
target_include_directories(compiler PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
|
||||
target_include_directories(compiler PUBLIC ${LLVM_INCLUDE_DIRS})
|
||||
target_compile_definitions(compiler PUBLIC ${LLVM_DEFINITIONS})
|
||||
target_link_libraries(compiler ${LLVM_LIBS})
|
||||
454
code/compiler/13/ast.cpp
Normal file
@@ -0,0 +1,454 @@
|
||||
#include "ast.hpp"
|
||||
#include <ostream>
|
||||
#include <type_traits>
|
||||
#include "binop.hpp"
|
||||
#include "error.hpp"
|
||||
#include "instruction.hpp"
|
||||
#include "type.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "env.hpp"
|
||||
|
||||
static void print_indent(int n, std::ostream& to) {
|
||||
while(n--) to << " ";
|
||||
}
|
||||
|
||||
void ast_int::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "INT: " << value << std::endl;
|
||||
}
|
||||
|
||||
void ast_int::find_free(std::set<std::string>& into) {
|
||||
|
||||
}
|
||||
|
||||
type_ptr ast_int::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
return type_ptr(new type_app(env->lookup_type("Int")));
|
||||
}
|
||||
|
||||
void ast_int::translate(global_scope& scope) {
|
||||
|
||||
}
|
||||
|
||||
void ast_int::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
into.push_back(instruction_ptr(new instruction_pushint(value)));
|
||||
}
|
||||
|
||||
void ast_lid::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "LID: " << id << std::endl;
|
||||
}
|
||||
|
||||
void ast_lid::find_free(std::set<std::string>& into) {
|
||||
into.insert(id);
|
||||
}
|
||||
|
||||
type_ptr ast_lid::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
type_scheme_ptr lid_type = env->lookup(id);
|
||||
if(!lid_type)
|
||||
throw type_error("unknown identifier " + id, loc);
|
||||
return lid_type->instantiate(mgr);
|
||||
}
|
||||
|
||||
void ast_lid::translate(global_scope& scope) {
|
||||
|
||||
}
|
||||
|
||||
void ast_lid::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
into.push_back(instruction_ptr(
|
||||
(this->env->is_global(id)) ?
|
||||
(instruction*) new instruction_pushglobal(this->env->get_mangled_name(id)) :
|
||||
(instruction*) new instruction_push(env->get_offset(id))));
|
||||
}
|
||||
|
||||
void ast_uid::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "UID: " << id << std::endl;
|
||||
}
|
||||
|
||||
void ast_uid::find_free(std::set<std::string>& into) {
|
||||
|
||||
}
|
||||
|
||||
type_ptr ast_uid::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
type_scheme_ptr uid_type = env->lookup(id);
|
||||
if(!uid_type)
|
||||
throw type_error("unknown constructor " + id, loc);
|
||||
return uid_type->instantiate(mgr);
|
||||
}
|
||||
|
||||
void ast_uid::translate(global_scope& scope) {
|
||||
|
||||
}
|
||||
|
||||
void ast_uid::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
into.push_back(instruction_ptr(
|
||||
new instruction_pushglobal(this->env->get_mangled_name(id))));
|
||||
}
|
||||
|
||||
void ast_binop::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "BINOP: " << op_name(op) << std::endl;
|
||||
left->print(indent + 1, to);
|
||||
right->print(indent + 1, to);
|
||||
}
|
||||
|
||||
void ast_binop::find_free(std::set<std::string>& into) {
|
||||
left->find_free(into);
|
||||
right->find_free(into);
|
||||
}
|
||||
|
||||
type_ptr ast_binop::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
type_ptr ltype = left->typecheck(mgr, env);
|
||||
type_ptr rtype = right->typecheck(mgr, env);
|
||||
type_ptr ftype = env->lookup(op_name(op))->instantiate(mgr);
|
||||
if(!ftype) throw type_error("unknown binary operator " + op_name(op), loc);
|
||||
|
||||
// For better type errors, we first require binary function,
|
||||
// and only then unify each argument. This way, we can
|
||||
// precisely point out which argument is "wrong".
|
||||
|
||||
type_ptr return_type = mgr.new_type();
|
||||
type_ptr second_type = mgr.new_type();
|
||||
type_ptr first_type = mgr.new_type();
|
||||
type_ptr arrow_one = type_ptr(new type_arr(second_type, return_type));
|
||||
type_ptr arrow_two = type_ptr(new type_arr(first_type, arrow_one));
|
||||
|
||||
mgr.unify(ftype, arrow_two, loc);
|
||||
mgr.unify(first_type, ltype, left->loc);
|
||||
mgr.unify(second_type, rtype, right->loc);
|
||||
return return_type;
|
||||
}
|
||||
|
||||
void ast_binop::translate(global_scope& scope) {
|
||||
left->translate(scope);
|
||||
right->translate(scope);
|
||||
}
|
||||
|
||||
void ast_binop::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
right->compile(env, into);
|
||||
left->compile(env_ptr(new env_offset(1, env)), into);
|
||||
|
||||
auto mangled_name = this->env->get_mangled_name(op_name(op));
|
||||
into.push_back(instruction_ptr(new instruction_pushglobal(mangled_name)));
|
||||
into.push_back(instruction_ptr(new instruction_mkapp()));
|
||||
into.push_back(instruction_ptr(new instruction_mkapp()));
|
||||
}
|
||||
|
||||
void ast_app::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "APP:" << std::endl;
|
||||
left->print(indent + 1, to);
|
||||
right->print(indent + 1, to);
|
||||
}
|
||||
|
||||
void ast_app::find_free(std::set<std::string>& into) {
|
||||
left->find_free(into);
|
||||
right->find_free(into);
|
||||
}
|
||||
|
||||
type_ptr ast_app::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
type_ptr ltype = left->typecheck(mgr, env);
|
||||
type_ptr rtype = right->typecheck(mgr, env);
|
||||
|
||||
type_ptr return_type = mgr.new_type();
|
||||
type_ptr arrow = type_ptr(new type_arr(rtype, return_type));
|
||||
mgr.unify(arrow, ltype, left->loc);
|
||||
return return_type;
|
||||
}
|
||||
|
||||
void ast_app::translate(global_scope& scope) {
|
||||
left->translate(scope);
|
||||
right->translate(scope);
|
||||
}
|
||||
|
||||
void ast_app::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
right->compile(env, into);
|
||||
left->compile(env_ptr(new env_offset(1, env)), into);
|
||||
into.push_back(instruction_ptr(new instruction_mkapp()));
|
||||
}
|
||||
|
||||
void ast_case::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "CASE: " << std::endl;
|
||||
for(auto& branch : branches) {
|
||||
print_indent(indent + 1, to);
|
||||
branch->pat->print(to);
|
||||
to << std::endl;
|
||||
branch->expr->print(indent + 2, to);
|
||||
}
|
||||
}
|
||||
|
||||
void ast_case::find_free(std::set<std::string>& into) {
|
||||
of->find_free(into);
|
||||
for(auto& branch : branches) {
|
||||
std::set<std::string> free_in_branch;
|
||||
std::set<std::string> pattern_variables;
|
||||
branch->pat->find_variables(pattern_variables);
|
||||
branch->expr->find_free(free_in_branch);
|
||||
for(auto& free : free_in_branch) {
|
||||
if(pattern_variables.find(free) == pattern_variables.end())
|
||||
into.insert(free);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type_ptr ast_case::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
type_var* var;
|
||||
type_ptr case_type = mgr.resolve(of->typecheck(mgr, env), var);
|
||||
type_ptr branch_type = mgr.new_type();
|
||||
|
||||
for(auto& branch : branches) {
|
||||
type_env_ptr new_env = type_scope(env);
|
||||
branch->pat->typecheck(case_type, mgr, new_env);
|
||||
type_ptr curr_branch_type = branch->expr->typecheck(mgr, new_env);
|
||||
mgr.unify(curr_branch_type, branch_type, branch->expr->loc);
|
||||
}
|
||||
|
||||
input_type = mgr.resolve(case_type, var);
|
||||
type_app* app_type;
|
||||
if(!(app_type = dynamic_cast<type_app*>(input_type.get())) ||
|
||||
!dynamic_cast<type_data*>(app_type->constructor.get())) {
|
||||
throw type_error("attempting case analysis of non-data type");
|
||||
}
|
||||
|
||||
return branch_type;
|
||||
}
|
||||
|
||||
void ast_case::translate(global_scope& scope) {
|
||||
of->translate(scope);
|
||||
for(auto& branch : branches) {
|
||||
branch->expr->translate(scope);
|
||||
}
|
||||
}
|
||||
|
||||
void ast_case::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
type_app* app_type = dynamic_cast<type_app*>(input_type.get());
|
||||
type_data* type = dynamic_cast<type_data*>(app_type->constructor.get());
|
||||
|
||||
of->compile(env, into);
|
||||
into.push_back(instruction_ptr(new instruction_eval()));
|
||||
|
||||
instruction_jump* jump_instruction = new instruction_jump();
|
||||
into.push_back(instruction_ptr(jump_instruction));
|
||||
for(auto& branch : branches) {
|
||||
std::vector<instruction_ptr> branch_instructions;
|
||||
pattern_var* vpat;
|
||||
pattern_constr* cpat;
|
||||
|
||||
if((vpat = dynamic_cast<pattern_var*>(branch->pat.get()))) {
|
||||
branch->expr->compile(env_ptr(new env_offset(1, env)), branch_instructions);
|
||||
|
||||
for(auto& constr_pair : type->constructors) {
|
||||
if(jump_instruction->tag_mappings.find(constr_pair.second.tag) !=
|
||||
jump_instruction->tag_mappings.end())
|
||||
break;
|
||||
|
||||
jump_instruction->tag_mappings[constr_pair.second.tag] =
|
||||
jump_instruction->branches.size();
|
||||
}
|
||||
jump_instruction->branches.push_back(std::move(branch_instructions));
|
||||
} else if((cpat = dynamic_cast<pattern_constr*>(branch->pat.get()))) {
|
||||
env_ptr new_env = env;
|
||||
for(auto it = cpat->params.rbegin(); it != cpat->params.rend(); it++) {
|
||||
new_env = env_ptr(new env_var(*it, new_env));
|
||||
}
|
||||
|
||||
branch_instructions.push_back(instruction_ptr(new instruction_split(
|
||||
cpat->params.size())));
|
||||
branch->expr->compile(new_env, branch_instructions);
|
||||
branch_instructions.push_back(instruction_ptr(new instruction_slide(
|
||||
cpat->params.size())));
|
||||
|
||||
int new_tag = type->constructors[cpat->constr].tag;
|
||||
if(jump_instruction->tag_mappings.find(new_tag) !=
|
||||
jump_instruction->tag_mappings.end())
|
||||
throw type_error("technically not a type error: duplicate pattern");
|
||||
|
||||
jump_instruction->tag_mappings[new_tag] =
|
||||
jump_instruction->branches.size();
|
||||
jump_instruction->branches.push_back(std::move(branch_instructions));
|
||||
}
|
||||
}
|
||||
|
||||
for(auto& constr_pair : type->constructors) {
|
||||
if(jump_instruction->tag_mappings.find(constr_pair.second.tag) ==
|
||||
jump_instruction->tag_mappings.end())
|
||||
throw type_error("non-total pattern");
|
||||
}
|
||||
}
|
||||
|
||||
void ast_let::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "LET: " << std::endl;
|
||||
in->print(indent + 1, to);
|
||||
}
|
||||
|
||||
void ast_let::find_free(std::set<std::string>& into) {
|
||||
definitions.find_free(into);
|
||||
std::set<std::string> all_free;
|
||||
in->find_free(all_free);
|
||||
for(auto& free_var : all_free) {
|
||||
if(definitions.defs_defn.find(free_var) == definitions.defs_defn.end())
|
||||
into.insert(free_var);
|
||||
}
|
||||
}
|
||||
|
||||
type_ptr ast_let::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
definitions.typecheck(mgr, env);
|
||||
return in->typecheck(mgr, definitions.env);
|
||||
}
|
||||
|
||||
void ast_let::translate(global_scope& scope) {
|
||||
for(auto& def : definitions.defs_data) {
|
||||
def.second->into_globals(scope);
|
||||
}
|
||||
for(auto& def : definitions.defs_defn) {
|
||||
size_t original_params = def.second->params.size();
|
||||
std::string original_name = def.second->name;
|
||||
auto& global_definition = def.second->into_global(scope);
|
||||
size_t captured = global_definition.params.size() - original_params;
|
||||
|
||||
type_env_ptr mangled_env = type_scope(env);
|
||||
mangled_env->bind(def.first, env->lookup(def.first), visibility::global);
|
||||
mangled_env->set_mangled_name(def.first, global_definition.name);
|
||||
|
||||
ast_ptr global_app(new ast_lid(original_name));
|
||||
global_app->env = mangled_env;
|
||||
for(auto& param : global_definition.params) {
|
||||
if(!(captured--)) break;
|
||||
ast_ptr new_arg(new ast_lid(param));
|
||||
new_arg->env = env;
|
||||
global_app = ast_ptr(new ast_app(std::move(global_app), std::move(new_arg)));
|
||||
global_app->env = env;
|
||||
}
|
||||
translated_definitions.push_back({ def.first, std::move(global_app) });
|
||||
}
|
||||
in->translate(scope);
|
||||
}
|
||||
|
||||
void ast_let::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
into.push_back(instruction_ptr(new instruction_alloc(translated_definitions.size())));
|
||||
env_ptr new_env = env;
|
||||
for(auto& def : translated_definitions) {
|
||||
new_env = env_ptr(new env_var(def.first, std::move(new_env)));
|
||||
}
|
||||
int offset = translated_definitions.size() - 1;
|
||||
for(auto& def : translated_definitions) {
|
||||
def.second->compile(new_env, into);
|
||||
into.push_back(instruction_ptr(new instruction_update(offset--)));
|
||||
}
|
||||
in->compile(new_env, into);
|
||||
into.push_back(instruction_ptr(new instruction_slide(translated_definitions.size())));
|
||||
}
|
||||
|
||||
void ast_lambda::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "LAMBDA";
|
||||
for(auto& param : params) {
|
||||
to << " " << param;
|
||||
}
|
||||
to << std::endl;
|
||||
body->print(indent+1, to);
|
||||
}
|
||||
|
||||
void ast_lambda::find_free(std::set<std::string>& into) {
|
||||
body->find_free(free_variables);
|
||||
for(auto& param : params) {
|
||||
free_variables.erase(param);
|
||||
}
|
||||
into.insert(free_variables.begin(), free_variables.end());
|
||||
}
|
||||
|
||||
type_ptr ast_lambda::typecheck(type_mgr& mgr, type_env_ptr& env) {
|
||||
this->env = env;
|
||||
var_env = type_scope(env);
|
||||
type_ptr return_type = mgr.new_type();
|
||||
type_ptr full_type = return_type;
|
||||
|
||||
for(auto it = params.rbegin(); it != params.rend(); it++) {
|
||||
type_ptr param_type = mgr.new_type();
|
||||
var_env->bind(*it, param_type);
|
||||
full_type = type_ptr(new type_arr(std::move(param_type), full_type));
|
||||
}
|
||||
|
||||
mgr.unify(return_type, body->typecheck(mgr, var_env), body->loc);
|
||||
return full_type;
|
||||
}
|
||||
|
||||
void ast_lambda::translate(global_scope& scope) {
|
||||
std::vector<std::string> function_params;
|
||||
for(auto& free_variable : free_variables) {
|
||||
if(env->is_global(free_variable)) continue;
|
||||
function_params.push_back(free_variable);
|
||||
}
|
||||
size_t captured_count = function_params.size();
|
||||
function_params.insert(function_params.end(), params.begin(), params.end());
|
||||
|
||||
auto& new_function = scope.add_function("lambda", std::move(function_params), std::move(body));
|
||||
type_env_ptr mangled_env = type_scope(env);
|
||||
mangled_env->bind("lambda", type_scheme_ptr(nullptr), visibility::global);
|
||||
mangled_env->set_mangled_name("lambda", new_function.name);
|
||||
ast_ptr new_application = ast_ptr(new ast_lid("lambda"));
|
||||
new_application->env = mangled_env;
|
||||
|
||||
for(auto& param : new_function.params) {
|
||||
if(!(captured_count--)) break;
|
||||
ast_ptr new_arg = ast_ptr(new ast_lid(param));
|
||||
new_arg->env = env;
|
||||
new_application = ast_ptr(new ast_app(std::move(new_application), std::move(new_arg)));
|
||||
new_application->env = env;
|
||||
}
|
||||
translated = std::move(new_application);
|
||||
}
|
||||
|
||||
void ast_lambda::compile(const env_ptr& env, std::vector<instruction_ptr>& into) const {
|
||||
translated->compile(env, into);
|
||||
}
|
||||
|
||||
void pattern_var::print(std::ostream& to) const {
|
||||
to << var;
|
||||
}
|
||||
|
||||
void pattern_var::find_variables(std::set<std::string>& into) const {
|
||||
into.insert(var);
|
||||
}
|
||||
|
||||
void pattern_var::typecheck(type_ptr t, type_mgr& mgr, type_env_ptr& env) const {
|
||||
env->bind(var, t);
|
||||
}
|
||||
|
||||
void pattern_constr::print(std::ostream& to) const {
|
||||
to << constr;
|
||||
for(auto& param : params) {
|
||||
to << " " << param;
|
||||
}
|
||||
}
|
||||
|
||||
void pattern_constr::find_variables(std::set<std::string>& into) const {
|
||||
into.insert(params.begin(), params.end());
|
||||
}
|
||||
|
||||
void pattern_constr::typecheck(type_ptr t, type_mgr& mgr, type_env_ptr& env) const {
|
||||
type_scheme_ptr constructor_type_scheme = env->lookup(constr);
|
||||
if(!constructor_type_scheme) {
|
||||
throw type_error("pattern using unknown constructor " + constr, loc);
|
||||
}
|
||||
type_ptr constructor_type = constructor_type_scheme->instantiate(mgr);
|
||||
|
||||
for(auto& param : params) {
|
||||
type_arr* arr = dynamic_cast<type_arr*>(constructor_type.get());
|
||||
if(!arr) throw type_error("too many parameters in constructor pattern", loc);
|
||||
|
||||
env->bind(param, arr->left);
|
||||
constructor_type = arr->right;
|
||||
}
|
||||
|
||||
mgr.unify(t, constructor_type, loc);
|
||||
}
|
||||
195
code/compiler/13/ast.hpp
Normal file
@@ -0,0 +1,195 @@
|
||||
#pragma once
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <set>
|
||||
#include "type.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "binop.hpp"
|
||||
#include "instruction.hpp"
|
||||
#include "env.hpp"
|
||||
#include "definition.hpp"
|
||||
#include "location.hh"
|
||||
#include "global_scope.hpp"
|
||||
|
||||
struct ast {
|
||||
type_env_ptr env;
|
||||
yy::location loc;
|
||||
|
||||
ast(yy::location l) : env(nullptr), loc(std::move(l)) {}
|
||||
virtual ~ast() = default;
|
||||
|
||||
virtual void print(int indent, std::ostream& to) const = 0;
|
||||
virtual void find_free(std::set<std::string>& into) = 0;
|
||||
virtual type_ptr typecheck(type_mgr& mgr, type_env_ptr& env) = 0;
|
||||
virtual void translate(global_scope& scope) = 0;
|
||||
virtual void compile(const env_ptr& env,
|
||||
std::vector<instruction_ptr>& into) const = 0;
|
||||
};
|
||||
|
||||
using ast_ptr = std::unique_ptr<ast>;
|
||||
|
||||
struct pattern {
|
||||
yy::location loc;
|
||||
|
||||
pattern(yy::location l) : loc(std::move(l)) {}
|
||||
virtual ~pattern() = default;
|
||||
|
||||
virtual void print(std::ostream& to) const = 0;
|
||||
virtual void find_variables(std::set<std::string>& into) const = 0;
|
||||
virtual void typecheck(type_ptr t, type_mgr& mgr, type_env_ptr& env) const = 0;
|
||||
};
|
||||
|
||||
using pattern_ptr = std::unique_ptr<pattern>;
|
||||
|
||||
struct branch {
|
||||
pattern_ptr pat;
|
||||
ast_ptr expr;
|
||||
|
||||
branch(pattern_ptr p, ast_ptr a)
|
||||
: pat(std::move(p)), expr(std::move(a)) {}
|
||||
};
|
||||
|
||||
using branch_ptr = std::unique_ptr<branch>;
|
||||
|
||||
struct ast_int : public ast {
|
||||
int value;
|
||||
|
||||
explicit ast_int(int v, yy::location l = yy::location())
|
||||
: ast(std::move(l)), value(v) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_lid : public ast {
|
||||
std::string id;
|
||||
|
||||
explicit ast_lid(std::string i, yy::location l = yy::location())
|
||||
: ast(std::move(l)), id(std::move(i)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_uid : public ast {
|
||||
std::string id;
|
||||
|
||||
explicit ast_uid(std::string i, yy::location l = yy::location())
|
||||
: ast(std::move(l)), id(std::move(i)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_binop : public ast {
|
||||
binop op;
|
||||
ast_ptr left;
|
||||
ast_ptr right;
|
||||
|
||||
ast_binop(binop o, ast_ptr l, ast_ptr r, yy::location lc = yy::location())
|
||||
: ast(std::move(lc)), op(o), left(std::move(l)), right(std::move(r)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_app : public ast {
|
||||
ast_ptr left;
|
||||
ast_ptr right;
|
||||
|
||||
ast_app(ast_ptr l, ast_ptr r, yy::location lc = yy::location())
|
||||
: ast(std::move(lc)), left(std::move(l)), right(std::move(r)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_case : public ast {
|
||||
ast_ptr of;
|
||||
type_ptr input_type;
|
||||
std::vector<branch_ptr> branches;
|
||||
|
||||
ast_case(ast_ptr o, std::vector<branch_ptr> b, yy::location l = yy::location())
|
||||
: ast(std::move(l)), of(std::move(o)), branches(std::move(b)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_let : public ast {
|
||||
using basic_definition = std::pair<std::string, ast_ptr>;
|
||||
|
||||
definition_group definitions;
|
||||
ast_ptr in;
|
||||
|
||||
std::vector<basic_definition> translated_definitions;
|
||||
|
||||
ast_let(definition_group g, ast_ptr i, yy::location l = yy::location())
|
||||
: ast(std::move(l)), definitions(std::move(g)), in(std::move(i)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct ast_lambda : public ast {
|
||||
std::vector<std::string> params;
|
||||
ast_ptr body;
|
||||
|
||||
type_env_ptr var_env;
|
||||
|
||||
std::set<std::string> free_variables;
|
||||
ast_ptr translated;
|
||||
|
||||
ast_lambda(std::vector<std::string> ps, ast_ptr b, yy::location l = yy::location())
|
||||
: ast(std::move(l)), params(std::move(ps)), body(std::move(b)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void find_free(std::set<std::string>& into);
|
||||
type_ptr typecheck(type_mgr& mgr, type_env_ptr& env);
|
||||
void translate(global_scope& scope);
|
||||
void compile(const env_ptr& env, std::vector<instruction_ptr>& into) const;
|
||||
};
|
||||
|
||||
struct pattern_var : public pattern {
|
||||
std::string var;
|
||||
|
||||
pattern_var(std::string v, yy::location l = yy::location())
|
||||
: pattern(std::move(l)), var(std::move(v)) {}
|
||||
|
||||
void print(std::ostream &to) const;
|
||||
void find_variables(std::set<std::string>& into) const;
|
||||
void typecheck(type_ptr t, type_mgr& mgr, type_env_ptr& env) const;
|
||||
};
|
||||
|
||||
struct pattern_constr : public pattern {
|
||||
std::string constr;
|
||||
std::vector<std::string> params;
|
||||
|
||||
pattern_constr(std::string c, std::vector<std::string> p, yy::location l = yy::location())
|
||||
: pattern(std::move(l)), constr(std::move(c)), params(std::move(p)) {}
|
||||
|
||||
void print(std::ostream &to) const;
|
||||
void find_variables(std::set<std::string>& into) const;
|
||||
virtual void typecheck(type_ptr t, type_mgr& mgr, type_env_ptr& env) const;
|
||||
};
|
||||
21
code/compiler/13/binop.cpp
Normal file
@@ -0,0 +1,21 @@
|
||||
#include "binop.hpp"
|
||||
|
||||
std::string op_name(binop op) {
|
||||
switch(op) {
|
||||
case PLUS: return "+";
|
||||
case MINUS: return "-";
|
||||
case TIMES: return "*";
|
||||
case DIVIDE: return "/";
|
||||
}
|
||||
return "??";
|
||||
}
|
||||
|
||||
std::string op_action(binop op) {
|
||||
switch(op) {
|
||||
case PLUS: return "plus";
|
||||
case MINUS: return "minus";
|
||||
case TIMES: return "times";
|
||||
case DIVIDE: return "divide";
|
||||
}
|
||||
return "??";
|
||||
}
|
||||
17
code/compiler/13/binop.hpp
Normal file
@@ -0,0 +1,17 @@
|
||||
#pragma once
// NOTE(review): <array> appears unused here — confirm before removing.
#include <array>
#include <string>

// The binary arithmetic operators built into the language.
enum binop {
    PLUS,
    MINUS,
    TIMES,
    DIVIDE
};

// Every member of binop, in declaration order; used to iterate over all
// operators (e.g. when generating their runtime implementations).
constexpr binop all_binops[] = {
    PLUS, MINUS, TIMES, DIVIDE
};

// Source-level symbol for an operator, e.g. PLUS -> "+".
std::string op_name(binop op);
// Identifier-friendly name for an operator, e.g. PLUS -> "plus".
std::string op_action(binop op);
|
||||
153
code/compiler/13/compiler.cpp
Normal file
@@ -0,0 +1,153 @@
|
||||
#include "compiler.hpp"
|
||||
#include "binop.hpp"
|
||||
#include "error.hpp"
|
||||
#include "global_scope.hpp"
|
||||
#include "parse_driver.hpp"
|
||||
#include "type.hpp"
|
||||
#include "type_env.hpp"
|
||||
|
||||
#include "llvm/IR/LegacyPassManager.h"
|
||||
#include "llvm/IR/Verifier.h"
|
||||
#include "llvm/Support/TargetSelect.h"
|
||||
#include "llvm/Support/TargetRegistry.h"
|
||||
#include "llvm/Support/raw_ostream.h"
|
||||
#include "llvm/Support/FileSystem.h"
|
||||
#include "llvm/Target/TargetOptions.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
|
||||
// Registers the built-in types in the global type environment.
// Currently only the machine integer type "Int".
void compiler::add_default_types() {
    global_env->bind_type("Int", type_ptr(new type_base("Int")));
}
|
||||
|
||||
// Binds a binary operator (by its symbol, e.g. "+") in the global
// environment with the given type, and assigns it a fresh mangled name
// derived from the operator's action name (e.g. "plus").
void compiler::add_binop_type(binop op, type_ptr type) {
    auto name = mng.new_mangled_name(op_action(op));
    global_env->bind(op_name(op), std::move(type), visibility::global);
    global_env->set_mangled_name(op_name(op), name);
}
|
||||
|
||||
// Gives every built-in arithmetic operator the type Int -> Int -> Int.
void compiler::add_default_function_types() {
    type_ptr int_type = global_env->lookup_type("Int");
    assert(int_type != nullptr);
    // Operators work on the applied form of Int (a nullary type application).
    type_ptr int_type_app = type_ptr(new type_app(int_type));

    // Int -> Int -> Int, shared by all four "closed" operators.
    type_ptr closed_int_op_type(
            new type_arr(int_type_app, type_ptr(new type_arr(int_type_app, int_type_app))));

    // NOTE(review): currently duplicates all_binops; kept as a separate
    // list in case a future operator is not closed over Int.
    constexpr binop closed_ops[] = { PLUS, MINUS, TIMES, DIVIDE };
    for(auto& op : closed_ops) add_binop_type(op, closed_int_op_type);
}
|
||||
|
||||
// Runs the parser over the input file; on success the parsed definitions
// accumulate into global_defs via the driver. Throws if the file cannot
// be opened.
void compiler::parse() {
    if(!driver())
        throw compiler_error("failed to open file");
}
|
||||
|
||||
// Typechecks all parsed definitions.
void compiler::typecheck() {
    std::set<std::string> free_variables;
    // find_free also records, per definition, which free variables refer
    // to sibling definitions (nearby_variables) — state the subsequent
    // dependency analysis relies on. The collected set itself is unused
    // here.
    global_defs.find_free(free_variables);
    global_defs.typecheck(type_m, global_env);
}
|
||||
|
||||
// Lowers typechecked definitions into the global scope: data definitions
// become constructor globals, function definitions become global
// functions.
void compiler::translate() {
    for(auto& data : global_defs.defs_data) {
        data.second->into_globals(global_scp);
    }
    for(auto& defn : global_defs.defs_defn) {
        auto& function = defn.second->into_global(global_scp);
        // Record the generated (mangled) name so later references resolve.
        defn.second->env->set_mangled_name(defn.first, function.name);
    }
}
|
||||
|
||||
// Compiles every global function to G-machine instructions.
void compiler::compile() {
    global_scp.compile();
}
|
||||
|
||||
// Emits the LLVM body for one built-in binary operator as a two-argument
// function: evaluate both arguments, apply the primitive, then update the
// redex root and pop the spine.
void compiler::create_llvm_binop(binop op) {
    auto new_function =
        ctx.create_custom_function(global_env->get_mangled_name(op_name(op)), 2);
    std::vector<instruction_ptr> instructions;
    // Push 1 / Eval twice: evaluating the first argument leaves its result
    // on top of the stack, so the second argument is again at offset 1.
    instructions.push_back(instruction_ptr(new instruction_push(1)));
    instructions.push_back(instruction_ptr(new instruction_eval()));
    instructions.push_back(instruction_ptr(new instruction_push(1)));
    instructions.push_back(instruction_ptr(new instruction_eval()));
    instructions.push_back(instruction_ptr(new instruction_binop(op)));
    // Overwrite the application node with the result, drop both arguments.
    instructions.push_back(instruction_ptr(new instruction_update(2)));
    instructions.push_back(instruction_ptr(new instruction_pop(2)));
    ctx.get_builder().SetInsertPoint(&new_function->getEntryBlock());
    for(auto& instruction : instructions) {
        instruction->gen_llvm(ctx, new_function);
    }
    ctx.get_builder().CreateRetVoid();
}
|
||||
|
||||
// Generates LLVM IR: first the built-in operator bodies, then every
// user-defined global.
void compiler::generate_llvm() {
    for(auto op : all_binops) {
        create_llvm_binop(op);
    }

    global_scp.generate_llvm(ctx);
}
|
||||
|
||||
void compiler::output_llvm(const std::string& into) {
|
||||
std::string targetTriple = llvm::sys::getDefaultTargetTriple();
|
||||
|
||||
llvm::InitializeNativeTarget();
|
||||
llvm::InitializeNativeTargetAsmParser();
|
||||
llvm::InitializeNativeTargetAsmPrinter();
|
||||
|
||||
std::string error;
|
||||
const llvm::Target* target =
|
||||
llvm::TargetRegistry::lookupTarget(targetTriple, error);
|
||||
if (!target) {
|
||||
std::cerr << error << std::endl;
|
||||
} else {
|
||||
std::string cpu = "generic";
|
||||
std::string features = "";
|
||||
llvm::TargetOptions options;
|
||||
std::unique_ptr<llvm::TargetMachine> targetMachine(
|
||||
target->createTargetMachine(targetTriple, cpu, features,
|
||||
options, llvm::Optional<llvm::Reloc::Model>()));
|
||||
|
||||
ctx.get_module().setDataLayout(targetMachine->createDataLayout());
|
||||
ctx.get_module().setTargetTriple(targetTriple);
|
||||
|
||||
std::error_code ec;
|
||||
llvm::raw_fd_ostream file(into, ec, llvm::sys::fs::F_None);
|
||||
if (ec) {
|
||||
throw compiler_error("failed to open object file for writing");
|
||||
} else {
|
||||
llvm::CodeGenFileType type = llvm::CGFT_ObjectFile;
|
||||
llvm::legacy::PassManager pm;
|
||||
if (targetMachine->addPassesToEmitFile(pm, file, NULL, type)) {
|
||||
throw compiler_error("failed to add passes to pass manager");
|
||||
} else {
|
||||
pm.run(ctx.get_module());
|
||||
file.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wires the compilation pipeline together for one input file and installs
// the built-in types and operator signatures.
compiler::compiler(const std::string& filename)
    : file_m(), global_defs(), driver(file_m, global_defs, filename),
      global_env(new type_env), type_m(), mng(), global_scp(mng), ctx() {
    add_default_types();
    add_default_function_types();
}
|
||||
|
||||
// Runs the full pipeline: parse -> typecheck -> translate -> G-machine
// compile -> LLVM codegen -> object file emission into `into`.
void compiler::operator()(const std::string& into) {
    parse();
    typecheck();
    translate();
    compile();
    generate_llvm();
    output_llvm(into);
}
|
||||
|
||||
// Accessor for the file manager (used by callers for error reporting).
file_mgr& compiler::get_file_manager() {
    return file_m;
}

// Accessor for the type manager (used by callers to print resolved types).
type_mgr& compiler::get_type_manager() {
    return type_m;
}
|
||||
37
code/compiler/13/compiler.hpp
Normal file
@@ -0,0 +1,37 @@
|
||||
#pragma once
|
||||
#include "binop.hpp"
|
||||
#include "parse_driver.hpp"
|
||||
#include "definition.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "type.hpp"
|
||||
#include "global_scope.hpp"
|
||||
#include "mangler.hpp"
|
||||
#include "llvm_context.hpp"
|
||||
|
||||
// Owns all state for one compilation and drives the phases in order;
// invoke via operator() to compile the given file into an object file.
class compiler {
    private:
        file_mgr file_m;              // input file bookkeeping / error printing
        definition_group global_defs; // top-level parsed definitions
        parse_driver driver;          // parser front-end
        type_env_ptr global_env;      // global type environment
        type_mgr type_m;              // fresh type variables + unification
        mangler mng;                  // unique global name generation
        global_scope global_scp;      // lowered functions and constructors
        llvm_context ctx;             // LLVM module/builder state

        // Pipeline phases, in execution order:
        void add_default_types();
        void add_binop_type(binop op, type_ptr type);
        void add_default_function_types();
        void parse();
        void typecheck();
        void translate();
        void compile();
        void create_llvm_binop(binop op);
        void generate_llvm();
        void output_llvm(const std::string& into);
    public:
        compiler(const std::string& filename);
        // Runs the full pipeline, writing an object file to `into`.
        void operator()(const std::string& into);
        file_mgr& get_file_manager();
        type_mgr& get_type_manager();
};
|
||||
148
code/compiler/13/definition.cpp
Normal file
@@ -0,0 +1,148 @@
|
||||
#include "definition.hpp"
|
||||
#include <cassert>
|
||||
#include "error.hpp"
|
||||
#include "ast.hpp"
|
||||
#include "instruction.hpp"
|
||||
#include "llvm_context.hpp"
|
||||
#include "type.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "graph.hpp"
|
||||
#include <llvm/IR/DerivedTypes.h>
|
||||
#include <llvm/IR/Function.h>
|
||||
#include <llvm/IR/Type.h>
|
||||
|
||||
// Computes the definition's free variables: everything free in the body
// minus the formal parameters. Result is stored in `free_variables`.
void definition_defn::find_free() {
    body->find_free(free_variables);
    for(auto& param : params) {
        free_variables.erase(param);
    }
}
|
||||
|
||||
// Creates fresh type variables for the definition — one per parameter
// (bound in a child scope, var_env) plus one for the return value — and
// binds the resulting function type to `name` in `env` with visibility v.
void definition_defn::insert_types(type_mgr& mgr, type_env_ptr& env, visibility v) {
    this->env = env;
    var_env = type_scope(env);
    return_type = mgr.new_type();
    full_type = return_type;

    // Build the arrow type right-to-left so parameters nest correctly.
    for(auto it = params.rbegin(); it != params.rend(); it++) {
        type_ptr param_type = mgr.new_type();
        full_type = type_ptr(new type_arr(param_type, full_type));
        var_env->bind(*it, param_type);
    }
    env->bind(name, full_type, v);
}
|
||||
|
||||
// Typechecks the body in the parameter scope and unifies the body's type
// with the placeholder return type created in insert_types.
void definition_defn::typecheck(type_mgr& mgr) {
    type_ptr body_type = body->typecheck(mgr, var_env);
    mgr.unify(return_type, body_type);
}
|
||||
|
||||
|
||||
// Lowers this definition to a global function. Free variables that are
// not global become extra leading parameters (to be supplied at each use
// site). Note: `body` is moved out, so a definition must not be lowered
// twice.
global_function& definition_defn::into_global(global_scope& scope) {
    std::vector<std::string> all_params;
    for(auto& free : free_variables) {
        if(env->is_global(free)) continue;
        all_params.push_back(free);
    }
    all_params.insert(all_params.end(), params.begin(), params.end());
    body->translate(scope);
    return scope.add_function(name, std::move(all_params), std::move(body));
}
|
||||
|
||||
// Registers the data type's shell — its name and arity (number of type
// variables) — so that later definitions can refer to it. Constructors
// are added separately by insert_constructors.
void definition_data::insert_types(type_env_ptr& env) {
    this->env = env;
    env->bind_type(name, type_ptr(new type_data(name, vars.size())));
}
|
||||
|
||||
// Assigns each constructor a tag and binds it in the environment with a
// universally quantified function type ending in this data type applied
// to its declared type variables.
void definition_data::insert_constructors() const {
    type_ptr this_type_ptr = env->lookup_type(name);
    // Safe cast: insert_types registered `name` as a type_data.
    type_data* this_type = static_cast<type_data*>(this_type_ptr.get());
    int next_tag = 0;

    // Build the shared return type: this data type applied to one
    // type_var per declared variable, rejecting duplicate variables.
    std::set<std::string> var_set;
    type_app* return_app = new type_app(std::move(this_type_ptr));
    type_ptr return_type(return_app);
    for(auto& var : vars) {
        if(var_set.find(var) != var_set.end())
            throw compiler_error(
                    "type variable " + var +
                    " used twice in data type definition.", loc);
        var_set.insert(var);
        return_app->arguments.push_back(type_ptr(new type_var(var)));
    }

    for(auto& constructor : constructors) {
        constructor->tag = next_tag;
        this_type->constructors[constructor->name] = { next_tag++ };

        // Fold argument types right-to-left into an arrow chain ending
        // in the data type itself.
        type_ptr full_type = return_type;
        for(auto it = constructor->types.rbegin(); it != constructor->types.rend(); it++) {
            type_ptr type = (*it)->to_type(var_set, env);
            full_type = type_ptr(new type_arr(type, full_type));
        }

        // Quantify over all declared type variables.
        type_scheme_ptr full_scheme(new type_scheme(std::move(full_type)));
        full_scheme->forall.insert(full_scheme->forall.begin(), vars.begin(), vars.end());
        env->bind(constructor->name, full_scheme, visibility::global);
    }
}
|
||||
|
||||
// Creates one global constructor per declared constructor and records its
// mangled name so later references resolve to the generated global.
void definition_data::into_globals(global_scope& scope) {
    for(auto& constructor : constructors) {
        global_constructor& c = scope.add_constructor(
                constructor->name, constructor->tag, constructor->types.size());
        env->set_mangled_name(constructor->name, c.name);
    }
}
|
||||
|
||||
// Computes free variables for the whole group. A free variable defined by
// a sibling definition is recorded as that definition's nearby_variables
// (an intra-group dependency); everything else escapes into `into`.
void definition_group::find_free(std::set<std::string>& into) {
    for(auto& def_pair : defs_defn) {
        def_pair.second->find_free();
        for(auto& free_var : def_pair.second->free_variables) {
            if(defs_defn.find(free_var) == defs_defn.end()) {
                into.insert(free_var);
            } else {
                def_pair.second->nearby_variables.insert(free_var);
            }
        }
    }
}
|
||||
|
||||
// Typechecks the whole group: data types first (shells, then
// constructors), then function definitions in dependency order.
void definition_group::typecheck(type_mgr& mgr, type_env_ptr& env) {
    this->env = type_scope(env);

    // Two passes so mutually referential data types can see each other.
    for(auto& def_data : defs_data) {
        def_data.second->insert_types(this->env);
    }
    for(auto& def_data : defs_data) {
        def_data.second->insert_constructors();
    }

    // Build the call graph between sibling definitions. nearby_variables
    // was populated by definition_group::find_free, called earlier.
    function_graph dependency_graph;

    for(auto& def_defn : defs_defn) {
        def_defn.second->find_free();
        dependency_graph.add_function(def_defn.second->name);

        for(auto& dependency : def_defn.second->nearby_variables) {
            assert(defs_defn.find(dependency) != defs_defn.end());
            dependency_graph.add_edge(def_defn.second->name, dependency);
        }
    }

    // Process strongly-connected groups bottom-up (reverse of the
    // computed order): declare placeholder types for every member, check
    // the bodies, then generalize the group's members together.
    std::vector<group_ptr> groups = dependency_graph.compute_order();
    for(auto it = groups.rbegin(); it != groups.rend(); it++) {
        auto& group = *it;
        for(auto& def_defnn_name : group->members) {
            auto& def_defn = defs_defn.find(def_defnn_name)->second;
            def_defn->insert_types(mgr, this->env, vis);
        }
        for(auto& def_defnn_name : group->members) {
            auto& def_defn = defs_defn.find(def_defnn_name)->second;
            def_defn->typecheck(mgr);
        }
        for(auto& def_defnn_name : group->members) {
            this->env->generalize(def_defnn_name, *group, mgr);
        }
    }
}
|
||||
91
code/compiler/13/definition.hpp
Normal file
@@ -0,0 +1,91 @@
|
||||
#pragma once
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include "instruction.hpp"
|
||||
#include "llvm_context.hpp"
|
||||
#include "parsed_type.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "location.hh"
|
||||
#include "global_scope.hpp"
|
||||
|
||||
struct ast;
|
||||
using ast_ptr = std::unique_ptr<ast>;
|
||||
|
||||
// One constructor of a data type: its name and the (parsed) types of its
// arguments. `tag` is assigned later, in definition_data::insert_constructors.
struct constructor {
    std::string name;
    std::vector<parsed_type_ptr> types;
    // Runtime discriminant. NOTE(review): int8_t caps a data type at 128
    // constructors — confirm this limit is acceptable.
    int8_t tag;

    constructor(std::string n, std::vector<parsed_type_ptr> ts)
        : name(std::move(n)), types(std::move(ts)) {}
};
|
||||
|
||||
using constructor_ptr = std::unique_ptr<constructor>;
|
||||
|
||||
// A function definition (top-level or let-bound): name, parameters, body.
struct definition_defn {
    std::string name;
    std::vector<std::string> params;
    ast_ptr body;
    yy::location loc;

    // Filled in during typechecking / lowering:
    type_env_ptr env;     // scope the definition lives in
    type_env_ptr var_env; // child scope holding the parameters
    std::set<std::string> free_variables;   // free in body, minus params
    std::set<std::string> nearby_variables; // free vars bound by sibling defs
    type_ptr full_type;   // the complete function type
    type_ptr return_type; // placeholder for the result type

    definition_defn(
            std::string n,
            std::vector<std::string> p,
            ast_ptr b,
            yy::location l = yy::location())
        : name(std::move(n)), params(std::move(p)), body(std::move(b)), loc(std::move(l)) {

    }

    // Computes free_variables from the body.
    void find_free();
    // Creates placeholder types and binds `name` in `env`.
    void insert_types(type_mgr& mgr, type_env_ptr& env, visibility v);
    // Checks the body against the placeholder return type.
    void typecheck(type_mgr& mgr);

    // Lowers into a global function; consumes `body`.
    global_function& into_global(global_scope& scope);
};
|
||||
|
||||
using definition_defn_ptr = std::unique_ptr<definition_defn>;
|
||||
|
||||
// A data type definition: its name, type variables, and constructors.
struct definition_data {
    std::string name;
    std::vector<std::string> vars;
    std::vector<constructor_ptr> constructors;
    yy::location loc;

    type_env_ptr env; // environment the type was registered in

    definition_data(
            std::string n,
            std::vector<std::string> vs,
            std::vector<constructor_ptr> cs,
            yy::location l = yy::location())
        : name(std::move(n)), vars(std::move(vs)), constructors(std::move(cs)), loc(std::move(l)) {}

    // Registers the type itself (name + arity) in `env`.
    void insert_types(type_env_ptr& env);
    // Assigns tags and binds each constructor's quantified type.
    void insert_constructors() const;

    // Emits one global constructor per declared constructor.
    void into_globals(global_scope& scope);
};
|
||||
|
||||
using definition_data_ptr = std::unique_ptr<definition_data>;
|
||||
|
||||
// A set of definitions introduced together (the whole program, or one
// let-block), typechecked as a unit.
struct definition_group {
    std::map<std::string, definition_data_ptr> defs_data;
    std::map<std::string, definition_defn_ptr> defs_defn;
    visibility vis; // visibility given to every member definition
    type_env_ptr env;

    definition_group(visibility v = visibility::local) : vis(v) {}

    // Free variables of the group as a whole; also records intra-group
    // dependencies in each member's nearby_variables.
    void find_free(std::set<std::string>& into);
    // Typechecks data types, then functions in dependency order.
    void typecheck(type_mgr& mgr, type_env_ptr& env);
};
|
||||
24
code/compiler/13/env.cpp
Normal file
@@ -0,0 +1,24 @@
|
||||
#include "env.hpp"
|
||||
#include <cassert>
|
||||
|
||||
// Distance (in stack slots) from the top of the compile-time environment
// to `name`. Asserts if the variable is absent; call has_variable first
// when absence is possible.
int env_var::get_offset(const std::string& name) const {
    if(name == this->name) return 0;
    assert(parent != nullptr);
    return parent->get_offset(name) + 1;
}
|
||||
|
||||
bool env_var::has_variable(const std::string& name) const {
|
||||
if(name == this->name) return true;
|
||||
if(parent) return parent->has_variable(name);
|
||||
return false;
|
||||
}
|
||||
|
||||
// An offset node shifts every lookup in the parent by `offset` stack
// slots. Asserts on a missing parent: an offset node binds no names
// itself, so lookup must continue upward.
int env_offset::get_offset(const std::string& name) const {
    assert(parent != nullptr);
    return parent->get_offset(name) + offset;
}
|
||||
|
||||
bool env_offset::has_variable(const std::string& name) const {
|
||||
if(parent) return parent->has_variable(name);
|
||||
return false;
|
||||
}
|
||||
39
code/compiler/13/env.hpp
Normal file
@@ -0,0 +1,39 @@
|
||||
#pragma once
#include <memory>
#include <string>

// Compile-time model of the runtime stack: maps variable names to their
// offset from the top of the stack while instructions are generated.
class env {
    public:
        virtual ~env() = default;

        // Offset (in stack slots) of `name` from the top of the stack.
        virtual int get_offset(const std::string& name) const = 0;
        // Whether `name` is bound anywhere in this environment chain.
        virtual bool has_variable(const std::string& name) const = 0;
};

using env_ptr = std::shared_ptr<env>;

// A single named variable pushed on top of a parent environment.
class env_var : public env {
    private:
        std::string name;
        env_ptr parent;

    public:
        env_var(std::string n, env_ptr p)
            : name(std::move(n)), parent(std::move(p)) {}

        int get_offset(const std::string& name) const;
        bool has_variable(const std::string& name) const;
};

// Shifts all lookups in the parent by a fixed offset (used when unnamed
// temporaries occupy stack slots above the named variables).
class env_offset : public env {
    private:
        int offset;
        env_ptr parent;

    public:
        env_offset(int o, env_ptr p)
            : offset(o), parent(std::move(p)) {}

        int get_offset(const std::string& name) const;
        bool has_variable(const std::string& name) const;
};
|
||||
41
code/compiler/13/error.cpp
Normal file
@@ -0,0 +1,41 @@
|
||||
#include "error.hpp"
|
||||
|
||||
// Generic description used when no more specific category applies.
// Fixed typo in the user-facing message: "occured" -> "occurred".
const char* compiler_error::what() const noexcept {
    return "an error occurred while compiling the program";
}
|
||||
|
||||
// Prints the one-line summary: the what() text followed by the detailed
// description supplied at construction.
void compiler_error::print_about(std::ostream& to) {
    to << what() << ": ";
    to << description << std::endl;
}
|
||||
|
||||
void compiler_error::print_location(std::ostream& to, file_mgr& fm, bool highlight) {
|
||||
if(!loc) return;
|
||||
to << "occuring on line " << loc->begin.line << ":" << std::endl;
|
||||
fm.print_location(to, *loc, highlight);
|
||||
}
|
||||
|
||||
// Full human-readable report: summary line plus source location.
void compiler_error::pretty_print(std::ostream& to, file_mgr& fm) {
    print_about(to);
    print_location(to, fm);
}
|
||||
|
||||
// Description specific to type errors.
// Fixed typo in the user-facing message: "occured" -> "occurred".
const char* type_error::what() const noexcept {
    return "an error occurred while checking the types of the program";
}
|
||||
|
||||
// Like compiler_error::pretty_print, but highlights the offending source.
void type_error::pretty_print(std::ostream& to, file_mgr& fm) {
    print_about(to);
    print_location(to, fm, true);
}
|
||||
|
||||
// Extends the type-error report with the two conflicting types, using
// ANSI color escapes (blue = expected, green = actual).
void unification_error::pretty_print(std::ostream& to, file_mgr& fm, type_mgr& mgr) {
    type_error::pretty_print(to, fm);
    to << "the expected type was:" << std::endl;
    to << " \033[34m";
    left->print(mgr, to);
    to << std::endl << "\033[0mwhile the actual type was:" << std::endl;
    to << " \033[32m";
    right->print(mgr, to);
    to << "\033[0m" << std::endl;
}
|
||||
49
code/compiler/13/error.hpp
Normal file
@@ -0,0 +1,49 @@
|
||||
#pragma once
|
||||
#include <exception>
|
||||
#include <optional>
|
||||
#include "type.hpp"
|
||||
#include "location.hh"
|
||||
#include "parse_driver.hpp"
|
||||
|
||||
using maybe_location = std::optional<yy::location>;
|
||||
|
||||
class compiler_error : std::exception {
|
||||
private:
|
||||
std::string description;
|
||||
maybe_location loc;
|
||||
|
||||
public:
|
||||
compiler_error(std::string d, maybe_location l = std::nullopt)
|
||||
: description(std::move(d)), loc(std::move(l)) {}
|
||||
|
||||
const char* what() const noexcept override;
|
||||
|
||||
void print_about(std::ostream& to);
|
||||
void print_location(std::ostream& to, file_mgr& fm, bool highlight = false);
|
||||
|
||||
void pretty_print(std::ostream& to, file_mgr& fm);
|
||||
};
|
||||
|
||||
class type_error : compiler_error {
|
||||
private:
|
||||
|
||||
public:
|
||||
type_error(std::string d, maybe_location l = std::nullopt)
|
||||
: compiler_error(std::move(d), std::move(l)) {}
|
||||
|
||||
const char* what() const noexcept override;
|
||||
void pretty_print(std::ostream& to, file_mgr& fm);
|
||||
};
|
||||
|
||||
class unification_error : public type_error {
|
||||
private:
|
||||
type_ptr left;
|
||||
type_ptr right;
|
||||
|
||||
public:
|
||||
unification_error(type_ptr l, type_ptr r, maybe_location loc = std::nullopt)
|
||||
: left(std::move(l)), right(std::move(r)),
|
||||
type_error("failed to unify types", std::move(loc)) {}
|
||||
|
||||
void pretty_print(std::ostream& to, file_mgr& fm, type_mgr& mgr);
|
||||
};
|
||||
2
code/compiler/13/examples/bad1.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
data Bool = { True, False }
|
||||
defn main = { 3 + True }
|
||||
1
code/compiler/13/examples/bad2.txt
Normal file
@@ -0,0 +1 @@
|
||||
defn main = { 1 2 3 4 5 }
|
||||
8
code/compiler/13/examples/bad3.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn head l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x y z -> { x }
|
||||
}
|
||||
}
|
||||
6
code/compiler/13/examples/errors/double_catchall.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
defn main = {
|
||||
case True of {
|
||||
n -> { 2 }
|
||||
n -> { 1 }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
data Pair a a = { MkPair a a }
|
||||
7
code/compiler/13/examples/errors/exhausted_patterns.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
defn main = {
|
||||
case True of {
|
||||
True -> { 1 }
|
||||
False -> { 0 }
|
||||
n -> { 2 }
|
||||
}
|
||||
}
|
||||
5
code/compiler/13/examples/errors/incomplete_patterns.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
defn main = {
|
||||
case True of {
|
||||
True -> { 1 }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
defn add x y = { x + y }
|
||||
|
||||
defn main = {
|
||||
case add of {
|
||||
n -> { 1 }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
defn main = {
|
||||
case True of {
|
||||
n -> { 2 }
|
||||
True -> { 1 }
|
||||
False -> { 0 }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn head l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x -> { x }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn head l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x y z -> { x }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
defn main = {
|
||||
case True of {
|
||||
NotBool -> { 1 }
|
||||
True -> { 2 }
|
||||
}
|
||||
}
|
||||
1
code/compiler/13/examples/errors/type_redefinition.txt
Normal file
@@ -0,0 +1 @@
|
||||
data Bool = { True, False }
|
||||
3
code/compiler/13/examples/errors/unknown_lid.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
defn main = {
|
||||
weird 1
|
||||
}
|
||||
1
code/compiler/13/examples/errors/unknown_type.txt
Normal file
@@ -0,0 +1 @@
|
||||
data Wrapper = { Wrap Weird }
|
||||
1
code/compiler/13/examples/errors/unknown_type_param.txt
Normal file
@@ -0,0 +1 @@
|
||||
data Wrapper = { Wrap a }
|
||||
3
code/compiler/13/examples/errors/unknown_uid.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
defn main = {
|
||||
Weird 1
|
||||
}
|
||||
1
code/compiler/13/examples/errors/wrong_type_kind.txt
Normal file
@@ -0,0 +1 @@
|
||||
data Wrapper = { Wrap (Int Bool) }
|
||||
17
code/compiler/13/examples/fixpoint.txt
Normal file
@@ -0,0 +1,17 @@
|
||||
data List a = { Nil, Cons a (List a) }
|
||||
|
||||
defn fix f = { let { defn x = { f x } } in { x } }
|
||||
defn fixpointOnes fo = { Cons 1 fo }
|
||||
defn sumTwo l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x xs -> {
|
||||
x + case xs of {
|
||||
Nil -> { 0 }
|
||||
Cons y ys -> { y }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defn main = { sumTwo (fix fixpointOnes) }
|
||||
8
code/compiler/13/examples/if.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
data Bool = { True, False }
|
||||
defn if c t e = {
|
||||
case c of {
|
||||
True -> { t }
|
||||
False -> { e }
|
||||
}
|
||||
}
|
||||
defn main = { if (if True False True) 11 3 }
|
||||
19
code/compiler/13/examples/lambda.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
data List a = { Nil, Cons a (List a) }
|
||||
|
||||
defn sum l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x xs -> { x + sum xs}
|
||||
}
|
||||
}
|
||||
|
||||
defn map f l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { Cons (f x) (map f xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn main = {
|
||||
sum (map \x -> { x * x } (map (\x -> { x + x }) (Cons 1 (Cons 2 (Cons 3 Nil)))))
|
||||
}
|
||||
47
code/compiler/13/examples/letin.txt
Normal file
@@ -0,0 +1,47 @@
|
||||
data Bool = { True, False }
|
||||
|
||||
data List a = { Nil, Cons a (List a) }
|
||||
|
||||
defn if c t e = {
|
||||
case c of {
|
||||
True -> { t }
|
||||
False -> { e }
|
||||
}
|
||||
}
|
||||
|
||||
defn mergeUntil l r p = {
|
||||
let {
|
||||
defn mergeLeft nl nr = {
|
||||
case nl of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { if (p x) (Cons x (mergeRight xs nr)) Nil }
|
||||
}
|
||||
}
|
||||
defn mergeRight nl nr = {
|
||||
case nr of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { if (p x) (Cons x (mergeLeft nl xs)) Nil }
|
||||
}
|
||||
}
|
||||
} in {
|
||||
mergeLeft l r
|
||||
}
|
||||
}
|
||||
|
||||
defn const x y = { x }
|
||||
|
||||
defn sum l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x xs -> { x + sum xs }
|
||||
}
|
||||
}
|
||||
|
||||
defn main = {
|
||||
let {
|
||||
defn firstList = { Cons 1 (Cons 3 (Cons 5 Nil)) }
|
||||
defn secondList = { Cons 2 (Cons 4 (Cons 6 Nil)) }
|
||||
} in {
|
||||
sum (mergeUntil firstList secondList (const True))
|
||||
}
|
||||
}
|
||||
32
code/compiler/13/examples/list.txt
Normal file
@@ -0,0 +1,32 @@
|
||||
data List a = { Nil, Cons a (List a) }
|
||||
|
||||
defn map f l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { Cons (f x) (map f xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn foldl f b l = {
|
||||
case l of {
|
||||
Nil -> { b }
|
||||
Cons x xs -> { foldl f (f b x) xs }
|
||||
}
|
||||
}
|
||||
|
||||
defn foldr f b l = {
|
||||
case l of {
|
||||
Nil -> { b }
|
||||
Cons x xs -> { f x (foldr f b xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn list = { Cons 1 (Cons 2 (Cons 3 (Cons 4 Nil))) }
|
||||
|
||||
defn add x y = { x + y }
|
||||
defn sum l = { foldr add 0 l }
|
||||
|
||||
defn skipAdd x y = { y + 1 }
|
||||
defn length l = { foldr skipAdd 0 l }
|
||||
|
||||
defn main = { sum list + length list }
|
||||
25
code/compiler/13/examples/mutual_recursion.txt
Normal file
@@ -0,0 +1,25 @@
|
||||
data Bool = { True, False }
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn if c t e = {
|
||||
case c of {
|
||||
True -> { t }
|
||||
False -> { e }
|
||||
}
|
||||
}
|
||||
|
||||
defn oddEven l e = {
|
||||
case l of {
|
||||
Nil -> { e }
|
||||
Cons x xs -> { evenOdd xs e }
|
||||
}
|
||||
}
|
||||
|
||||
defn evenOdd l e = {
|
||||
case l of {
|
||||
Nil -> { e }
|
||||
Cons x xs -> { oddEven xs e }
|
||||
}
|
||||
}
|
||||
|
||||
defn main = { if (oddEven (Cons 1 (Cons 2 (Cons 3 Nil))) True) (oddEven (Cons 1 (Cons 2 (Cons 3 Nil))) 1) 3 }
|
||||
23
code/compiler/13/examples/packed.txt
Normal file
@@ -0,0 +1,23 @@
|
||||
data Pair a b = { Pair a b }
|
||||
|
||||
defn packer = {
|
||||
let {
|
||||
data Packed a = { Packed a }
|
||||
defn pack a = { Packed a }
|
||||
defn unpack p = {
|
||||
case p of {
|
||||
Packed a -> { a }
|
||||
}
|
||||
}
|
||||
} in {
|
||||
Pair pack unpack
|
||||
}
|
||||
}
|
||||
|
||||
defn main = {
|
||||
case packer of {
|
||||
Pair pack unpack -> {
|
||||
unpack (pack 3)
|
||||
}
|
||||
}
|
||||
}
|
||||
17
code/compiler/13/examples/pair.txt
Normal file
@@ -0,0 +1,17 @@
|
||||
data Pair a b = { MkPair a b }
|
||||
|
||||
defn fst p = {
|
||||
case p of {
|
||||
MkPair a b -> { a }
|
||||
}
|
||||
}
|
||||
|
||||
defn snd p = {
|
||||
case p of {
|
||||
MkPair a b -> { b }
|
||||
}
|
||||
}
|
||||
|
||||
defn pair = { MkPair 1 (MkPair 2 3) }
|
||||
|
||||
defn main = { fst pair + snd (snd pair) }
|
||||
122
code/compiler/13/examples/primes.txt
Normal file
@@ -0,0 +1,122 @@
|
||||
data List = { Nil, Cons Nat List }
|
||||
data Bool = { True, False }
|
||||
data Nat = { O, S Nat }
|
||||
|
||||
defn if c t e = {
|
||||
case c of {
|
||||
True -> { t }
|
||||
False -> { e }
|
||||
}
|
||||
}
|
||||
|
||||
defn toInt n = {
|
||||
case n of {
|
||||
O -> { 0 }
|
||||
S np -> { 1 + toInt np }
|
||||
}
|
||||
}
|
||||
|
||||
defn lte n m = {
|
||||
case m of {
|
||||
O -> {
|
||||
case n of {
|
||||
O -> { True }
|
||||
S np -> { False }
|
||||
}
|
||||
}
|
||||
S mp -> {
|
||||
case n of {
|
||||
O -> { True }
|
||||
S np -> { lte np mp }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defn minus n m = {
|
||||
case m of {
|
||||
O -> { n }
|
||||
S mp -> {
|
||||
case n of {
|
||||
O -> { O }
|
||||
S np -> {
|
||||
minus np mp
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defn mod n m = {
|
||||
if (lte m n) (mod (minus n m) m) n
|
||||
}
|
||||
|
||||
defn notDivisibleBy n m = {
|
||||
case (mod m n) of {
|
||||
O -> { False }
|
||||
S mp -> { True }
|
||||
}
|
||||
}
|
||||
|
||||
defn filter f l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { if (f x) (Cons x (filter f xs)) (filter f xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn map f l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> { Cons (f x) (map f xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn nats = {
|
||||
Cons (S (S O)) (map S nats)
|
||||
}
|
||||
|
||||
defn primesRec l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons p xs -> { Cons p (primesRec (filter (notDivisibleBy p) xs)) }
|
||||
}
|
||||
}
|
||||
|
||||
defn primes = {
|
||||
primesRec nats
|
||||
}
|
||||
|
||||
defn take n l = {
|
||||
case l of {
|
||||
Nil -> { Nil }
|
||||
Cons x xs -> {
|
||||
case n of {
|
||||
O -> { Nil }
|
||||
S np -> { Cons x (take np xs) }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defn head l = {
|
||||
case l of {
|
||||
Nil -> { O }
|
||||
Cons x xs -> { x }
|
||||
}
|
||||
}
|
||||
|
||||
defn reverseAcc a l = {
|
||||
case l of {
|
||||
Nil -> { a }
|
||||
Cons x xs -> { reverseAcc (Cons x a) xs }
|
||||
}
|
||||
}
|
||||
|
||||
defn reverse l = {
|
||||
reverseAcc Nil l
|
||||
}
|
||||
|
||||
defn main = {
|
||||
toInt (head (reverse (take ((S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S (S O))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) primes)))
|
||||
}
|
||||
31
code/compiler/13/examples/runtime1.c
Normal file
@@ -0,0 +1,31 @@
|
||||
#include "../runtime.h"
|
||||
|
||||
void f_add(struct stack* s) {
|
||||
struct node_num* left = (struct node_num*) eval(stack_peek(s, 0));
|
||||
struct node_num* right = (struct node_num*) eval(stack_peek(s, 1));
|
||||
stack_push(s, (struct node_base*) alloc_num(left->value + right->value));
|
||||
}
|
||||
|
||||
void f_main(struct stack* s) {
|
||||
// PushInt 320
|
||||
stack_push(s, (struct node_base*) alloc_num(320));
|
||||
|
||||
// PushInt 6
|
||||
stack_push(s, (struct node_base*) alloc_num(6));
|
||||
|
||||
// PushGlobal f_add (the function for +)
|
||||
stack_push(s, (struct node_base*) alloc_global(f_add, 2));
|
||||
|
||||
struct node_base* left;
|
||||
struct node_base* right;
|
||||
|
||||
// MkApp
|
||||
left = stack_pop(s);
|
||||
right = stack_pop(s);
|
||||
stack_push(s, (struct node_base*) alloc_app(left, right));
|
||||
|
||||
// MkApp
|
||||
left = stack_pop(s);
|
||||
right = stack_pop(s);
|
||||
stack_push(s, (struct node_base*) alloc_app(left, right));
|
||||
}
|
||||
2
code/compiler/13/examples/works1.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
defn main = { sum 320 6 }
|
||||
defn sum x y = { x + y }
|
||||
3
code/compiler/13/examples/works2.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
defn add x y = { x + y }
|
||||
defn double x = { add x x }
|
||||
defn main = { double 163 }
|
||||
9
code/compiler/13/examples/works3.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
data List a = { Nil, Cons a (List a) }
|
||||
data Bool = { True, False }
|
||||
defn length l = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x xs -> { 1 + length xs }
|
||||
}
|
||||
}
|
||||
defn main = { length (Cons 1 (Cons 2 (Cons 3 Nil))) + length (Cons True (Cons False (Cons True Nil))) }
|
||||
16
code/compiler/13/examples/works4.txt
Normal file
@@ -0,0 +1,16 @@
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn add x y = { x + y }
|
||||
defn mul x y = { x * y }
|
||||
|
||||
defn foldr f b l = {
|
||||
case l of {
|
||||
Nil -> { b }
|
||||
Cons x xs -> { f x (foldr f b xs) }
|
||||
}
|
||||
}
|
||||
|
||||
defn main = {
|
||||
foldr add 0 (Cons 1 (Cons 2 (Cons 3 (Cons 4 Nil)))) +
|
||||
foldr mul 1 (Cons 1 (Cons 2 (Cons 3 (Cons 4 Nil))))
|
||||
}
|
||||
17
code/compiler/13/examples/works5.txt
Normal file
@@ -0,0 +1,17 @@
|
||||
data List = { Nil, Cons Int List }
|
||||
|
||||
defn sumZip l m = {
|
||||
case l of {
|
||||
Nil -> { 0 }
|
||||
Cons x xs -> {
|
||||
case m of {
|
||||
Nil -> { 0 }
|
||||
Cons y ys -> { x + y + sumZip xs ys }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defn ones = { Cons 1 ones }
|
||||
|
||||
defn main = { sumZip ones (Cons 1 (Cons 2 (Cons 3 Nil))) }
|
||||
76
code/compiler/13/global_scope.cpp
Normal file
@@ -0,0 +1,76 @@
|
||||
#include "global_scope.hpp"
|
||||
#include "ast.hpp"
|
||||
|
||||
void global_function::compile() {
|
||||
env_ptr new_env = env_ptr(new env_offset(0, nullptr));
|
||||
for(auto it = params.rbegin(); it != params.rend(); it++) {
|
||||
new_env = env_ptr(new env_var(*it, new_env));
|
||||
}
|
||||
body->compile(new_env, instructions);
|
||||
instructions.push_back(instruction_ptr(new instruction_update(params.size())));
|
||||
instructions.push_back(instruction_ptr(new instruction_pop(params.size())));
|
||||
}
|
||||
|
||||
// Create the (empty) LLVM function for this definition up front, so that
// later PushGlobal instructions can reference it before its body exists.
void global_function::declare_llvm(llvm_context& ctx) {
    generated_function = ctx.create_custom_function(name, params.size());
}
|
||||
|
||||
// Emit LLVM IR for this function's compiled instruction stream into the
// function declared by declare_llvm().
void global_function::generate_llvm(llvm_context& ctx) {
    auto& builder = ctx.get_builder();
    builder.SetInsertPoint(&generated_function->getEntryBlock());
    for(auto& ins : instructions) {
        ins->gen_llvm(ctx, generated_function);
    }
    builder.CreateRetVoid();
}
|
||||
|
||||
// Constructors need no AST compilation: their instruction stream is always
// Pack(tag, arity) followed by Update 0, synthesized directly here and
// lowered into a freshly created function.
void global_constructor::generate_llvm(llvm_context& ctx) {
    auto* packing_function = ctx.create_custom_function(name, arity);

    std::vector<instruction_ptr> body;
    body.push_back(instruction_ptr(new instruction_pack(tag, arity)));
    body.push_back(instruction_ptr(new instruction_update(0)));

    ctx.get_builder().SetInsertPoint(&packing_function->getEntryBlock());
    for(auto& ins : body) {
        ins->gen_llvm(ctx, packing_function);
    }
    ctx.get_builder().CreateRetVoid();
}
|
||||
|
||||
// Register a new top-level function under a mangled (collision-free) name.
// The scope owns the created object; a reference to it is returned.
global_function& global_scope::add_function(
        const std::string& n,
        std::vector<std::string> ps,
        ast_ptr b) {
    auto mangled = mng->new_mangled_name(n);
    auto* created = new global_function(std::move(mangled), std::move(ps), std::move(b));
    functions.push_back(global_function_ptr(created));
    return *created;
}
|
||||
|
||||
// Register a new data constructor under a mangled (collision-free) name.
// The scope owns the created object; a reference to it is returned.
global_constructor& global_scope::add_constructor(
        const std::string& n,
        int8_t t,
        size_t a) {
    auto name = mng->new_mangled_name(n);
    // Move the mangled name into the constructor instead of copying it —
    // consistent with add_function, which also transfers the string.
    global_constructor* new_constructor =
        new global_constructor(std::move(name), t, a);
    constructors.push_back(global_constructor_ptr(new_constructor));
    return *new_constructor;
}
|
||||
|
||||
void global_scope::compile() {
|
||||
for(auto& function : functions) {
|
||||
function->compile();
|
||||
}
|
||||
}
|
||||
|
||||
// Emit LLVM IR for the whole program. All functions are declared before
// any body is generated, so references between functions (including
// mutual recursion) resolve regardless of definition order.
void global_scope::generate_llvm(llvm_context& ctx) {
    for(auto& c : constructors) c->generate_llvm(ctx);
    for(auto& f : functions) f->declare_llvm(ctx);
    for(auto& f : functions) f->generate_llvm(ctx);
}
|
||||
60
code/compiler/13/global_scope.hpp
Normal file
@@ -0,0 +1,60 @@
|
||||
#pragma once
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <llvm/IR/Function.h>
|
||||
#include "instruction.hpp"
|
||||
#include "mangler.hpp"
|
||||
|
||||
struct ast;
|
||||
using ast_ptr = std::unique_ptr<ast>;
|
||||
|
||||
struct global_function {
|
||||
std::string name;
|
||||
std::vector<std::string> params;
|
||||
ast_ptr body;
|
||||
|
||||
std::vector<instruction_ptr> instructions;
|
||||
llvm::Function* generated_function;
|
||||
|
||||
global_function(std::string n, std::vector<std::string> ps, ast_ptr b)
|
||||
: name(std::move(n)), params(std::move(ps)), body(std::move(b)) {}
|
||||
|
||||
void compile();
|
||||
void declare_llvm(llvm_context& ctx);
|
||||
void generate_llvm(llvm_context& ctx);
|
||||
};
|
||||
|
||||
using global_function_ptr = std::unique_ptr<global_function>;
|
||||
|
||||
struct global_constructor {
|
||||
std::string name;
|
||||
int8_t tag;
|
||||
size_t arity;
|
||||
|
||||
global_constructor(std::string n, int8_t t, size_t a)
|
||||
: name(std::move(n)), tag(t), arity(a) {}
|
||||
|
||||
void generate_llvm(llvm_context& ctx);
|
||||
};
|
||||
|
||||
using global_constructor_ptr = std::unique_ptr<global_constructor>;
|
||||
|
||||
class global_scope {
|
||||
private:
|
||||
std::vector<global_function_ptr> functions;
|
||||
std::vector<global_constructor_ptr> constructors;
|
||||
mangler* mng;
|
||||
|
||||
public:
|
||||
global_scope(mangler& m) : mng(&m) {}
|
||||
|
||||
global_function& add_function(
|
||||
const std::string& n,
|
||||
std::vector<std::string> ps,
|
||||
ast_ptr b);
|
||||
global_constructor& add_constructor(const std::string& n, int8_t t, size_t a);
|
||||
|
||||
void compile();
|
||||
void generate_llvm(llvm_context& ctx);
|
||||
};
|
||||
114
code/compiler/13/graph.cpp
Normal file
@@ -0,0 +1,114 @@
|
||||
#include "graph.hpp"
|
||||
|
||||
std::set<function_graph::edge> function_graph::compute_transitive_edges() {
|
||||
std::set<edge> transitive_edges;
|
||||
transitive_edges.insert(edges.begin(), edges.end());
|
||||
for(auto& connector : adjacency_lists) {
|
||||
for(auto& from : adjacency_lists) {
|
||||
edge to_connector { from.first, connector.first };
|
||||
for(auto& to : adjacency_lists) {
|
||||
edge full_jump { from.first, to.first };
|
||||
if(transitive_edges.find(full_jump) != transitive_edges.end()) continue;
|
||||
|
||||
edge from_connector { connector.first, to.first };
|
||||
if(transitive_edges.find(to_connector) != transitive_edges.end() &&
|
||||
transitive_edges.find(from_connector) != transitive_edges.end())
|
||||
transitive_edges.insert(std::move(full_jump));
|
||||
}
|
||||
}
|
||||
}
|
||||
return transitive_edges;
|
||||
}
|
||||
|
||||
void function_graph::create_groups(
|
||||
const std::set<edge>& transitive_edges,
|
||||
std::map<function, group_id>& group_ids,
|
||||
std::map<group_id, data_ptr>& group_data_map) {
|
||||
group_id id_counter = 0;
|
||||
for(auto& vertex : adjacency_lists) {
|
||||
if(group_ids.find(vertex.first) != group_ids.end())
|
||||
continue;
|
||||
data_ptr new_group(new group_data);
|
||||
new_group->functions.insert(vertex.first);
|
||||
group_data_map[id_counter] = new_group;
|
||||
group_ids[vertex.first] = id_counter;
|
||||
for(auto& other_vertex : adjacency_lists) {
|
||||
if(transitive_edges.find({vertex.first, other_vertex.first}) != transitive_edges.end() &&
|
||||
transitive_edges.find({other_vertex.first, vertex.first}) != transitive_edges.end()) {
|
||||
group_ids[other_vertex.first] = id_counter;
|
||||
new_group->functions.insert(other_vertex.first);
|
||||
}
|
||||
}
|
||||
id_counter++;
|
||||
}
|
||||
}
|
||||
|
||||
void function_graph::create_edges(
|
||||
std::map<function, group_id>& group_ids,
|
||||
std::map<group_id, data_ptr>& group_data_map) {
|
||||
std::set<std::pair<group_id, group_id>> group_edges;
|
||||
for(auto& vertex : adjacency_lists) {
|
||||
auto vertex_id = group_ids[vertex.first];
|
||||
auto& vertex_data = group_data_map[vertex_id];
|
||||
for(auto& other_vertex : vertex.second) {
|
||||
auto other_id = group_ids[other_vertex];
|
||||
if(vertex_id == other_id) continue;
|
||||
if(group_edges.find({vertex_id, other_id}) != group_edges.end())
|
||||
continue;
|
||||
group_edges.insert({vertex_id, other_id});
|
||||
vertex_data->adjacency_list.insert(other_id);
|
||||
group_data_map[other_id]->indegree++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<group_ptr> function_graph::generate_order(
|
||||
std::map<function, group_id>& group_ids,
|
||||
std::map<group_id, data_ptr>& group_data_map) {
|
||||
std::queue<group_id> id_queue;
|
||||
std::vector<group_ptr> output;
|
||||
for(auto& group : group_data_map) {
|
||||
if(group.second->indegree == 0) id_queue.push(group.first);
|
||||
}
|
||||
|
||||
while(!id_queue.empty()) {
|
||||
auto new_id = id_queue.front();
|
||||
auto& group_data = group_data_map[new_id];
|
||||
group_ptr output_group(new group);
|
||||
output_group->members = std::move(group_data->functions);
|
||||
id_queue.pop();
|
||||
|
||||
for(auto& adjacent_group : group_data->adjacency_list) {
|
||||
if(--group_data_map[adjacent_group]->indegree == 0)
|
||||
id_queue.push(adjacent_group);
|
||||
}
|
||||
|
||||
output.push_back(std::move(output_group));
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
std::set<function>& function_graph::add_function(const function& f) {
|
||||
auto adjacency_list_it = adjacency_lists.find(f);
|
||||
if(adjacency_list_it != adjacency_lists.end()) {
|
||||
return adjacency_list_it->second;
|
||||
} else {
|
||||
return adjacency_lists[f] = { };
|
||||
}
|
||||
}
|
||||
|
||||
// Record a dependency edge from `from` to `to`. `from`'s vertex is created
// on demand; note that `to` only gets its own vertex once add_function is
// called for it.
void function_graph::add_edge(const function& from, const function& to) {
    add_function(from).insert(to);
    edges.insert({ from, to });
}
|
||||
|
||||
std::vector<group_ptr> function_graph::compute_order() {
|
||||
std::set<edge> transitive_edges = compute_transitive_edges();
|
||||
std::map<function, group_id> group_ids;
|
||||
std::map<group_id, data_ptr> group_data_map;
|
||||
|
||||
create_groups(transitive_edges, group_ids, group_data_map);
|
||||
create_edges(group_ids, group_data_map);
|
||||
return generate_order(group_ids, group_data_map);
|
||||
}
|
||||
54
code/compiler/13/graph.hpp
Normal file
@@ -0,0 +1,54 @@
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <queue>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
// Vertices of the call graph are functions, identified by name.
using function = std::string;

// One output unit: a set of functions that must be processed together
// (they form a cycle in the call graph).
struct group {
    std::set<function> members;
};

using group_ptr = std::unique_ptr<group>;

// Dependency graph between top-level functions. compute_order() collapses
// the graph into strongly connected components and returns them in
// topological (dependency) order.
class function_graph {
    private:
        using group_id = size_t;

        // Working data for one component while the order is being built.
        struct group_data {
            std::set<function> functions;       // members of the component
            std::set<group_id> adjacency_list;  // edges to other components
            size_t indegree;                    // unprocessed dependencies

            group_data() : indegree(0) {}
        };

        using data_ptr = std::shared_ptr<group_data>;
        using edge = std::pair<function, function>;
        using group_edge = std::pair<group_id, group_id>;

        std::map<function, std::set<function>> adjacency_lists;
        std::set<edge> edges;

        std::set<edge> compute_transitive_edges();
        void create_groups(
                const std::set<edge>&,
                std::map<function, group_id>&,
                std::map<group_id, data_ptr>&);
        void create_edges(
                std::map<function, group_id>&,
                std::map<group_id, data_ptr>&);
        std::vector<group_ptr> generate_order(
                std::map<function, group_id>&,
                std::map<group_id, data_ptr>&);

    public:
        std::set<function>& add_function(const function& f);
        void add_edge(const function& from, const function& to);
        std::vector<group_ptr> compute_order();
};
|
||||
177
code/compiler/13/instruction.cpp
Normal file
@@ -0,0 +1,177 @@
|
||||
#include "instruction.hpp"
|
||||
#include "llvm_context.hpp"
|
||||
#include <llvm/IR/BasicBlock.h>
|
||||
#include <llvm/IR/Function.h>
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
static void print_indent(int n, std::ostream& to) {
|
||||
while(n--) to << " ";
|
||||
}
|
||||
|
||||
void instruction_pushint::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "PushInt(" << value << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_pushint::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_push(f, ctx.create_num(f, ctx.create_i32(value)));
|
||||
}
|
||||
|
||||
void instruction_pushglobal::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "PushGlobal(" << name << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_pushglobal::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
auto& global_f = ctx.get_custom_function(name);
|
||||
auto arity = ctx.create_i32(global_f.arity);
|
||||
ctx.create_push(f, ctx.create_global(f, global_f.function, arity));
|
||||
}
|
||||
|
||||
void instruction_push::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Push(" << offset << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_push::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_push(f, ctx.create_peek(f, ctx.create_size(offset)));
|
||||
}
|
||||
|
||||
void instruction_pop::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Pop(" << count << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_pop::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_popn(f, ctx.create_size(count));
|
||||
}
|
||||
|
||||
void instruction_mkapp::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "MkApp()" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_mkapp::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
auto left = ctx.create_pop(f);
|
||||
auto right = ctx.create_pop(f);
|
||||
ctx.create_push(f, ctx.create_app(f, left, right));
|
||||
}
|
||||
|
||||
void instruction_update::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Update(" << offset << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_update::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_update(f, ctx.create_size(offset));
|
||||
}
|
||||
|
||||
void instruction_pack::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Pack(" << tag << ", " << size << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_pack::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_pack(f, ctx.create_size(size), ctx.create_i8(tag));
|
||||
}
|
||||
|
||||
void instruction_split::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Split()" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_split::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_split(f, ctx.create_size(size));
|
||||
}
|
||||
|
||||
void instruction_jump::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Jump(" << std::endl;
|
||||
for(auto& instruction_set : branches) {
|
||||
for(auto& instruction : instruction_set) {
|
||||
instruction->print(indent + 2, to);
|
||||
}
|
||||
to << std::endl;
|
||||
}
|
||||
print_indent(indent, to);
|
||||
to << ")" << std::endl;
|
||||
}
|
||||
|
||||
// Dispatch on the tag of the data node at the stack top. Each branch gets
// its own basic block; every branch falls through to a shared "safety"
// block, which also serves as the switch default for unmapped tags.
void instruction_jump::gen_llvm(llvm_context& ctx, Function* f) const {
    auto scrutinee = ctx.create_peek(f, ctx.create_size(0));
    auto tag = ctx.unwrap_data_tag(scrutinee);
    auto safety_block = ctx.create_basic_block("safety", f);
    auto switch_op = ctx.get_builder().CreateSwitch(tag, safety_block, tag_mappings.size());

    // Emit one block per branch body.
    std::vector<BasicBlock*> blocks;
    for(auto& branch : branches) {
        auto branch_block = ctx.create_basic_block("branch", f);
        ctx.get_builder().SetInsertPoint(branch_block);
        for(auto& ins : branch) {
            ins->gen_llvm(ctx, f);
        }
        ctx.get_builder().CreateBr(safety_block);
        blocks.push_back(branch_block);
    }

    // Wire every constructor tag to its branch block.
    for(auto& mapping : tag_mappings) {
        switch_op->addCase(ctx.create_i8(mapping.first), blocks[mapping.second]);
    }

    // Code after the Jump continues in the safety block.
    ctx.get_builder().SetInsertPoint(safety_block);
}
|
||||
|
||||
void instruction_slide::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Slide(" << offset << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_slide::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_slide(f, ctx.create_size(offset));
|
||||
}
|
||||
|
||||
void instruction_binop::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "BinOp(" << op_action(op) << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_binop::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
auto left_int = ctx.unwrap_num(ctx.create_pop(f));
|
||||
auto right_int = ctx.unwrap_num(ctx.create_pop(f));
|
||||
llvm::Value* result;
|
||||
switch(op) {
|
||||
case PLUS: result = ctx.get_builder().CreateAdd(left_int, right_int); break;
|
||||
case MINUS: result = ctx.get_builder().CreateSub(left_int, right_int); break;
|
||||
case TIMES: result = ctx.get_builder().CreateMul(left_int, right_int); break;
|
||||
case DIVIDE: result = ctx.get_builder().CreateSDiv(left_int, right_int); break;
|
||||
}
|
||||
ctx.create_push(f, ctx.create_num(f, result));
|
||||
}
|
||||
|
||||
void instruction_eval::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Eval()" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_eval::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_unwind(f);
|
||||
}
|
||||
|
||||
void instruction_alloc::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Alloc(" << amount << ")" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_alloc::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
ctx.create_alloc(f, ctx.create_size(amount));
|
||||
}
|
||||
|
||||
void instruction_unwind::print(int indent, std::ostream& to) const {
|
||||
print_indent(indent, to);
|
||||
to << "Unwind()" << std::endl;
|
||||
}
|
||||
|
||||
void instruction_unwind::gen_llvm(llvm_context& ctx, Function* f) const {
|
||||
// Nothing
|
||||
}
|
||||
142
code/compiler/13/instruction.hpp
Normal file
@@ -0,0 +1,142 @@
|
||||
#pragma once
|
||||
#include <llvm/IR/Function.h>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <ostream>
|
||||
#include "binop.hpp"
|
||||
#include "llvm_context.hpp"
|
||||
|
||||
struct instruction {
|
||||
virtual ~instruction() = default;
|
||||
|
||||
virtual void print(int indent, std::ostream& to) const = 0;
|
||||
virtual void gen_llvm(llvm_context& ctx, llvm::Function* f) const = 0;
|
||||
};
|
||||
|
||||
using instruction_ptr = std::unique_ptr<instruction>;
|
||||
|
||||
struct instruction_pushint : public instruction {
|
||||
int value;
|
||||
|
||||
instruction_pushint(int v)
|
||||
: value(v) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_pushglobal : public instruction {
|
||||
std::string name;
|
||||
|
||||
instruction_pushglobal(std::string n)
|
||||
: name(std::move(n)) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_push : public instruction {
|
||||
int offset;
|
||||
|
||||
instruction_push(int o)
|
||||
: offset(o) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_pop : public instruction {
|
||||
int count;
|
||||
|
||||
instruction_pop(int c)
|
||||
: count(c) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_mkapp : public instruction {
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_update : public instruction {
|
||||
int offset;
|
||||
|
||||
instruction_update(int o)
|
||||
: offset(o) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_pack : public instruction {
|
||||
int tag;
|
||||
int size;
|
||||
|
||||
instruction_pack(int t, int s)
|
||||
: tag(t), size(s) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_split : public instruction {
|
||||
int size;
|
||||
|
||||
instruction_split(int s)
|
||||
: size(s) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_jump : public instruction {
|
||||
std::vector<std::vector<instruction_ptr>> branches;
|
||||
std::map<int, int> tag_mappings;
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_slide : public instruction {
|
||||
int offset;
|
||||
|
||||
instruction_slide(int o)
|
||||
: offset(o) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_binop : public instruction {
|
||||
binop op;
|
||||
|
||||
instruction_binop(binop o)
|
||||
: op(o) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_eval : public instruction {
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_alloc : public instruction {
|
||||
int amount;
|
||||
|
||||
instruction_alloc(int a)
|
||||
: amount(a) {}
|
||||
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
|
||||
struct instruction_unwind : public instruction {
|
||||
void print(int indent, std::ostream& to) const;
|
||||
void gen_llvm(llvm_context& ctx, llvm::Function* f) const;
|
||||
};
|
||||
294
code/compiler/13/llvm_context.cpp
Normal file
@@ -0,0 +1,294 @@
|
||||
#include "llvm_context.hpp"
|
||||
#include <llvm/IR/DerivedTypes.h>
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
// Declare opaque struct types mirroring the runtime's node layout, then
// fill in their bodies. Bodies are set only after every type exists,
// because node_base is self-referential through node_ptr_type.
void llvm_context::create_types() {
    stack_type = StructType::create(ctx, "stack");
    gmachine_type = StructType::create(ctx, "gmachine");
    stack_ptr_type = PointerType::getUnqual(stack_type);
    gmachine_ptr_type = PointerType::getUnqual(gmachine_type);
    tag_type = IntegerType::getInt8Ty(ctx);

    struct_types["node_base"] = StructType::create(ctx, "node_base");
    struct_types["node_app"] = StructType::create(ctx, "node_app");
    struct_types["node_num"] = StructType::create(ctx, "node_num");
    struct_types["node_global"] = StructType::create(ctx, "node_global");
    struct_types["node_ind"] = StructType::create(ctx, "node_ind");
    struct_types["node_data"] = StructType::create(ctx, "node_data");
    node_ptr_type = PointerType::getUnqual(struct_types.at("node_base"));
    function_type = FunctionType::get(Type::getVoidTy(ctx), { gmachine_ptr_type }, false);

    gmachine_type->setBody(
        stack_ptr_type,
        node_ptr_type,
        IntegerType::getInt64Ty(ctx),
        IntegerType::getInt64Ty(ctx)
    );
    struct_types.at("node_base")->setBody(
        IntegerType::getInt32Ty(ctx),
        IntegerType::getInt8Ty(ctx),
        node_ptr_type
    );
    struct_types.at("node_app")->setBody(
        struct_types.at("node_base"),
        node_ptr_type,
        node_ptr_type
    );
    struct_types.at("node_num")->setBody(
        struct_types.at("node_base"),
        IntegerType::getInt32Ty(ctx)
    );
    // NOTE(review): this member is a bare function type (not a pointer to
    // one) and takes stack*, whereas function_type above takes gmachine* —
    // confirm against the runtime's node_global definition.
    struct_types.at("node_global")->setBody(
        struct_types.at("node_base"),
        FunctionType::get(Type::getVoidTy(ctx), { stack_ptr_type }, false)
    );
    struct_types.at("node_ind")->setBody(
        struct_types.at("node_base"),
        node_ptr_type
    );
    struct_types.at("node_data")->setBody(
        struct_types.at("node_base"),
        IntegerType::getInt8Ty(ctx),
        PointerType::getUnqual(node_ptr_type)
    );
}
|
||||
|
||||
void llvm_context::create_functions() {
|
||||
auto void_type = Type::getVoidTy(ctx);
|
||||
auto sizet_type = IntegerType::get(ctx, sizeof(size_t) * 8);
|
||||
functions["stack_init"] = Function::Create(
|
||||
FunctionType::get(void_type, { stack_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_init",
|
||||
&module
|
||||
);
|
||||
functions["stack_free"] = Function::Create(
|
||||
FunctionType::get(void_type, { stack_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_free",
|
||||
&module
|
||||
);
|
||||
functions["stack_push"] = Function::Create(
|
||||
FunctionType::get(void_type, { stack_ptr_type, node_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_push",
|
||||
&module
|
||||
);
|
||||
functions["stack_pop"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { stack_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_pop",
|
||||
&module
|
||||
);
|
||||
functions["stack_peek"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { stack_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_peek",
|
||||
&module
|
||||
);
|
||||
functions["stack_popn"] = Function::Create(
|
||||
FunctionType::get(void_type, { stack_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"stack_popn",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_slide"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_slide",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_update"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_update",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_alloc"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_alloc",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_pack"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type, sizet_type, tag_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_pack",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_split"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type, sizet_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_split",
|
||||
&module
|
||||
);
|
||||
functions["gmachine_track"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { gmachine_ptr_type, node_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"gmachine_track",
|
||||
&module
|
||||
);
|
||||
|
||||
auto int32_type = IntegerType::getInt32Ty(ctx);
|
||||
functions["alloc_app"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { node_ptr_type, node_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"alloc_app",
|
||||
&module
|
||||
);
|
||||
functions["alloc_num"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { int32_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"alloc_num",
|
||||
&module
|
||||
);
|
||||
functions["alloc_global"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { function_type, int32_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"alloc_global",
|
||||
&module
|
||||
);
|
||||
functions["alloc_ind"] = Function::Create(
|
||||
FunctionType::get(node_ptr_type, { node_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"alloc_ind",
|
||||
&module
|
||||
);
|
||||
|
||||
functions["unwind"] = Function::Create(
|
||||
FunctionType::get(void_type, { gmachine_ptr_type }, false),
|
||||
Function::LinkageTypes::ExternalLinkage,
|
||||
"unwind",
|
||||
&module
|
||||
);
|
||||
}
|
||||
|
||||
IRBuilder<>& llvm_context::get_builder() {
|
||||
return builder;
|
||||
}
|
||||
|
||||
Module& llvm_context::get_module() {
|
||||
return module;
|
||||
}
|
||||
|
||||
BasicBlock* llvm_context::create_basic_block(const std::string& name, llvm::Function* f) {
|
||||
return BasicBlock::Create(ctx, name, f);
|
||||
}
|
||||
|
||||
ConstantInt* llvm_context::create_i8(int8_t i) {
|
||||
return ConstantInt::get(ctx, APInt(8, i));
|
||||
}
|
||||
ConstantInt* llvm_context::create_i32(int32_t i) {
|
||||
return ConstantInt::get(ctx, APInt(32, i));
|
||||
}
|
||||
ConstantInt* llvm_context::create_size(size_t i) {
|
||||
return ConstantInt::get(ctx, APInt(sizeof(size_t) * 8, i));
|
||||
}
|
||||
|
||||
Value* llvm_context::create_pop(Function* f) {
|
||||
auto pop_f = functions.at("stack_pop");
|
||||
return builder.CreateCall(pop_f, { unwrap_gmachine_stack_ptr(f->arg_begin()) });
|
||||
}
|
||||
Value* llvm_context::create_peek(Function* f, Value* off) {
|
||||
auto peek_f = functions.at("stack_peek");
|
||||
return builder.CreateCall(peek_f, { unwrap_gmachine_stack_ptr(f->arg_begin()), off });
|
||||
}
|
||||
void llvm_context::create_push(Function* f, Value* v) {
|
||||
auto push_f = functions.at("stack_push");
|
||||
builder.CreateCall(push_f, { unwrap_gmachine_stack_ptr(f->arg_begin()), v });
|
||||
}
|
||||
void llvm_context::create_popn(Function* f, Value* off) {
|
||||
auto popn_f = functions.at("stack_popn");
|
||||
builder.CreateCall(popn_f, { unwrap_gmachine_stack_ptr(f->arg_begin()), off });
|
||||
}
|
||||
void llvm_context::create_update(Function* f, Value* off) {
|
||||
auto update_f = functions.at("gmachine_update");
|
||||
builder.CreateCall(update_f, { f->arg_begin(), off });
|
||||
}
|
||||
void llvm_context::create_pack(Function* f, Value* c, Value* t) {
|
||||
auto pack_f = functions.at("gmachine_pack");
|
||||
builder.CreateCall(pack_f, { f->arg_begin(), c, t });
|
||||
}
|
||||
void llvm_context::create_split(Function* f, Value* c) {
|
||||
auto split_f = functions.at("gmachine_split");
|
||||
builder.CreateCall(split_f, { f->arg_begin(), c });
|
||||
}
|
||||
void llvm_context::create_slide(Function* f, Value* off) {
|
||||
auto slide_f = functions.at("gmachine_slide");
|
||||
builder.CreateCall(slide_f, { f->arg_begin(), off });
|
||||
}
|
||||
void llvm_context::create_alloc(Function* f, Value* n) {
|
||||
auto alloc_f = functions.at("gmachine_alloc");
|
||||
builder.CreateCall(alloc_f, { f->arg_begin(), n });
|
||||
}
|
||||
Value* llvm_context::create_track(Function* f, Value* v) {
|
||||
auto track_f = functions.at("gmachine_track");
|
||||
return builder.CreateCall(track_f, { f->arg_begin(), v });
|
||||
}
|
||||
|
||||
// Emits a call to the runtime's unwind loop, passing the G-machine
// pointer (the generated function's first argument).
// Uses arg_begin() for consistency with the other create_* helpers,
// which all access the first argument the same way (f->args().begin()
// is equivalent but was the odd one out).
void llvm_context::create_unwind(Function* f) {
    auto unwind_f = functions.at("unwind");
    builder.CreateCall(unwind_f, { f->arg_begin() });
}
|
||||
|
||||
// GEPs into the gmachine struct pointed to by g, producing a pointer to
// its first field — the embedded stack.
Value* llvm_context::unwrap_gmachine_stack_ptr(Value* g) {
    auto zero = create_i32(0);
    return builder.CreateGEP(g, { zero, zero });
}
|
||||
|
||||
// Casts a generic node pointer to node_num* and loads its value field
// (struct index 1, after the embedded node_base).
Value* llvm_context::unwrap_num(Value* v) {
    auto num_ptr_type = PointerType::getUnqual(struct_types.at("node_num"));
    auto num_ptr = builder.CreatePointerCast(v, num_ptr_type);
    auto value_ptr = builder.CreateGEP(num_ptr, { create_i32(0), create_i32(1) });
    return builder.CreateLoad(value_ptr);
}
|
||||
// Allocates a fresh number node holding v and registers it with the GC.
Value* llvm_context::create_num(Function* f, Value* v) {
    auto node = builder.CreateCall(functions.at("alloc_num"), { v });
    return create_track(f, node);
}
|
||||
|
||||
// Casts a generic node pointer to node_data* and loads its constructor
// tag (struct index 1, after the embedded node_base).
Value* llvm_context::unwrap_data_tag(Value* v) {
    auto data_ptr_type = PointerType::getUnqual(struct_types.at("node_data"));
    auto data_ptr = builder.CreatePointerCast(v, data_ptr_type);
    auto tag_ptr = builder.CreateGEP(data_ptr, { create_i32(0), create_i32(1) });
    return builder.CreateLoad(tag_ptr);
}
|
||||
|
||||
// Allocates a global node wrapping function gf with arity a, GC-tracked.
Value* llvm_context::create_global(Function* f, Value* gf, Value* a) {
    auto node = builder.CreateCall(functions.at("alloc_global"), { gf, a });
    return create_track(f, node);
}
|
||||
|
||||
// Allocates an application node (l applied to r), GC-tracked.
Value* llvm_context::create_app(Function* f, Value* l, Value* r) {
    auto node = builder.CreateCall(functions.at("alloc_app"), { l, r });
    return create_track(f, node);
}
|
||||
|
||||
// Creates a global LLVM function "f_<name>" with the shared G-machine
// signature, gives it an entry basic block, and records its arity in the
// custom_functions table. Returns the new function.
// (Removed the unused local `void_type`; the signature comes from the
// prebuilt `function_type` member.)
llvm::Function* llvm_context::create_custom_function(const std::string& name, int32_t arity) {
    auto new_function = llvm::Function::Create(
        function_type,
        llvm::Function::LinkageTypes::ExternalLinkage,
        "f_" + name,
        &module
    );
    // Every generated function body starts from a fresh "entry" block;
    // the block is owned by new_function, so the handle can be dropped.
    llvm::BasicBlock::Create(ctx, "entry", new_function);

    auto new_custom_f = custom_function_ptr(new custom_function());
    new_custom_f->arity = arity;
    new_custom_f->function = new_function;
    custom_functions["f_" + name] = std::move(new_custom_f);

    return new_function;
}
|
||||
|
||||
// Looks up a previously registered custom function by its unmangled
// name; throws std::out_of_range if it was never created.
llvm_context::custom_function& llvm_context::get_custom_function(const std::string& name) {
    auto& entry = custom_functions.at("f_" + name);
    return *entry;
}
|
||||
81
code/compiler/13/llvm_context.hpp
Normal file
@@ -0,0 +1,81 @@
|
||||
#pragma once
#include <cstdint>
#include <map>
#include <memory>
#include <string>

#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/Value.h>
|
||||
|
||||
// Central owner of all LLVM state (context, builder, module) plus lookup
// tables for the runtime's functions and struct types, and for the
// compiler-generated ("custom") functions.
class llvm_context {
    public:
    // A compiler-generated global function paired with its arity.
    struct custom_function {
        llvm::Function* function;
        int32_t arity;
    };

    using custom_function_ptr = std::unique_ptr<custom_function>;

    private:
    llvm::LLVMContext ctx;
    llvm::IRBuilder<> builder;
    llvm::Module module;

    // Keyed by mangled name ("f_" + source name).
    std::map<std::string, custom_function_ptr> custom_functions;
    // Runtime functions (stack_push, gmachine_update, ...), by name.
    std::map<std::string, llvm::Function*> functions;
    // Runtime struct types (node_num, node_data, ...), by name.
    std::map<std::string, llvm::StructType*> struct_types;

    llvm::StructType* stack_type;
    llvm::StructType* gmachine_type;
    llvm::PointerType* stack_ptr_type;
    llvm::PointerType* gmachine_ptr_type;
    llvm::PointerType* node_ptr_type;
    llvm::IntegerType* tag_type;
    // Shared signature of every generated G-machine function.
    llvm::FunctionType* function_type;

    // Populate struct_types / functions with the runtime's declarations.
    void create_types();
    void create_functions();

    public:
    llvm_context()
        : builder(ctx), module("bloglang", ctx) {
        create_types();
        create_functions();
    }

    llvm::IRBuilder<>& get_builder();
    llvm::Module& get_module();

    llvm::BasicBlock* create_basic_block(const std::string& name, llvm::Function* f);

    // Integer constant helpers.
    llvm::ConstantInt* create_i8(int8_t);
    llvm::ConstantInt* create_i32(int32_t);
    llvm::ConstantInt* create_size(size_t);

    // Emit calls into the runtime's stack / G-machine API. Each takes the
    // generated function f whose first argument is the gmachine pointer.
    llvm::Value* create_pop(llvm::Function*);
    llvm::Value* create_peek(llvm::Function*, llvm::Value*);
    void create_push(llvm::Function*, llvm::Value*);
    void create_popn(llvm::Function*, llvm::Value*);
    void create_update(llvm::Function*, llvm::Value*);
    void create_pack(llvm::Function*, llvm::Value*, llvm::Value*);
    void create_split(llvm::Function*, llvm::Value*);
    void create_slide(llvm::Function*, llvm::Value*);
    llvm::Value* create_track(llvm::Function*, llvm::Value*);

    void create_unwind(llvm::Function*);

    // Pointer-arithmetic helpers for digging into runtime structs.
    llvm::Value* unwrap_gmachine_stack_ptr(llvm::Value*);

    llvm::Value* unwrap_num(llvm::Value*);
    llvm::Value* create_num(llvm::Function*, llvm::Value*);

    llvm::Value* unwrap_data_tag(llvm::Value*);

    llvm::Value* create_global(llvm::Function*, llvm::Value*, llvm::Value*);

    llvm::Value* create_app(llvm::Function*, llvm::Value*, llvm::Value*);

    llvm::Function* create_custom_function(const std::string& name, int32_t arity);
    custom_function& get_custom_function(const std::string& name);
};
|
||||
27
code/compiler/13/main.cpp
Normal file
@@ -0,0 +1,27 @@
|
||||
#include "ast.hpp"
|
||||
#include <iostream>
|
||||
#include "parser.hpp"
|
||||
#include "compiler.hpp"
|
||||
#include "error.hpp"
|
||||
|
||||
// Bison error callback: reports a syntax error on stderr.
// The location argument is currently unused; only the message is shown.
void yy::parser::error(const yy::location& loc, const std::string& msg) {
    // Fixed misspelling in the user-facing message ("occured").
    std::cerr << "An error occurred: " << msg << std::endl;
}
|
||||
|
||||
// Entry point: compiles the single file named on the command line into
// "program.o", pretty-printing any compiler diagnostics to stderr.
int main(int argc, char** argv) {
    if(argc != 2) {
        std::cerr << "please enter a file to compile." << std::endl;
        exit(1);
    }
    compiler cmp(argv[1]);

    try {
        cmp("program.o");
    } catch(unification_error& err) {
        // Unification errors need the type manager to render both types.
        err.pretty_print(std::cerr, cmp.get_file_manager(), cmp.get_type_manager());
    } catch(type_error& err) {
        err.pretty_print(std::cerr, cmp.get_file_manager());
    } catch (compiler_error& err) {
        err.pretty_print(std::cerr, cmp.get_file_manager());
    }
}
|
||||
17
code/compiler/13/mangler.cpp
Normal file
@@ -0,0 +1,17 @@
|
||||
#include "mangler.hpp"
|
||||
|
||||
std::string mangler::new_mangled_name(const std::string& n) {
|
||||
auto occurence_it = occurence_count.find(n);
|
||||
int occurence = 0;
|
||||
if(occurence_it != occurence_count.end()) {
|
||||
occurence = occurence_it->second + 1;
|
||||
}
|
||||
occurence_count[n] = occurence;
|
||||
|
||||
std::string final_name = n;
|
||||
if (occurence != 0) {
|
||||
final_name += "_";
|
||||
final_name += std::to_string(occurence);
|
||||
}
|
||||
return final_name;
|
||||
}
|
||||
11
code/compiler/13/mangler.hpp
Normal file
@@ -0,0 +1,11 @@
|
||||
#pragma once
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
// Produces unique names by suffixing repeated names with a counter
// ("x", "x_1", "x_2", ...).
class mangler {
    private:
    // How many times each base name has been handed out (0-based).
    std::map<std::string, int> occurence_count;

    public:
    std::string new_mangled_name(const std::string& str);
};
|
||||
72
code/compiler/13/parse_driver.cpp
Normal file
@@ -0,0 +1,72 @@
|
||||
#include "parse_driver.hpp"
|
||||
#include "scanner.hpp"
|
||||
#include <sstream>
|
||||
|
||||
// Starts with offset 0 recorded as the beginning of line 1.
file_mgr::file_mgr() : file_offset(0) {
    line_offsets.push_back(0);
}
|
||||
|
||||
// Appends len bytes of scanned input to the in-memory copy of the file
// and advances the running byte offset.
void file_mgr::write(const char* buf, size_t len) {
    file_offset += len;
    string_stream.write(buf, len);
}
|
||||
|
||||
// Records the current offset as the start of a new line (called by the
// scanner on every newline).
void file_mgr::mark_line() {
    line_offsets.push_back(file_offset);
}
|
||||
|
||||
// Freezes the accumulated stream into file_contents; must be called
// after scanning finishes and before any location lookups that read it.
void file_mgr::finalize() {
    file_contents = string_stream.str();
}
|
||||
|
||||
// Converts a 1-based (line, column) position into a 0-based byte offset
// into file_contents.
size_t file_mgr::get_index(int line, int column) const {
    // line is validated positive first, so the cast for the unsigned
    // comparison is safe (avoids a signed/unsigned compare warning).
    assert(line > 0 && static_cast<size_t>(line) <= line_offsets.size());
    return line_offsets.at(line-1) + column - 1;
}
|
||||
|
||||
size_t file_mgr::get_line_end(int line) const {
|
||||
if(line == line_offsets.size()) return file_contents.size();
|
||||
return get_index(line+1, 1);
|
||||
}
|
||||
|
||||
// Prints the full source lines spanned by loc, optionally rendering the
// exact highlighted range underlined in red via ANSI escape codes.
void file_mgr::print_location(
        std::ostream& stream,
        const yy::location& loc,
        bool highlight) const {
    // Print from the start of the first line through the end of the last
    // line, with the highlighted region in the middle.
    size_t print_start = get_index(loc.begin.line, 1);
    size_t highlight_start = get_index(loc.begin.line, loc.begin.column);
    size_t highlight_end = get_index(loc.end.line, loc.end.column);
    size_t print_end = get_line_end(loc.end.line);
    const char* content = file_contents.c_str();
    stream.write(content + print_start, highlight_start - print_start);
    if(highlight) stream << "\033[4;31m";  // underline + red on
    stream.write(content + highlight_start, highlight_end - highlight_start);
    if(highlight) stream << "\033[0m";  // reset attributes
    stream.write(content + highlight_end, print_end - highlight_end);
}
|
||||
|
||||
// Opens file_name, runs the reentrant scanner/parser over it (which also
// copies the source text into the file manager), and finalizes the file
// manager. Returns false only when the file cannot be opened.
bool parse_driver::operator()() {
    FILE* stream = fopen(file_name.c_str(), "r");
    if(!stream) return false;
    yyscan_t scanner;
    yylex_init(&scanner);
    yyset_in(stream, scanner);
    yy::parser parser(scanner, *this);
    // NOTE(review): the parser's return status is discarded, so this
    // function returns true even when parsing failed — confirm callers
    // detect failure through diagnostics or empty definitions instead.
    parser();
    yylex_destroy(scanner);
    fclose(stream);
    file_m->finalize();
    return true;
}
|
||||
|
||||
// Mutable access to the scanner's current location (updated per token).
yy::location& parse_driver::get_current_location() {
    return location;
}
|
||||
|
||||
// The file manager that accumulates the scanned source text.
file_mgr& parse_driver::get_file_manager() const {
    return *file_m;
}
|
||||
|
||||
// The definition group the parser fills with top-level definitions.
definition_group& parse_driver::get_global_defs() const {
    return *global_defs;
}
|
||||
58
code/compiler/13/parse_driver.hpp
Normal file
@@ -0,0 +1,58 @@
|
||||
#pragma once
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include "definition.hpp"
|
||||
#include "location.hh"
|
||||
#include "parser.hpp"
|
||||
|
||||
struct parse_driver;
|
||||
|
||||
void scanner_init(parse_driver* d, yyscan_t* scanner);
|
||||
void scanner_destroy(yyscan_t* scanner);
|
||||
|
||||
// Accumulates the raw text of a source file as it is scanned and maps
// 1-based (line, column) locations to byte offsets for error display.
class file_mgr {
    private:
    std::ostringstream string_stream;
    // Frozen copy of the stream, populated by finalize().
    std::string file_contents;

    // Running byte offset of the scanner within the file.
    size_t file_offset;
    // Byte offset at which each line starts (index 0 = line 1).
    std::vector<size_t> line_offsets;
    public:
    file_mgr();

    void write(const char* buffer, size_t len);
    void mark_line();
    void finalize();

    size_t get_index(int line, int column) const;
    size_t get_line_end(int line) const;
    void print_location(
        std::ostream& stream,
        const yy::location& loc,
        bool highlight = true) const;
};
|
||||
|
||||
class parse_driver {
|
||||
private:
|
||||
std::string file_name;
|
||||
yy::location location;
|
||||
definition_group* global_defs;
|
||||
file_mgr* file_m;
|
||||
|
||||
public:
|
||||
parse_driver(
|
||||
file_mgr& mgr,
|
||||
definition_group& defs,
|
||||
const std::string& file)
|
||||
: file_name(file), file_m(&mgr), global_defs(&defs) {}
|
||||
|
||||
bool operator()();
|
||||
yy::location& get_current_location();
|
||||
file_mgr& get_file_manager() const;
|
||||
definition_group& get_global_defs() const;
|
||||
};
|
||||
|
||||
#define YY_DECL yy::parser::symbol_type yylex(yyscan_t yyscanner, parse_driver& drv)
|
||||
|
||||
YY_DECL;
|
||||
48
code/compiler/13/parsed_type.cpp
Normal file
@@ -0,0 +1,48 @@
|
||||
#include "parsed_type.hpp"
|
||||
#include <sstream>
|
||||
#include "type.hpp"
|
||||
#include "type_env.hpp"
|
||||
#include "error.hpp"
|
||||
|
||||
// Resolves a named type application (e.g. "List a") against the type
// environment: the head must name a base type, applied to exactly its
// declared number of arguments.
type_ptr parsed_type_app::to_type(
        const std::set<std::string>& vars,
        const type_env& e) const {
    auto parent_type = e.lookup_type(name);
    if(parent_type == nullptr)
        throw type_error("no such type or type constructor " + name);
    type_base* base_type;
    // Only base types can be applied to arguments.
    if(!(base_type = dynamic_cast<type_base*>(parent_type.get())))
        throw type_error("invalid type " + name);
    if(base_type->arity != arguments.size()) {
        std::ostringstream error_stream;
        error_stream << "invalid application of type ";
        error_stream << name;
        error_stream << " (" << base_type->arity << " argument(s) expected, ";
        error_stream << "but " << arguments.size() << " provided)";
        throw type_error(error_stream.str());
    }

    // Moving parent_type into the application node is safe here: the
    // pointee survives the move, and base_type is no longer used.
    type_app* new_app = new type_app(std::move(parent_type));
    type_ptr to_return(new_app);
    for(auto& arg : arguments) {
        new_app->arguments.push_back(arg->to_type(vars, e));
    }
    return to_return;
}
|
||||
|
||||
// A type variable is valid only if it was explicitly declared by the
// enclosing definition; otherwise this is an error.
type_ptr parsed_type_var::to_type(
        const std::set<std::string>& vars,
        const type_env& e) const {
    if(vars.count(var) == 0)
        throw type_error("the type variable " + var + " was not explicitly declared.");
    return type_ptr(new type_var(var));
}
|
||||
|
||||
|
||||
// Converts both sides of an arrow, then combines them into a function
// type. The left side is converted first, matching error-report order.
type_ptr parsed_type_arr::to_type(
        const std::set<std::string>& vars,
        const type_env& env) const {
    auto converted_left = left->to_type(vars, env);
    auto converted_right = right->to_type(vars, env);
    return type_ptr(new type_arr(std::move(converted_left), std::move(converted_right)));
}
|
||||
43
code/compiler/13/parsed_type.hpp
Normal file
@@ -0,0 +1,43 @@
|
||||
#pragma once
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include "type_env.hpp"
|
||||
|
||||
struct parsed_type {
|
||||
virtual type_ptr to_type(
|
||||
const std::set<std::string>& vars,
|
||||
const type_env& env) const = 0;
|
||||
};
|
||||
|
||||
using parsed_type_ptr = std::unique_ptr<parsed_type>;
|
||||
|
||||
// A named type applied to zero or more arguments, e.g. "List a".
struct parsed_type_app : parsed_type {
    std::string name;
    std::vector<parsed_type_ptr> arguments;

    parsed_type_app(
        std::string n,
        std::vector<parsed_type_ptr> as)
        : name(std::move(n)), arguments(std::move(as)) {}

    type_ptr to_type(const std::set<std::string>& vars, const type_env& env) const;
};
|
||||
|
||||
// A type variable written in the source, e.g. "a".
struct parsed_type_var : parsed_type {
    std::string var;

    parsed_type_var(std::string v) : var(std::move(v)) {}

    type_ptr to_type(const std::set<std::string>& vars, const type_env& env) const;
};
|
||||
|
||||
// A function (arrow) type, e.g. "a -> b".
struct parsed_type_arr : parsed_type {
    parsed_type_ptr left;
    parsed_type_ptr right;

    parsed_type_arr(parsed_type_ptr l, parsed_type_ptr r)
        : left(std::move(l)), right(std::move(r)) {}

    type_ptr to_type(const std::set<std::string>& vars, const type_env& env) const;
};
|
||||
180
code/compiler/13/parser.y
Normal file
@@ -0,0 +1,180 @@
|
||||
/* Bison grammar for the language: top-level defn/data definitions,
 * arithmetic expressions, application, case, let, and lambdas. */
%code requires {
#include <string>
#include <vector>
#include "ast.hpp"
#include "definition.hpp"
#include "parser.hpp"
#include "parsed_type.hpp"

class parse_driver;
using yyscan_t = void*;
}

/* Both the scanner state and the driver are threaded through yylex. */
%param { yyscan_t scanner }
%param { parse_driver& drv }

%code {
#include "parse_driver.hpp"
}

%token BACKSLASH
%token PLUS
%token TIMES
%token MINUS
%token DIVIDE
%token <int> INT
%token DEFN
%token DATA
%token CASE
%token OF
%token LET
%token IN
%token OCURLY
%token CCURLY
%token OPAREN
%token CPAREN
%token COMMA
%token ARROW
%token EQUAL
%token <std::string> LID
%token <std::string> UID

%language "c++"
%define api.value.type variant
%define api.token.constructor

%locations

%type <std::vector<std::string>> lowercaseParams
%type <std::vector<branch_ptr>> branches
%type <std::vector<constructor_ptr>> constructors
%type <std::vector<parsed_type_ptr>> typeList
%type <definition_group> definitions
%type <parsed_type_ptr> type nonArrowType typeListElement
%type <ast_ptr> aAdd aMul case let lambda app appBase
%type <definition_data_ptr> data
%type <definition_defn_ptr> defn
%type <branch_ptr> branch
%type <pattern_ptr> pattern
%type <constructor_ptr> constructor

%start program

%%

/* The whole program is one definition group, marked globally visible
 * and handed to the driver. */
program
    : definitions { $1.vis = visibility::global; std::swap(drv.get_global_defs(), $1); }
    ;

definitions
    : definitions defn { $$ = std::move($1); auto name = $2->name; $$.defs_defn[name] = std::move($2); }
    | definitions data { $$ = std::move($1); auto name = $2->name; $$.defs_data[name] = std::move($2); }
    | %empty { $$ = definition_group(); }
    ;

defn
    : DEFN LID lowercaseParams EQUAL OCURLY aAdd CCURLY
        { $$ = definition_defn_ptr(
            new definition_defn(std::move($2), std::move($3), std::move($6), @$)); }
    ;

lowercaseParams
    : %empty { $$ = std::vector<std::string>(); }
    | lowercaseParams LID { $$ = std::move($1); $$.push_back(std::move($2)); }
    ;

/* Additive layer binds looser than multiplicative; both left-associate. */
aAdd
    : aAdd PLUS aMul { $$ = ast_ptr(new ast_binop(PLUS, std::move($1), std::move($3), @$)); }
    | aAdd MINUS aMul { $$ = ast_ptr(new ast_binop(MINUS, std::move($1), std::move($3), @$)); }
    | aMul { $$ = std::move($1); }
    ;

aMul
    : aMul TIMES app { $$ = ast_ptr(new ast_binop(TIMES, std::move($1), std::move($3), @$)); }
    | aMul DIVIDE app { $$ = ast_ptr(new ast_binop(DIVIDE, std::move($1), std::move($3), @$)); }
    | app { $$ = std::move($1); }
    ;

/* Function application binds tightest and left-associates. */
app
    : app appBase { $$ = ast_ptr(new ast_app(std::move($1), std::move($2), @$)); }
    | appBase { $$ = std::move($1); }
    ;

appBase
    : INT { $$ = ast_ptr(new ast_int($1, @$)); }
    | LID { $$ = ast_ptr(new ast_lid(std::move($1), @$)); }
    | UID { $$ = ast_ptr(new ast_uid(std::move($1), @$)); }
    | OPAREN aAdd CPAREN { $$ = std::move($2); }
    | case { $$ = std::move($1); }
    | let { $$ = std::move($1); }
    | lambda { $$ = std::move($1); }
    ;

let
    : LET OCURLY definitions CCURLY IN OCURLY aAdd CCURLY
        { $$ = ast_ptr(new ast_let(std::move($3), std::move($7), @$)); }
    ;

lambda
    : BACKSLASH lowercaseParams ARROW OCURLY aAdd CCURLY
        { $$ = ast_ptr(new ast_lambda(std::move($2), std::move($5), @$)); }
    ;

case
    : CASE aAdd OF OCURLY branches CCURLY
        { $$ = ast_ptr(new ast_case(std::move($2), std::move($5), @$)); }
    ;

branches
    : branches branch { $$ = std::move($1); $$.push_back(std::move($2)); }
    | branch { $$ = std::vector<branch_ptr>(); $$.push_back(std::move($1));}
    ;

branch
    : pattern ARROW OCURLY aAdd CCURLY
        { $$ = branch_ptr(new branch(std::move($1), std::move($4))); }
    ;

pattern
    : LID { $$ = pattern_ptr(new pattern_var(std::move($1), @$)); }
    | UID lowercaseParams
        { $$ = pattern_ptr(new pattern_constr(std::move($1), std::move($2), @$)); }
    ;

data
    : DATA UID lowercaseParams EQUAL OCURLY constructors CCURLY
        { $$ = definition_data_ptr(new definition_data(std::move($2), std::move($3), std::move($6), @$)); }
    ;

constructors
    : constructors COMMA constructor { $$ = std::move($1); $$.push_back(std::move($3)); }
    | constructor
        { $$ = std::vector<constructor_ptr>(); $$.push_back(std::move($1)); }
    ;

constructor
    : UID typeList
        { $$ = constructor_ptr(new constructor(std::move($1), std::move($2))); }
    ;

/* Arrow types are right-associative. */
type
    : nonArrowType ARROW type { $$ = parsed_type_ptr(new parsed_type_arr(std::move($1), std::move($3))); }
    | nonArrowType { $$ = std::move($1); }
    ;

nonArrowType
    : UID typeList { $$ = parsed_type_ptr(new parsed_type_app(std::move($1), std::move($2))); }
    | LID { $$ = parsed_type_ptr(new parsed_type_var(std::move($1))); }
    | OPAREN type CPAREN { $$ = std::move($2); }
    ;

typeListElement
    : OPAREN type CPAREN { $$ = std::move($2); }
    | UID { $$ = parsed_type_ptr(new parsed_type_app(std::move($1), {})); }
    | LID { $$ = parsed_type_ptr(new parsed_type_var(std::move($1))); }
    ;

typeList
    : %empty { $$ = std::vector<parsed_type_ptr>(); }
    | typeList typeListElement { $$ = std::move($1); $$.push_back(std::move($2)); }
    ;
||||
269
code/compiler/13/runtime.c
Normal file
@@ -0,0 +1,269 @@
|
||||
#include <stdint.h>
|
||||
#include <assert.h>
|
||||
#include <memory.h>
|
||||
#include <stdio.h>
|
||||
#include "runtime.h"
|
||||
|
||||
struct node_base* alloc_node() {
|
||||
struct node_base* new_node = malloc(sizeof(struct node_app));
|
||||
new_node->gc_next = NULL;
|
||||
new_node->gc_reachable = 0;
|
||||
assert(new_node != NULL);
|
||||
return new_node;
|
||||
}
|
||||
|
||||
struct node_app* alloc_app(struct node_base* l, struct node_base* r) {
|
||||
struct node_app* node = (struct node_app*) alloc_node();
|
||||
node->base.tag = NODE_APP;
|
||||
node->left = l;
|
||||
node->right = r;
|
||||
return node;
|
||||
}
|
||||
|
||||
struct node_num* alloc_num(int32_t n) {
|
||||
struct node_num* node = (struct node_num*) alloc_node();
|
||||
node->base.tag = NODE_NUM;
|
||||
node->value = n;
|
||||
return node;
|
||||
}
|
||||
|
||||
struct node_global* alloc_global(void (*f)(struct gmachine*), int32_t a) {
|
||||
struct node_global* node = (struct node_global*) alloc_node();
|
||||
node->base.tag = NODE_GLOBAL;
|
||||
node->arity = a;
|
||||
node->function = f;
|
||||
return node;
|
||||
}
|
||||
|
||||
struct node_ind* alloc_ind(struct node_base* n) {
|
||||
struct node_ind* node = (struct node_ind*) alloc_node();
|
||||
node->base.tag = NODE_IND;
|
||||
node->next = n;
|
||||
return node;
|
||||
}
|
||||
|
||||
/*
 * Frees memory owned by a node other than the node itself; only data
 * nodes carry an out-of-line child array. The node's own storage is
 * released by the caller.
 */
void free_node_direct(struct node_base* n) {
    if(n->tag == NODE_DATA) {
        free(((struct node_data*) n)->array);
    }
}
|
||||
|
||||
void gc_visit_node(struct node_base* n) {
|
||||
if(n->gc_reachable) return;
|
||||
n->gc_reachable = 1;
|
||||
|
||||
if(n->tag == NODE_APP) {
|
||||
struct node_app* app = (struct node_app*) n;
|
||||
gc_visit_node(app->left);
|
||||
gc_visit_node(app->right);
|
||||
} if(n->tag == NODE_IND) {
|
||||
struct node_ind* ind = (struct node_ind*) n;
|
||||
gc_visit_node(ind->next);
|
||||
} if(n->tag == NODE_DATA) {
|
||||
struct node_data* data = (struct node_data*) n;
|
||||
struct node_base** to_visit = data->array;
|
||||
while(*to_visit) {
|
||||
gc_visit_node(*to_visit);
|
||||
to_visit++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Initializes an empty stack with a small starting capacity (grown
 * geometrically by stack_push). Aborts on allocation failure.
 */
void stack_init(struct stack* s) {
    s->size = 4;
    s->count = 0;
    s->data = malloc(sizeof(*s->data) * s->size);
    assert(s->data != NULL);
}
|
||||
|
||||
/* Releases the stack's backing array (not the nodes it points at). */
void stack_free(struct stack* s) {
    free(s->data);
}
|
||||
|
||||
void stack_push(struct stack* s, struct node_base* n) {
|
||||
while(s->count >= s->size) {
|
||||
s->data = realloc(s->data, sizeof(*s->data) * (s->size *= 2));
|
||||
assert(s->data != NULL);
|
||||
}
|
||||
s->data[s->count++] = n;
|
||||
}
|
||||
|
||||
struct node_base* stack_pop(struct stack* s) {
|
||||
assert(s->count > 0);
|
||||
return s->data[--s->count];
|
||||
}
|
||||
|
||||
struct node_base* stack_peek(struct stack* s, size_t o) {
|
||||
assert(s->count > o);
|
||||
return s->data[s->count - o - 1];
|
||||
}
|
||||
|
||||
/* Discards the top n nodes (the backing array is not shrunk). */
void stack_popn(struct stack* s, size_t n) {
    assert(s->count >= n);
    s->count -= n;
}
|
||||
|
||||
/*
 * Initializes an empty G-machine: fresh stack, empty GC list, and an
 * initial collection threshold of 128 live nodes.
 */
void gmachine_init(struct gmachine* g) {
    stack_init(&g->stack);
    g->gc_nodes = NULL;
    g->gc_node_count = 0;
    g->gc_node_threshold = 128;
}
|
||||
|
||||
void gmachine_free(struct gmachine* g) {
|
||||
stack_free(&g->stack);
|
||||
struct node_base* to_free = g->gc_nodes;
|
||||
struct node_base* next;
|
||||
|
||||
while(to_free) {
|
||||
next = to_free->gc_next;
|
||||
free_node_direct(to_free);
|
||||
free(to_free);
|
||||
to_free = next;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Keeps the node on top of the stack but removes the n nodes directly
 * beneath it (the G-machine "slide" instruction).
 */
void gmachine_slide(struct gmachine* g, size_t n) {
    assert(g->stack.count > n);
    g->stack.data[g->stack.count - n - 1] = g->stack.data[g->stack.count - 1];
    g->stack.count -= n;
}
|
||||
|
||||
/*
 * Replaces the node o+1 entries below the top with an indirection to
 * the node on top, then pops the top — i.e. overwrites a redex with
 * its reduced value.
 */
void gmachine_update(struct gmachine* g, size_t o) {
    assert(g->stack.count > o + 1);
    /* The target node is re-tagged in place; a node_ind fits inside
       any node allocation (see alloc_node). */
    struct node_ind* ind =
        (struct node_ind*) g->stack.data[g->stack.count - o - 2];
    ind->base.tag = NODE_IND;
    ind->next = g->stack.data[g->stack.count -= 1];
}
|
||||
|
||||
void gmachine_alloc(struct gmachine* g, size_t o) {
|
||||
while(o--) {
|
||||
stack_push(&g->stack,
|
||||
gmachine_track(g, (struct node_base*) alloc_ind(NULL)));
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Pops the top n nodes and replaces them with a single data node with
 * constructor tag t. The child array is NULL-terminated so the GC can
 * walk it without a stored length.
 */
void gmachine_pack(struct gmachine* g, size_t n, int8_t t) {
    assert(g->stack.count >= n);

    struct node_base** data = malloc(sizeof(*data) * (n + 1));
    assert(data != NULL);
    memcpy(data, &g->stack.data[g->stack.count - n], n * sizeof(*data));
    data[n] = NULL;  /* terminator for gc_visit_node */

    struct node_data* new_node = (struct node_data*) alloc_node();
    new_node->array = data;
    new_node->base.tag = NODE_DATA;
    new_node->tag = t;

    /* Children stay reachable through the new node once it is pushed. */
    stack_popn(&g->stack, n);
    stack_push(&g->stack, gmachine_track(g, (struct node_base*) new_node));
}
|
||||
|
||||
/*
 * Pops a data node and pushes its first n children onto the stack.
 * NOTE(review): the popped node's tag is not checked and no bound on n
 * is verified here — presumably the compiler only emits split with the
 * constructor's true arity; confirm.
 */
void gmachine_split(struct gmachine* g, size_t n) {
    struct node_data* node = (struct node_data*) stack_pop(&g->stack);
    for(size_t i = 0; i < n; i++) {
        stack_push(&g->stack, node->array[i]);
    }
}
|
||||
|
||||
struct node_base* gmachine_track(struct gmachine* g, struct node_base* b) {
|
||||
g->gc_node_count++;
|
||||
b->gc_next = g->gc_nodes;
|
||||
g->gc_nodes = b;
|
||||
|
||||
if(g->gc_node_count >= g->gc_node_threshold) {
|
||||
uint64_t nodes_before = g->gc_node_count;
|
||||
gc_visit_node(b);
|
||||
gmachine_gc(g);
|
||||
g->gc_node_threshold = g->gc_node_count * 2;
|
||||
}
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
/*
 * Mark-and-sweep collection: marks everything reachable from the stack,
 * then sweeps the intrusive gc_nodes list, freeing unmarked nodes and
 * clearing the mark on survivors ready for the next cycle.
 */
void gmachine_gc(struct gmachine* g) {
    for(size_t i = 0; i < g->stack.count; i++) {
        gc_visit_node(g->stack.data[i]);
    }

    /* Sweep by holding a pointer to the previous link, so unreachable
       nodes can be unlinked in place without a special head case. */
    struct node_base** head_ptr = &g->gc_nodes;
    while(*head_ptr) {
        if((*head_ptr)->gc_reachable) {
            (*head_ptr)->gc_reachable = 0;
            head_ptr = &(*head_ptr)->gc_next;
        } else {
            struct node_base* to_free = *head_ptr;
            *head_ptr = to_free->gc_next;
            free_node_direct(to_free);
            free(to_free);
            g->gc_node_count--;
        }
    }
}
|
||||
|
||||
/*
 * Core graph-reduction loop. Repeatedly inspects the top of the stack:
 *  - application: descend into the left spine;
 *  - global with enough arguments: replace each spine node with the
 *    argument it applies, then call the compiled function;
 *  - indirection: follow it;
 *  - anything else (a value in normal form): stop.
 */
void unwind(struct gmachine* g) {
    struct stack* s = &g->stack;

    while(1) {
        struct node_base* peek = stack_peek(s, 0);
        if(peek->tag == NODE_APP) {
            struct node_app* n = (struct node_app*) peek;
            stack_push(s, n->left);
        } else if(peek->tag == NODE_GLOBAL) {
            struct node_global* n = (struct node_global*) peek;
            assert(s->count > n->arity);

            /* Rearrange: each of the arity top entries becomes the right
               child (the argument) of the spine node beneath it. */
            for(size_t i = 1; i <= n->arity; i++) {
                s->data[s->count - i]
                    = ((struct node_app*) s->data[s->count - i - 1])->right;
            }

            n->function(g);
        } else if(peek->tag == NODE_IND) {
            /* Replace the indirection with its target and keep going. */
            struct node_ind* n = (struct node_ind*) peek;
            stack_pop(s);
            stack_push(s, n->next);
        } else {
            break;
        }
    }
}
|
||||
|
||||
extern void f_main(struct gmachine* s);
|
||||
|
||||
void print_node(struct node_base* n) {
|
||||
if(n->tag == NODE_APP) {
|
||||
struct node_app* app = (struct node_app*) n;
|
||||
print_node(app->left);
|
||||
putchar(' ');
|
||||
print_node(app->right);
|
||||
} else if(n->tag == NODE_DATA) {
|
||||
printf("(Packed)");
|
||||
} else if(n->tag == NODE_GLOBAL) {
|
||||
struct node_global* global = (struct node_global*) n;
|
||||
printf("(Global: %p)", global->function);
|
||||
} else if(n->tag == NODE_IND) {
|
||||
print_node(((struct node_ind*) n)->next);
|
||||
} else if(n->tag == NODE_NUM) {
|
||||
struct node_num* num = (struct node_num*) n;
|
||||
printf("%d", num->value);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Runtime entry point: builds a global node for the compiled program's
 * f_main, reduces it to normal form with unwind, and prints the result.
 */
int main(int argc, char** argv) {
    struct gmachine gmachine;
    struct node_global* first_node = alloc_global(f_main, 0);
    struct node_base* result;

    gmachine_init(&gmachine);
    gmachine_track(&gmachine, (struct node_base*) first_node);
    stack_push(&gmachine.stack, (struct node_base*) first_node);
    unwind(&gmachine);
    result = stack_pop(&gmachine.stack);
    printf("Result: ");
    print_node(result);
    putchar('\n');
    gmachine_free(&gmachine);
}
|
||||
84
code/compiler/13/runtime.h
Normal file
@@ -0,0 +1,84 @@
|
||||
#pragma once
|
||||
#include <stdlib.h>
|
||||
|
||||
/* NOTE(review): this header uses int8_t/int32_t/int64_t but includes
   only <stdlib.h>; it compiles because runtime.c includes <stdint.h>
   first — confirm and consider adding <stdint.h> to make it
   self-contained. */
struct gmachine;

/* Discriminant stored in every node's embedded node_base. */
enum node_tag {
    NODE_APP,
    NODE_NUM,
    NODE_GLOBAL,
    NODE_IND,
    NODE_DATA
};

/* Common header of every heap node: tag plus intrusive GC bookkeeping. */
struct node_base {
    enum node_tag tag;
    int8_t gc_reachable;
    struct node_base* gc_next;
};

/* Application of left to right. */
struct node_app {
    struct node_base base;
    struct node_base* left;
    struct node_base* right;
};

/* Boxed integer. */
struct node_num {
    struct node_base base;
    int32_t value;
};

/* Reference to a compiled global function and its arity. */
struct node_global {
    struct node_base base;
    int32_t arity;
    void (*function)(struct gmachine*);
};

/* Indirection, used when updating a redex with its value. */
struct node_ind {
    struct node_base base;
    struct node_base* next;
};

/* Constructor application: tag plus NULL-terminated child array. */
struct node_data {
    struct node_base base;
    int8_t tag;
    struct node_base** array;
};

struct node_base* alloc_node();
struct node_app* alloc_app(struct node_base* l, struct node_base* r);
struct node_num* alloc_num(int32_t n);
struct node_global* alloc_global(void (*f)(struct gmachine*), int32_t a);
struct node_ind* alloc_ind(struct node_base* n);
void free_node_direct(struct node_base*);
void gc_visit_node(struct node_base*);

/* Growable stack of node pointers. */
struct stack {
    size_t size;
    size_t count;
    struct node_base** data;
};

void stack_init(struct stack* s);
void stack_free(struct stack* s);
void stack_push(struct stack* s, struct node_base* n);
struct node_base* stack_pop(struct stack* s);
struct node_base* stack_peek(struct stack* s, size_t o);
void stack_popn(struct stack* s, size_t n);

/* The G-machine: an evaluation stack plus GC state. */
struct gmachine {
    struct stack stack;
    struct node_base* gc_nodes;
    int64_t gc_node_count;
    int64_t gc_node_threshold;
};

void gmachine_init(struct gmachine* g);
void gmachine_free(struct gmachine* g);
void gmachine_slide(struct gmachine* g, size_t n);
void gmachine_update(struct gmachine* g, size_t o);
void gmachine_alloc(struct gmachine* g, size_t o);
void gmachine_pack(struct gmachine* g, size_t n, int8_t t);
void gmachine_split(struct gmachine* g, size_t n);
struct node_base* gmachine_track(struct gmachine* g, struct node_base* b);
void gmachine_gc(struct gmachine* g);
||||
45
code/compiler/13/scanner.l
Normal file
@@ -0,0 +1,45 @@
|
||||
/* Flex scanner: reentrant, feeding complete bison symbols and keeping
   the file manager's copy of the source text in sync via YY_USER_ACTION. */
%option noyywrap
%option reentrant
%option header-file="scanner.hpp"

%{
#include <iostream>
#include "ast.hpp"
#include "definition.hpp"
#include "parse_driver.hpp"
#include "parser.hpp"

/* Runs before every action: records the matched text with the file
   manager and advances the current location by the token's length. */
#define YY_USER_ACTION \
    drv.get_file_manager().write(yytext, yyleng); \
    LOC.step(); LOC.columns(yyleng);
#define LOC drv.get_current_location()
%}

%%

\n { drv.get_current_location().lines(); drv.get_file_manager().mark_line(); }
[ ]+ {}
\\ { return yy::parser::make_BACKSLASH(LOC); }
\+ { return yy::parser::make_PLUS(LOC); }
\* { return yy::parser::make_TIMES(LOC); }
- { return yy::parser::make_MINUS(LOC); }
\/ { return yy::parser::make_DIVIDE(LOC); }
[0-9]+ { return yy::parser::make_INT(atoi(yytext), LOC); }
defn { return yy::parser::make_DEFN(LOC); }
data { return yy::parser::make_DATA(LOC); }
case { return yy::parser::make_CASE(LOC); }
of { return yy::parser::make_OF(LOC); }
let { return yy::parser::make_LET(LOC); }
in { return yy::parser::make_IN(LOC); }
\{ { return yy::parser::make_OCURLY(LOC); }
\} { return yy::parser::make_CCURLY(LOC); }
\( { return yy::parser::make_OPAREN(LOC); }
\) { return yy::parser::make_CPAREN(LOC); }
, { return yy::parser::make_COMMA(LOC); }
-> { return yy::parser::make_ARROW(LOC); }
= { return yy::parser::make_EQUAL(LOC); }
[a-z][a-zA-Z]* { return yy::parser::make_LID(std::string(yytext), LOC); }
[A-Z][a-zA-Z]* { return yy::parser::make_UID(std::string(yytext), LOC); }
<<EOF>> { return yy::parser::make_YYEOF(LOC); }

%%
|
||||
23
code/compiler/13/test.cpp
Normal file
@@ -0,0 +1,23 @@
|
||||
#include "graph.hpp"
|
||||
|
||||
int main() {
|
||||
function_graph graph;
|
||||
graph.add_edge("f", "g");
|
||||
graph.add_edge("g", "h");
|
||||
graph.add_edge("h", "f");
|
||||
|
||||
graph.add_edge("i", "j");
|
||||
graph.add_edge("j", "i");
|
||||
|
||||
graph.add_edge("j", "f");
|
||||
|
||||
graph.add_edge("x", "f");
|
||||
graph.add_edge("x", "i");
|
||||
|
||||
for(auto& group : graph.compute_order()) {
|
||||
std::cout << "Group: " << std::endl;
|
||||
for(auto& member : group->members) {
|
||||
std::cout << member << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
213
code/compiler/13/type.cpp
Normal file
@@ -0,0 +1,213 @@
|
||||
#include "type.hpp"
|
||||
#include <ostream>
|
||||
#include <sstream>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include "error.hpp"
|
||||
|
||||
void type_scheme::print(const type_mgr& mgr, std::ostream& to) const {
|
||||
if(forall.size() != 0) {
|
||||
to << "forall ";
|
||||
for(auto& var : forall) {
|
||||
to << var << " ";
|
||||
}
|
||||
to << ". ";
|
||||
}
|
||||
monotype->print(mgr, to);
|
||||
}
|
||||
|
||||
type_ptr type_scheme::instantiate(type_mgr& mgr) const {
|
||||
if(forall.size() == 0) return monotype;
|
||||
std::map<std::string, type_ptr> subst;
|
||||
for(auto& var : forall) {
|
||||
subst[var] = mgr.new_type();
|
||||
}
|
||||
return mgr.substitute(subst, monotype);
|
||||
}
|
||||
|
||||
void type_var::print(const type_mgr& mgr, std::ostream& to) const {
|
||||
auto type = mgr.lookup(name);
|
||||
if(type) {
|
||||
type->print(mgr, to);
|
||||
} else {
|
||||
to << name;
|
||||
}
|
||||
}
|
||||
|
||||
void type_base::print(const type_mgr& mgr, std::ostream& to) const {
|
||||
to << name;
|
||||
}
|
||||
|
||||
void type_arr::print(const type_mgr& mgr, std::ostream& to) const {
|
||||
type_var* var;
|
||||
bool print_parenths = dynamic_cast<type_arr*>(mgr.resolve(left, var).get()) != nullptr;
|
||||
if(print_parenths) to << "(";
|
||||
left->print(mgr, to);
|
||||
if(print_parenths) to << ")";
|
||||
to << " -> ";
|
||||
right->print(mgr, to);
|
||||
}
|
||||
|
||||
void type_app::print(const type_mgr& mgr, std::ostream& to) const {
|
||||
constructor->print(mgr, to);
|
||||
to << "*";
|
||||
for(auto& arg : arguments) {
|
||||
to << " ";
|
||||
arg->print(mgr, to);
|
||||
}
|
||||
}
|
||||
|
||||
std::string type_mgr::new_type_name() {
|
||||
int temp = last_id++;
|
||||
std::string str = "";
|
||||
|
||||
while(temp != -1) {
|
||||
str += (char) ('a' + (temp % 26));
|
||||
temp = temp / 26 - 1;
|
||||
}
|
||||
|
||||
std::reverse(str.begin(), str.end());
|
||||
return str;
|
||||
}
|
||||
|
||||
type_ptr type_mgr::new_type() {
|
||||
return type_ptr(new type_var(new_type_name()));
|
||||
}
|
||||
|
||||
type_ptr type_mgr::new_arrow_type() {
|
||||
return type_ptr(new type_arr(new_type(), new_type()));
|
||||
}
|
||||
|
||||
type_ptr type_mgr::lookup(const std::string& var) const {
|
||||
auto types_it = types.find(var);
|
||||
if(types_it != types.end()) return types_it->second;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
type_ptr type_mgr::resolve(type_ptr t, type_var*& var) const {
|
||||
type_var* cast;
|
||||
|
||||
var = nullptr;
|
||||
while((cast = dynamic_cast<type_var*>(t.get()))) {
|
||||
auto it = types.find(cast->name);
|
||||
|
||||
if(it == types.end()) {
|
||||
var = cast;
|
||||
break;
|
||||
}
|
||||
t = it->second;
|
||||
}
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
void type_mgr::unify(type_ptr l, type_ptr r, const std::optional<yy::location>& loc) {
|
||||
type_var *lvar, *rvar;
|
||||
type_arr *larr, *rarr;
|
||||
type_base *lid, *rid;
|
||||
type_app *lapp, *rapp;
|
||||
|
||||
l = resolve(l, lvar);
|
||||
r = resolve(r, rvar);
|
||||
|
||||
if(lvar) {
|
||||
bind(lvar->name, r);
|
||||
return;
|
||||
} else if(rvar) {
|
||||
bind(rvar->name, l);
|
||||
return;
|
||||
} else if((larr = dynamic_cast<type_arr*>(l.get())) &&
|
||||
(rarr = dynamic_cast<type_arr*>(r.get()))) {
|
||||
unify(larr->left, rarr->left, loc);
|
||||
unify(larr->right, rarr->right, loc);
|
||||
return;
|
||||
} else if((lid = dynamic_cast<type_base*>(l.get())) &&
|
||||
(rid = dynamic_cast<type_base*>(r.get()))) {
|
||||
if(lid->name == rid->name &&
|
||||
lid->arity == rid->arity)
|
||||
return;
|
||||
} else if((lapp = dynamic_cast<type_app*>(l.get())) &&
|
||||
(rapp = dynamic_cast<type_app*>(r.get()))) {
|
||||
unify(lapp->constructor, rapp->constructor, loc);
|
||||
auto left_it = lapp->arguments.begin();
|
||||
auto right_it = rapp->arguments.begin();
|
||||
while(left_it != lapp->arguments.end() &&
|
||||
right_it != rapp->arguments.end()) {
|
||||
unify(*left_it, *right_it, loc);
|
||||
left_it++, right_it++;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
throw unification_error(l, r, loc);
|
||||
}
|
||||
|
||||
type_ptr type_mgr::substitute(const std::map<std::string, type_ptr>& subst, const type_ptr& t) const {
|
||||
type_ptr temp = t;
|
||||
while(type_var* var = dynamic_cast<type_var*>(temp.get())) {
|
||||
auto subst_it = subst.find(var->name);
|
||||
if(subst_it != subst.end()) return subst_it->second;
|
||||
auto var_it = types.find(var->name);
|
||||
if(var_it == types.end()) return t;
|
||||
temp = var_it->second;
|
||||
}
|
||||
|
||||
if(type_arr* arr = dynamic_cast<type_arr*>(temp.get())) {
|
||||
auto left_result = substitute(subst, arr->left);
|
||||
auto right_result = substitute(subst, arr->right);
|
||||
if(left_result == arr->left && right_result == arr->right) return t;
|
||||
return type_ptr(new type_arr(left_result, right_result));
|
||||
} else if(type_app* app = dynamic_cast<type_app*>(temp.get())) {
|
||||
auto constructor_result = substitute(subst, app->constructor);
|
||||
bool arg_changed = false;
|
||||
std::vector<type_ptr> new_args;
|
||||
for(auto& arg : app->arguments) {
|
||||
auto arg_result = substitute(subst, arg);
|
||||
arg_changed |= arg_result != arg;
|
||||
new_args.push_back(std::move(arg_result));
|
||||
}
|
||||
|
||||
if(constructor_result == app->constructor && !arg_changed) return t;
|
||||
type_app* new_app = new type_app(std::move(constructor_result));
|
||||
std::swap(new_app->arguments, new_args);
|
||||
return type_ptr(new_app);
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
void type_mgr::bind(const std::string& s, type_ptr t) {
|
||||
type_var* other = dynamic_cast<type_var*>(t.get());
|
||||
|
||||
if(other && other->name == s) return;
|
||||
types[s] = t;
|
||||
}
|
||||
|
||||
void type_mgr::find_free(const type_ptr& t, std::set<std::string>& into) const {
|
||||
type_var* var;
|
||||
type_ptr resolved = resolve(t, var);
|
||||
|
||||
if(var) {
|
||||
into.insert(var->name);
|
||||
} else if(type_arr* arr = dynamic_cast<type_arr*>(resolved.get())) {
|
||||
find_free(arr->left, into);
|
||||
find_free(arr->right, into);
|
||||
} else if(type_app* app = dynamic_cast<type_app*>(resolved.get())) {
|
||||
find_free(app->constructor, into);
|
||||
for(auto& arg : app->arguments) find_free(arg, into);
|
||||
}
|
||||
}
|
||||
|
||||
void type_mgr::find_free(const type_scheme_ptr& t, std::set<std::string>& into) const {
|
||||
std::set<std::string> monotype_free;
|
||||
type_mgr limited_mgr;
|
||||
for(auto& binding : types) {
|
||||
auto existing_position = std::find(t->forall.begin(), t->forall.end(), binding.first);
|
||||
if(existing_position != t->forall.end()) continue;
|
||||
limited_mgr.types[binding.first] = binding.second;
|
||||
}
|
||||
limited_mgr.find_free(t->monotype, monotype_free);
|
||||
for(auto& not_free : t->forall) {
|
||||
monotype_free.erase(not_free);
|
||||
}
|
||||
into.insert(monotype_free.begin(), monotype_free.end());
|
||||
}
|
||||
101
code/compiler/13/type.hpp
Normal file
@@ -0,0 +1,101 @@
|
||||
#pragma once
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <set>
|
||||
#include <optional>
|
||||
#include "location.hh"
|
||||
|
||||
class type_mgr;
|
||||
|
||||
struct type {
|
||||
virtual ~type() = default;
|
||||
|
||||
virtual void print(const type_mgr& mgr, std::ostream& to) const = 0;
|
||||
};
|
||||
|
||||
using type_ptr = std::shared_ptr<type>;
|
||||
|
||||
struct type_scheme {
|
||||
std::vector<std::string> forall;
|
||||
type_ptr monotype;
|
||||
|
||||
type_scheme(type_ptr type) : forall(), monotype(std::move(type)) {}
|
||||
|
||||
void print(const type_mgr& mgr, std::ostream& to) const;
|
||||
type_ptr instantiate(type_mgr& mgr) const;
|
||||
};
|
||||
|
||||
using type_scheme_ptr = std::shared_ptr<type_scheme>;
|
||||
|
||||
struct type_var : public type {
|
||||
std::string name;
|
||||
|
||||
type_var(std::string n)
|
||||
: name(std::move(n)) {}
|
||||
|
||||
void print(const type_mgr& mgr, std::ostream& to) const;
|
||||
};
|
||||
|
||||
struct type_base : public type {
|
||||
std::string name;
|
||||
int32_t arity;
|
||||
|
||||
type_base(std::string n, int32_t a = 0)
|
||||
: name(std::move(n)), arity(a) {}
|
||||
|
||||
void print(const type_mgr& mgr, std::ostream& to) const;
|
||||
};
|
||||
|
||||
struct type_data : public type_base {
|
||||
struct constructor {
|
||||
int tag;
|
||||
};
|
||||
|
||||
std::map<std::string, constructor> constructors;
|
||||
|
||||
type_data(std::string n, int32_t a = 0)
|
||||
: type_base(std::move(n), a) {}
|
||||
};
|
||||
|
||||
struct type_arr : public type {
|
||||
type_ptr left;
|
||||
type_ptr right;
|
||||
|
||||
type_arr(type_ptr l, type_ptr r)
|
||||
: left(std::move(l)), right(std::move(r)) {}
|
||||
|
||||
void print(const type_mgr& mgr, std::ostream& to) const;
|
||||
};
|
||||
|
||||
struct type_app : public type {
|
||||
type_ptr constructor;
|
||||
std::vector<type_ptr> arguments;
|
||||
|
||||
type_app(type_ptr c)
|
||||
: constructor(std::move(c)) {}
|
||||
|
||||
void print(const type_mgr& mgr, std::ostream& to) const;
|
||||
};
|
||||
|
||||
class type_mgr {
|
||||
private:
|
||||
int last_id = 0;
|
||||
std::map<std::string, type_ptr> types;
|
||||
|
||||
public:
|
||||
std::string new_type_name();
|
||||
type_ptr new_type();
|
||||
type_ptr new_arrow_type();
|
||||
|
||||
void unify(type_ptr l, type_ptr r, const std::optional<yy::location>& loc = std::nullopt);
|
||||
type_ptr substitute(
|
||||
const std::map<std::string, type_ptr>& subst,
|
||||
const type_ptr& t) const;
|
||||
type_ptr lookup(const std::string& var) const;
|
||||
type_ptr resolve(type_ptr t, type_var*& var) const;
|
||||
void bind(const std::string& s, type_ptr t);
|
||||
void find_free(const type_ptr& t, std::set<std::string>& into) const;
|
||||
void find_free(const type_scheme_ptr& t, std::set<std::string>& into) const;
|
||||
};
|
||||
96
code/compiler/13/type_env.cpp
Normal file
@@ -0,0 +1,96 @@
|
||||
#include "type_env.hpp"
|
||||
#include "type.hpp"
|
||||
#include "error.hpp"
|
||||
#include <cassert>
|
||||
|
||||
void type_env::find_free(const type_mgr& mgr, std::set<std::string>& into) const {
|
||||
if(parent != nullptr) parent->find_free(mgr, into);
|
||||
for(auto& binding : names) {
|
||||
mgr.find_free(binding.second.type, into);
|
||||
}
|
||||
}
|
||||
|
||||
void type_env::find_free_except(const type_mgr& mgr, const group& avoid,
|
||||
std::set<std::string>& into) const {
|
||||
if(parent != nullptr) parent->find_free(mgr, into);
|
||||
for(auto& binding : names) {
|
||||
if(avoid.members.find(binding.first) != avoid.members.end()) continue;
|
||||
mgr.find_free(binding.second.type, into);
|
||||
}
|
||||
}
|
||||
|
||||
type_scheme_ptr type_env::lookup(const std::string& name) const {
|
||||
auto it = names.find(name);
|
||||
if(it != names.end()) return it->second.type;
|
||||
if(parent) return parent->lookup(name);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool type_env::is_global(const std::string& name) const {
|
||||
auto it = names.find(name);
|
||||
if(it != names.end()) return it->second.vis == visibility::global;
|
||||
if(parent) return parent->is_global(name);
|
||||
return false;
|
||||
}
|
||||
|
||||
void type_env::set_mangled_name(const std::string& name, const std::string& mangled) {
|
||||
auto it = names.find(name);
|
||||
|
||||
// Can't set mangled name for non-existent variable.
|
||||
assert(it != names.end());
|
||||
// Local names shouldn't need mangling.
|
||||
assert(it->second.vis == visibility::global);
|
||||
|
||||
it->second.mangled_name = mangled;
|
||||
}
|
||||
|
||||
const std::string& type_env::get_mangled_name(const std::string& name) const {
|
||||
auto it = names.find(name);
|
||||
if(it != names.end()) {
|
||||
assert(it->second.mangled_name);
|
||||
return *it->second.mangled_name;
|
||||
}
|
||||
assert(parent != nullptr);
|
||||
return parent->get_mangled_name(name);
|
||||
}
|
||||
|
||||
type_ptr type_env::lookup_type(const std::string& name) const {
|
||||
auto it = type_names.find(name);
|
||||
if(it != type_names.end()) return it->second;
|
||||
if(parent) return parent->lookup_type(name);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void type_env::bind(const std::string& name, type_ptr t, visibility v) {
|
||||
type_scheme_ptr new_scheme(new type_scheme(std::move(t)));
|
||||
names[name] = variable_data(std::move(new_scheme), v, std::nullopt);
|
||||
}
|
||||
|
||||
void type_env::bind(const std::string& name, type_scheme_ptr t, visibility v) {
|
||||
names[name] = variable_data(std::move(t), v, "");
|
||||
}
|
||||
|
||||
void type_env::bind_type(const std::string& type_name, type_ptr t) {
|
||||
if(lookup_type(type_name) != nullptr)
|
||||
throw type_error("redefinition of type");
|
||||
type_names[type_name] = t;
|
||||
}
|
||||
|
||||
void type_env::generalize(const std::string& name, const group& grp, type_mgr& mgr) {
|
||||
auto names_it = names.find(name);
|
||||
assert(names_it != names.end());
|
||||
assert(names_it->second.type->forall.size() == 0);
|
||||
|
||||
std::set<std::string> free_in_type;
|
||||
std::set<std::string> free_in_env;
|
||||
mgr.find_free(names_it->second.type->monotype, free_in_type);
|
||||
find_free_except(mgr, grp, free_in_env);
|
||||
for(auto& free : free_in_type) {
|
||||
if(free_in_env.find(free) != free_in_env.end()) continue;
|
||||
names_it->second.type->forall.push_back(free);
|
||||
}
|
||||
}
|
||||
|
||||
type_env_ptr type_scope(type_env_ptr parent) {
|
||||
return type_env_ptr(new type_env(std::move(parent)));
|
||||
}
|
||||
52
code/compiler/13/type_env.hpp
Normal file
@@ -0,0 +1,52 @@
|
||||
#pragma once
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <set>
|
||||
#include <optional>
|
||||
#include "graph.hpp"
|
||||
#include "type.hpp"
|
||||
|
||||
struct type_env;
|
||||
using type_env_ptr = std::shared_ptr<type_env>;
|
||||
|
||||
enum class visibility { global,local };
|
||||
|
||||
class type_env {
|
||||
private:
|
||||
struct variable_data {
|
||||
type_scheme_ptr type;
|
||||
visibility vis;
|
||||
std::optional<std::string> mangled_name;
|
||||
|
||||
variable_data()
|
||||
: variable_data(nullptr, visibility::local, std::nullopt) {}
|
||||
variable_data(type_scheme_ptr t, visibility v, std::optional<std::string> n)
|
||||
: type(std::move(t)), vis(v), mangled_name(std::move(n)) {}
|
||||
};
|
||||
|
||||
type_env_ptr parent;
|
||||
std::map<std::string, variable_data> names;
|
||||
std::map<std::string, type_ptr> type_names;
|
||||
|
||||
public:
|
||||
type_env(type_env_ptr p) : parent(std::move(p)) {}
|
||||
type_env() : type_env(nullptr) {}
|
||||
|
||||
void find_free(const type_mgr& mgr, std::set<std::string>& into) const;
|
||||
void find_free_except(const type_mgr& mgr, const group& avoid,
|
||||
std::set<std::string>& into) const;
|
||||
type_scheme_ptr lookup(const std::string& name) const;
|
||||
bool is_global(const std::string& name) const;
|
||||
void set_mangled_name(const std::string& name, const std::string& mangled);
|
||||
const std::string& get_mangled_name(const std::string& name) const;
|
||||
type_ptr lookup_type(const std::string& name) const;
|
||||
void bind(const std::string& name, type_ptr t,
|
||||
visibility v = visibility::local);
|
||||
void bind(const std::string& name, type_scheme_ptr t,
|
||||
visibility v = visibility::local);
|
||||
void bind_type(const std::string& type_name, type_ptr t);
|
||||
void generalize(const std::string& name, const group& grp, type_mgr& mgr);
|
||||
};
|
||||
|
||||
|
||||
type_env_ptr type_scope(type_env_ptr parent);
|
||||
21
code/time-traveling/TakeMax.hs
Normal file
@@ -0,0 +1,21 @@
|
||||
takeUntilMax :: [Int] -> Int -> (Int, [Int])
|
||||
takeUntilMax [] m = (m, [])
|
||||
takeUntilMax [x] _ = (x, [x])
|
||||
takeUntilMax (x:xs) m
|
||||
| x == m = (x, [x])
|
||||
| otherwise =
|
||||
let (m', xs') = takeUntilMax xs m
|
||||
in (max m' x, x:xs')
|
||||
|
||||
doTakeUntilMax :: [Int] -> [Int]
|
||||
doTakeUntilMax l = l'
|
||||
where (m, l') = takeUntilMax l m
|
||||
|
||||
takeUntilMax' :: [Int] -> Int -> (Int, [Int])
|
||||
takeUntilMax' [] m = (m, [])
|
||||
takeUntilMax' [x] _ = (x, [x])
|
||||
takeUntilMax' (x:xs) m
|
||||
| x == m = (maximum (x:xs), [x])
|
||||
| otherwise =
|
||||
let (m', xs') = takeUntilMax' xs m
|
||||
in (max m' x, x:xs')
|
||||
28
code/time-traveling/ValueScore.hs
Normal file
@@ -0,0 +1,28 @@
|
||||
import Data.Map as Map
|
||||
import Data.Maybe
|
||||
import Control.Applicative
|
||||
|
||||
data Element = A | B | C | D
|
||||
deriving (Eq, Ord, Show)
|
||||
|
||||
addElement :: Element -> Map Element Int -> Map Element Int
|
||||
addElement = alter ((<|> Just 1) . fmap (+1))
|
||||
|
||||
getScore :: Element -> Map Element Int -> Float
|
||||
getScore e m = fromMaybe 1.0 $ ((1.0/) . fromIntegral) <$> Map.lookup e m
|
||||
|
||||
data BinaryTree a = Empty | Node a (BinaryTree a) (BinaryTree a) deriving Show
|
||||
type ElementTree = BinaryTree Element
|
||||
type ScoredElementTree = BinaryTree (Element, Float)
|
||||
|
||||
assignScores :: ElementTree -> Map Element Int -> (Map Element Int, ScoredElementTree)
|
||||
assignScores Empty m = (Map.empty, Empty)
|
||||
assignScores (Node e t1 t2) m = (m', Node (e, getScore e m) t1' t2')
|
||||
where
|
||||
(m1, t1') = assignScores t1 m
|
||||
(m2, t2') = assignScores t2 m
|
||||
m' = addElement e $ unionWith (+) m1 m2
|
||||
|
||||
doAssignScores :: ElementTree -> ScoredElementTree
|
||||
doAssignScores t = t'
|
||||
where (m, t') = assignScores t m
|
||||
102
code/typesafe-imperative/TypesafeImp.idr
Normal file
@@ -0,0 +1,102 @@
|
||||
data Reg = A | B | R
|
||||
|
||||
data Ty = IntTy | BoolTy
|
||||
|
||||
TypeState : Type
|
||||
TypeState = (Ty, Ty, Ty)
|
||||
|
||||
getRegTy : Reg -> TypeState -> Ty
|
||||
getRegTy A (a, _, _) = a
|
||||
getRegTy B (_, b, _) = b
|
||||
getRegTy R (_, _, r) = r
|
||||
|
||||
setRegTy : Reg -> Ty -> TypeState -> TypeState
|
||||
setRegTy A a (_, b, r) = (a, b, r)
|
||||
setRegTy B b (a, _, r) = (a, b, r)
|
||||
setRegTy R r (a, b, _) = (a, b, r)
|
||||
|
||||
data Expr : TypeState -> Ty -> Type where
|
||||
Lit : Int -> Expr s IntTy
|
||||
Load : (r : Reg) -> Expr s (getRegTy r s)
|
||||
Add : Expr s IntTy -> Expr s IntTy -> Expr s IntTy
|
||||
Leq : Expr s IntTy -> Expr s IntTy -> Expr s BoolTy
|
||||
Not : Expr s BoolTy -> Expr s BoolTy
|
||||
|
||||
mutual
|
||||
data Stmt : TypeState -> TypeState -> TypeState -> Type where
|
||||
Store : (r : Reg) -> Expr s t -> Stmt l s (setRegTy r t s)
|
||||
If : Expr s BoolTy -> Prog l s n -> Prog l s n -> Stmt l s n
|
||||
Loop : Prog s s s -> Stmt l s s
|
||||
Break : Stmt s s s
|
||||
|
||||
data Prog : TypeState -> TypeState -> TypeState -> Type where
|
||||
Nil : Prog l s s
|
||||
(::) : Stmt l s n -> Prog l n m -> Prog l s m
|
||||
|
||||
initialState : TypeState
|
||||
initialState = (IntTy, IntTy, IntTy)
|
||||
|
||||
testProg : Prog Main.initialState Main.initialState Main.initialState
|
||||
testProg =
|
||||
[ Store A (Lit 1 `Leq` Lit 2)
|
||||
, If (Load A)
|
||||
[ Store A (Lit 1) ]
|
||||
[ Store A (Lit 2) ]
|
||||
, Store B (Lit 2)
|
||||
, Store R (Add (Load A) (Load B))
|
||||
]
|
||||
|
||||
prodProg : Prog Main.initialState Main.initialState Main.initialState
|
||||
prodProg =
|
||||
[ Store A (Lit 7)
|
||||
, Store B (Lit 9)
|
||||
, Store R (Lit 0)
|
||||
, Loop
|
||||
[ If (Load A `Leq` Lit 0)
|
||||
[ Break ]
|
||||
[ Store R (Load R `Add` Load B)
|
||||
, Store A (Load A `Add` Lit (-1))
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
repr : Ty -> Type
|
||||
repr IntTy = Int
|
||||
repr BoolTy = Bool
|
||||
|
||||
data State : TypeState -> Type where
|
||||
MkState : (repr a, repr b, repr c) -> State (a, b, c)
|
||||
|
||||
getReg : (r : Reg) -> State s -> repr (getRegTy r s)
|
||||
getReg A (MkState (a, _, _)) = a
|
||||
getReg B (MkState (_, b, _)) = b
|
||||
getReg R (MkState (_, _, r)) = r
|
||||
|
||||
setReg : (r : Reg) -> repr t -> State s -> State (setRegTy r t s)
|
||||
setReg A a (MkState (_, b, r)) = MkState (a, b, r)
|
||||
setReg B b (MkState (a, _, r)) = MkState (a, b, r)
|
||||
setReg R r (MkState (a, b, _)) = MkState (a, b, r)
|
||||
|
||||
expr : Expr s t -> State s -> repr t
|
||||
expr (Lit i) _ = i
|
||||
expr (Load r) s = getReg r s
|
||||
expr (Add l r) s = expr l s + expr r s
|
||||
expr (Leq l r) s = expr l s <= expr r s
|
||||
expr (Not e) s = not $ expr e s
|
||||
|
||||
mutual
|
||||
stmt : Stmt l s n -> State s -> Either (State l) (State n)
|
||||
stmt (Store r e) s = Right $ setReg r (expr e s) s
|
||||
stmt (If c t e) s = if expr c s then prog t s else prog e s
|
||||
stmt (Loop p) s =
|
||||
case prog p s >>= stmt (Loop p) of
|
||||
Right s => Right s
|
||||
Left s => Right s
|
||||
stmt Break s = Left s
|
||||
|
||||
prog : Prog l s n -> State s -> Either (State l) (State n)
|
||||
prog Nil s = Right s
|
||||
prog (st::p) s = stmt st s >>= prog p
|
||||
|
||||
run : Prog l s l -> State s -> State l
|
||||
run p s = either id id $ prog p s
|
||||
99
code/typesafe-interpreter/TypesafeIntrV2.idr
Normal file
@@ -0,0 +1,99 @@
|
||||
data ExprType
|
||||
= IntType
|
||||
| BoolType
|
||||
| StringType
|
||||
|
||||
repr : ExprType -> Type
|
||||
repr IntType = Int
|
||||
repr BoolType = Bool
|
||||
repr StringType = String
|
||||
|
||||
intBoolImpossible : IntType = BoolType -> Void
|
||||
intBoolImpossible Refl impossible
|
||||
|
||||
intStringImpossible : IntType = StringType -> Void
|
||||
intStringImpossible Refl impossible
|
||||
|
||||
boolStringImpossible : BoolType = StringType -> Void
|
||||
boolStringImpossible Refl impossible
|
||||
|
||||
decEq : (a : ExprType) -> (b : ExprType) -> Dec (a = b)
|
||||
decEq IntType IntType = Yes Refl
|
||||
decEq BoolType BoolType = Yes Refl
|
||||
decEq StringType StringType = Yes Refl
|
||||
decEq IntType BoolType = No intBoolImpossible
|
||||
decEq BoolType IntType = No $ intBoolImpossible . sym
|
||||
decEq IntType StringType = No intStringImpossible
|
||||
decEq StringType IntType = No $ intStringImpossible . sym
|
||||
decEq BoolType StringType = No boolStringImpossible
|
||||
decEq StringType BoolType = No $ boolStringImpossible . sym
|
||||
|
||||
data Op
|
||||
= Add
|
||||
| Subtract
|
||||
| Multiply
|
||||
| Divide
|
||||
|
||||
data Expr
|
||||
= IntLit Int
|
||||
| BoolLit Bool
|
||||
| StringLit String
|
||||
| BinOp Op Expr Expr
|
||||
| IfElse Expr Expr Expr
|
||||
|
||||
data SafeExpr : ExprType -> Type where
|
||||
IntLiteral : Int -> SafeExpr IntType
|
||||
BoolLiteral : Bool -> SafeExpr BoolType
|
||||
StringLiteral : String -> SafeExpr StringType
|
||||
BinOperation : (repr a -> repr b -> repr c) -> SafeExpr a -> SafeExpr b -> SafeExpr c
|
||||
IfThenElse : SafeExpr BoolType -> SafeExpr t -> SafeExpr t -> SafeExpr t
|
||||
|
||||
typecheckOp : Op -> (a : ExprType) -> (b : ExprType) -> Either String (c : ExprType ** repr a -> repr b -> repr c)
|
||||
typecheckOp Add IntType IntType = Right (IntType ** (+))
|
||||
typecheckOp Subtract IntType IntType = Right (IntType ** (-))
|
||||
typecheckOp Multiply IntType IntType = Right (IntType ** (*))
|
||||
typecheckOp Divide IntType IntType = Right (IntType ** div)
|
||||
typecheckOp _ _ _ = Left "Invalid binary operator application"
|
||||
|
||||
requireBool : (n : ExprType ** SafeExpr n) -> Either String (SafeExpr BoolType)
|
||||
requireBool (BoolType ** e) = Right e
|
||||
requireBool _ = Left "Not a boolean."
|
||||
|
||||
typecheck : Expr -> Either String (n : ExprType ** SafeExpr n)
|
||||
typecheck (IntLit i) = Right (_ ** IntLiteral i)
|
||||
typecheck (BoolLit b) = Right (_ ** BoolLiteral b)
|
||||
typecheck (StringLit s) = Right (_ ** StringLiteral s)
|
||||
typecheck (BinOp o l r) = do
|
||||
(lt ** le) <- typecheck l
|
||||
(rt ** re) <- typecheck r
|
||||
(ot ** f) <- typecheckOp o lt rt
|
||||
pure (_ ** BinOperation f le re)
|
||||
typecheck (IfElse c t e) =
|
||||
do
|
||||
ce <- typecheck c >>= requireBool
|
||||
(tt ** te) <- typecheck t
|
||||
(et ** ee) <- typecheck e
|
||||
case decEq tt et of
|
||||
Yes p => pure (_ ** IfThenElse ce (replace p te) ee)
|
||||
No _ => Left "Incompatible branch types."
|
||||
|
||||
eval : SafeExpr t -> repr t
|
||||
eval (IntLiteral i) = i
|
||||
eval (BoolLiteral b) = b
|
||||
eval (StringLiteral s) = s
|
||||
eval (BinOperation f l r) = f (eval l) (eval r)
|
||||
eval (IfThenElse c t e) = if (eval c) then (eval t) else (eval e)
|
||||
|
||||
resultStr : {t : ExprType} -> repr t -> String
|
||||
resultStr {t=IntType} i = show i
|
||||
resultStr {t=BoolType} b = show b
|
||||
resultStr {t=StringType} s = show s
|
||||
|
||||
tryEval : Expr -> String
|
||||
tryEval ex =
|
||||
case typecheck ex of
|
||||
Left err => "Type error: " ++ err
|
||||
Right (t ** e) => resultStr $ eval {t} e
|
||||
|
||||
main : IO ()
|
||||
main = putStrLn $ tryEval $ BinOp Add (IfElse (BoolLit True) (IntLit 6) (IntLit 7)) (BinOp Multiply (IntLit 160) (IntLit 2))
|
||||
120
code/typesafe-interpreter/TypesafeIntrV3.idr
Normal file
@@ -0,0 +1,120 @@
|
||||
data ExprType
|
||||
= IntType
|
||||
| BoolType
|
||||
| StringType
|
||||
| PairType ExprType ExprType
|
||||
|
||||
repr : ExprType -> Type
|
||||
repr IntType = Int
|
||||
repr BoolType = Bool
|
||||
repr StringType = String
|
||||
repr (PairType t1 t2) = Pair (repr t1) (repr t2)
|
||||
|
||||
decEq : (a : ExprType) -> (b : ExprType) -> Maybe (a = b)
|
||||
decEq IntType IntType = Just Refl
|
||||
decEq BoolType BoolType = Just Refl
|
||||
decEq StringType StringType = Just Refl
|
||||
decEq (PairType lt1 lt2) (PairType rt1 rt2) = do
|
||||
subEq1 <- decEq lt1 rt1
|
||||
subEq2 <- decEq lt2 rt2
|
||||
let firstEqual = replace {P = \t1 => PairType lt1 lt2 = PairType t1 lt2} subEq1 Refl
|
||||
let secondEqual = replace {P = \t2 => PairType lt1 lt2 = PairType rt1 t2} subEq2 firstEqual
|
||||
pure secondEqual
|
||||
decEq _ _ = Nothing
|
||||
|
||||
data Op
|
||||
= Add
|
||||
| Subtract
|
||||
| Multiply
|
||||
| Divide
|
||||
|
||||
data Expr
|
||||
= IntLit Int
|
||||
| BoolLit Bool
|
||||
| StringLit String
|
||||
| BinOp Op Expr Expr
|
||||
| IfElse Expr Expr Expr
|
||||
| Pair Expr Expr
|
||||
| Fst Expr
|
||||
| Snd Expr
|
||||
|
||||
data SafeExpr : ExprType -> Type where
|
||||
IntLiteral : Int -> SafeExpr IntType
|
||||
BoolLiteral : Bool -> SafeExpr BoolType
|
||||
StringLiteral : String -> SafeExpr StringType
|
||||
BinOperation : (repr a -> repr b -> repr c) -> SafeExpr a -> SafeExpr b -> SafeExpr c
|
||||
IfThenElse : SafeExpr BoolType -> SafeExpr t -> SafeExpr t -> SafeExpr t
|
||||
NewPair : SafeExpr t1 -> SafeExpr t2 -> SafeExpr (PairType t1 t2)
|
||||
First : SafeExpr (PairType t1 t2) -> SafeExpr t1
|
||||
Second : SafeExpr (PairType t1 t2) -> SafeExpr t2
|
||||
|
||||
typecheckOp : Op -> (a : ExprType) -> (b : ExprType) -> Either String (c : ExprType ** repr a -> repr b -> repr c)
|
||||
typecheckOp Add IntType IntType = Right (IntType ** (+))
|
||||
typecheckOp Subtract IntType IntType = Right (IntType ** (-))
|
||||
typecheckOp Multiply IntType IntType = Right (IntType ** (*))
|
||||
typecheckOp Divide IntType IntType = Right (IntType ** div)
|
||||
typecheckOp _ _ _ = Left "Invalid binary operator application"
|
||||
|
||||
requireBool : (n : ExprType ** SafeExpr n) -> Either String (SafeExpr BoolType)
|
||||
requireBool (BoolType ** e) = Right e
|
||||
requireBool _ = Left "Not a boolean."
|
||||
|
||||
typecheck : Expr -> Either String (n : ExprType ** SafeExpr n)
|
||||
typecheck (IntLit i) = Right (_ ** IntLiteral i)
|
||||
typecheck (BoolLit b) = Right (_ ** BoolLiteral b)
|
||||
typecheck (StringLit s) = Right (_ ** StringLiteral s)
|
||||
typecheck (BinOp o l r) = do
|
||||
(lt ** le) <- typecheck l
|
||||
(rt ** re) <- typecheck r
|
||||
(ot ** f) <- typecheckOp o lt rt
|
||||
pure (_ ** BinOperation f le re)
|
||||
typecheck (IfElse c t e) =
|
||||
do
|
||||
ce <- typecheck c >>= requireBool
|
||||
(tt ** te) <- typecheck t
|
||||
(et ** ee) <- typecheck e
|
||||
case decEq tt et of
|
||||
Just p => pure (_ ** IfThenElse ce (replace p te) ee)
|
||||
Nothing => Left "Incompatible branch types."
|
||||
typecheck (Pair l r) =
|
||||
do
|
||||
(lt ** le) <- typecheck l
|
||||
(rt ** re) <- typecheck r
|
||||
pure (_ ** NewPair le re)
|
||||
typecheck (Fst p) =
|
||||
do
|
||||
(pt ** pe) <- typecheck p
|
||||
case pt of
|
||||
PairType _ _ => pure $ (_ ** First pe)
|
||||
_ => Left "Applying fst to non-pair."
|
||||
typecheck (Snd p) =
|
||||
do
|
||||
(pt ** pe) <- typecheck p
|
||||
case pt of
|
||||
PairType _ _ => pure $ (_ ** Second pe)
|
||||
_ => Left "Applying snd to non-pair."
|
||||
|
||||
eval : SafeExpr t -> repr t
|
||||
eval (IntLiteral i) = i
|
||||
eval (BoolLiteral b) = b
|
||||
eval (StringLiteral s) = s
|
||||
eval (BinOperation f l r) = f (eval l) (eval r)
|
||||
eval (IfThenElse c t e) = if (eval c) then (eval t) else (eval e)
|
||||
eval (NewPair l r) = (eval l, eval r)
|
||||
eval (First p) = fst (eval p)
|
||||
eval (Second p) = snd (eval p)
|
||||
|
||||
resultStr : {t : ExprType} -> repr t -> String
|
||||
resultStr {t=IntType} i = show i
|
||||
resultStr {t=BoolType} b = show b
|
||||
resultStr {t=StringType} s = show s
|
||||
resultStr {t=PairType t1 t2} (l,r) = "(" ++ resultStr l ++ ", " ++ resultStr r ++ ")"
|
||||
|
||||
tryEval : Expr -> String
|
||||
tryEval ex =
|
||||
case typecheck ex of
|
||||
Left err => "Type error: " ++ err
|
||||
Right (t ** e) => resultStr $ eval {t} e
|
||||
|
||||
main : IO ()
|
||||
main = putStrLn $ tryEval $ BinOp Add (Fst (IfElse (BoolLit True) (Pair (IntLit 6) (BoolLit True)) (Pair (IntLit 7) (BoolLit False)))) (BinOp Multiply (IntLit 160) (IntLit 2))
|
||||
5
config-gen.toml
Normal file
@@ -0,0 +1,5 @@
|
||||
[params]
|
||||
[params.submoduleLinks]
|
||||
[params.submoduleLinks.aoc2020]
|
||||
url = "https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020/src/commit/7a8503c3fe1aa7e624e4d8672aa9b56d24b4ba82"
|
||||
path = "aoc-2020"
|
||||
17
config.toml
@@ -3,5 +3,20 @@ languageCode = "en-us"
|
||||
title = "Daniel's Blog"
|
||||
theme = "vanilla"
|
||||
pygmentsCodeFences = true
|
||||
pygmentsStyle = "github"
|
||||
pygmentsUseClasses = true
|
||||
summaryLength = 20
|
||||
|
||||
[outputFormats]
|
||||
[outputFormats.Toml]
|
||||
name = "toml"
|
||||
mediaType = "application/toml"
|
||||
isHTML = false
|
||||
|
||||
[outputs]
|
||||
home = ["html","rss","toml"]
|
||||
|
||||
[markup]
|
||||
[markup.tableOfContents]
|
||||
endLevel = 4
|
||||
ordered = false
|
||||
startLevel = 3
|
||||
|
||||
@@ -1,10 +1,43 @@
|
||||
---
|
||||
title: About
|
||||
---
|
||||
I'm Daniel, a Computer Science student currently in my third (and final) undergraduate year at Oregon State University.
|
||||
Due my initial interest in calculators and compilers, I got involved in the Programming Language Theory research
|
||||
{{< donate_css >}}
|
||||
|
||||
I'm Daniel, a Computer Science student currently working towards my Master's Degree at Oregon State University.
|
||||
Due to my initial interest in calculators and compilers, I got involved in the Programming Language Theory research
|
||||
group, gaining some experience in formal verification, domain-specific languages, and explainable computing.
|
||||
|
||||
For work, school, and hobby projects, I use a variety of programming languages, most commonly C/C++,
|
||||
Haskell, [Crystal](https://crystal-lang.org/), and [Elm](https://elm-lang.org/). I also have experience
|
||||
with Java, Python, Haxe, and JavaScript.
|
||||
|
||||
A few notes about me or this site:
|
||||
* __Correctness__: I mostly write technical content. Even though I proofread my articles, there's
|
||||
always a fairly good chance that I'm wrong. You should always use your best judgement when reading
|
||||
anything on this site -- if something seems wrong, it may very well be. I'm far from an expert.
|
||||
* __Schedule__: I do not have a set post schedule. There are many reasons for this:
|
||||
schoolwork, personal life, lack of inspiration. It also takes a _very_ long time for
|
||||
me to write a single article. My article on [polymorphic type checking]({{< relref "/blog/10_compiler_polymorphism.md" >}})
|
||||
is around 8,000 words long; besides writing it, I have to edit it, link up all the code
|
||||
references, and proofread the final result. And of course, I need to write the code and
|
||||
occasionally do some research.
|
||||
* __Design__: I am doing my best to keep this website accessible and easy on the eyes.
|
||||
I'm also doing my best to avoid any and all uses of JavaScript. I used to use a lot of
|
||||
uMatrix, and most of the websites I browsed during this time were broken. Similarly,
|
||||
a lot of websites were unusable on my weaker machines. So, I'm doing my part and
|
||||
making this site usable without any JavaScript, and, as it seems to me, even
|
||||
without any CSS.
|
||||
* __Source code__: This blog is open source, but not on GitHub. Instead,
|
||||
you can find the code on my [Gitea instance](https://dev.danilafe.com/Web-Projects/blog-static).
|
||||
If you use this code for your own site, I would prefer that you don't copy the theme.
|
||||
|
||||
### Donate
|
||||
I don't run ads, nor do I profit from writing anything on here. I have no trouble paying for hosting,
|
||||
and I write my articles voluntarily, for my own enjoyment. However, if you found something particularly
|
||||
helpful on here, and would like to buy me a cup of coffee or help host the site, you can donate using
|
||||
the method(s) below.
|
||||
|
||||
{{< donation_methods >}}
|
||||
{{< donation_method "Bitcoin" "1BbXPZhdzv4xHq5LYhme3xBiUsHw5fmafd" >}}
|
||||
{{< donation_method "Ethereum" "0xd111E49344bEC80570e68EE0A00b87B1EFcb5D56" >}}
|
||||
{{< /donation_methods >}}
|
||||
|
||||
351
content/blog/00_aoc_coq.md
Normal file
@@ -0,0 +1,351 @@
|
||||
---
|
||||
title: "Advent of Code in Coq - Day 1"
|
||||
date: 2020-12-02T18:44:56-08:00
|
||||
tags: ["Advent of Code", "Coq"]
|
||||
favorite: true
|
||||
---
|
||||
|
||||
The first puzzle of this year's [Advent of Code](https://adventofcode.com) was quite
|
||||
simple, which gave me a thought: "Hey, this feels within reach for me to formally verify!"
|
||||
At first, I wanted to formalize and prove the correctness of the [two-pointer solution](https://www.geeksforgeeks.org/two-pointers-technique/).
|
||||
However, I didn't have the time to mess around with the various properties of sorted
|
||||
lists and their traversals. So, I settled for the brute force solution. Despite
|
||||
the simplicity of its implementation, there is plenty to talk about when proving
|
||||
its correctness using Coq. Let's get right into it!
|
||||
|
||||
Before we start, in the interest of keeping the post self-contained, here's the (paraphrased)
|
||||
problem statement:
|
||||
|
||||
> Given an unsorted list of numbers, find two distinct numbers that add up to 2020.
|
||||
|
||||
With this in mind, we can move on to writing some Coq!
|
||||
|
||||
### Defining the Functions
|
||||
The first step to proving our code correct is to actually write the code! To start with,
|
||||
let's write a helper function that, given a number `x`, tries to find another number
|
||||
`y` such that `x + y = 2020`. In fact, rather than hardcoding the desired
|
||||
sum to `2020`, let's just use another argument called `total`. The code is quite simple:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 11 18 >}}
|
||||
|
||||
Here, `is` is the list of numbers that we want to search.
|
||||
We proceed by case analysis: if the list is empty, we can't
|
||||
find a match, so we return `None` (the Coq equivalent of Haskell's `Nothing`).
|
||||
On the other hand, if the list has at least one element `y`, we see if it adds
|
||||
up to `total`, and return `Some y` (equivalent to `Just y` in Haskell) if it does.
|
||||
If it doesn't, we continue our search into the rest of the list.
|
||||
|
||||
It's somewhat unusual, in my experience, to put the list argument first when writing
|
||||
functions in a language with [currying](https://wiki.haskell.org/Currying). However,
|
||||
it seems as though Coq's `simpl` tactic, which we will use later, works better
|
||||
for our purposes when the argument being case analyzed is given first.
|
||||
|
||||
We can now use `find_matching` to define our `find_sum` function, which solves part 1.
|
||||
Here's the code:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 20 28 >}}
|
||||
|
||||
For every `x` that we encounter in our input list `is`, we want to check if there's
|
||||
a matching number in the rest of the list. We only search the remainder of the list
|
||||
because we can't use `x` twice: the `x` and `y` we return that add up to `total`
|
||||
must be different elements. We use `find_matching` to try to find a complementary number
|
||||
for `x`. If we don't find it, this `x` isn't it, so we recursively move on to `xs`.
|
||||
On the other hand, if we _do_ find a matching `y`, we're done! We return `(x,y)`,
|
||||
wrapped in `Some` to indicate that we found something useful.
|
||||
|
||||
What about that `(* Was buggy! *)` line? Well, it so happens that my initial
|
||||
implementation had a bug on this line, one that came up as I was proving
|
||||
the correctness of my function. When I wasn't able to prove a particular
|
||||
behavior in one of the cases, I realized something was wrong. In short,
|
||||
my proof actually helped me find and fix a bug!
|
||||
|
||||
This is all the code we'll need to get our solution. Next, let's talk about some
|
||||
properties of our two functions.
|
||||
|
||||
### Our First Lemma
|
||||
When we call `find_matching`, we want to be sure that if we get a number,
|
||||
it does indeed add up to our expected total. We can state it a little bit more
|
||||
formally as follows:
|
||||
|
||||
> For any numbers `k` and `x`, and for any list of numbers `is`,
|
||||
> if `find_matching is k x` returns a number `y`, then `x + y = k`.
|
||||
|
||||
And this is how we write it in Coq:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 30 31 >}}
|
||||
|
||||
The arrow, `->`, reads "implies". Other than that, I think this
|
||||
property reads pretty well. The proof, unfortunately, is a little bit more involved.
|
||||
Here are the first few lines:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 32 35 >}}
|
||||
|
||||
We start with the `intros is` tactic, which is akin to saying
|
||||
"consider a particular list of integers `is`". We do this without losing
|
||||
generality: by simply examining a concrete list, we've said nothing about
|
||||
what that list is like. We then proceed by induction on `is`.
|
||||
|
||||
To prove something by induction for a list, we need to prove two things:
|
||||
|
||||
* The __base case__. Whatever property we want to hold, it must
|
||||
hold for the empty list, which is the simplest possible list.
|
||||
In our case, this means `find_matching` searching an empty list.
|
||||
* The __inductive case__. Assuming that a property holds for any list
|
||||
`[b, c, ...]`, we want to show that the property also holds for
|
||||
the list `[a, b, c, ...]`. That is, the property must remain true if we
|
||||
prepend an element to a list for which this property holds.
|
||||
|
||||
These two things combined give us a proof for _all_ lists, which is exactly
|
||||
what we want! If you don't believe me, here's how it works. Suppose you want
|
||||
to prove that some property `P` holds for `[1,2,3,4]`. Given the base
|
||||
case, we know that `P []` holds. Next, by the inductive case, since
|
||||
`P []` holds, we can prepend `4` to the list, and the property will
|
||||
still hold. Thus, `P [4]`. Now that `P [4]` holds, we can again prepend
|
||||
an element to the list, this time a `3`, and conclude that `P [3,4]`.
|
||||
Repeating this twice more, we arrive at our desired fact: `P [1,2,3,4]`.
|
||||
|
||||
When we write `induction is`, Coq will generate two proof goals for us,
|
||||
one for the base case, and one for the inductive case. We will have to prove
|
||||
each of them separately. Since we have
|
||||
not yet introduced the variables `k`, `x`, and `y`, they remain
|
||||
inside a `forall` quantifier at that time. To be able to refer
|
||||
to them, we want to use `intros`. We want to do this in both the
|
||||
base and the inductive case. To quickly do this, we use Coq's `;`
|
||||
operator. When we write `a; b`, Coq runs the tactic `a`, and then
|
||||
runs the tactic `b` in every proof goal generated by `a`. This is
|
||||
exactly what we want.
|
||||
|
||||
There's one more variable inside our second `intros`: `Hev`.
|
||||
This variable refers to the hypothesis of our statement:
|
||||
that is, the part on the left of the `->`. To prove that `A`
|
||||
implies `B`, we assume that `A` holds, and try to argue `B` from there.
|
||||
Here is no different: when we use `intros Hev`, we say, "suppose that you have
|
||||
a proof that `find_matching` evaluates to `Some y`, called `Hev`". The thing
|
||||
on the right of `->` becomes our proof goal.
|
||||
|
||||
Now, it's time to look at the cases. To focus on one case at a time,
|
||||
we use `-`. The first case is our base case. Here's what Coq prints
|
||||
out at this time:
|
||||
|
||||
```
|
||||
k, x, y : nat
|
||||
Hev : find_matching nil k x = Some y
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
x + y = k
|
||||
```
|
||||
|
||||
All the stuff above the `===` line are our hypotheses. We know
|
||||
that we have some `k`, `x`, and `y`, all of which are numbers.
|
||||
We also have the assumption that `find_matching` returns `Some y`.
|
||||
In the base case, `is` is just `[]`, and this is reflected in the
|
||||
type for `Hev`. To make this more clear, we'll simplify the call to `find_matching`
|
||||
in `Hev`, using `simpl in Hev`. Now, here's what Coq has to say about `Hev`:
|
||||
|
||||
```
|
||||
Hev : None = Some y
|
||||
```
|
||||
|
||||
Well, this doesn't make any sense. How can something be equal to nothing?
|
||||
We ask Coq this question using `inversion Hev`. Effectively, the question
|
||||
that `inversion` asks is: what are the possible ways we could have acquired `Hev`?
|
||||
Coq generates a proof goal for each of these possible ways. Alas, there are
|
||||
no ways to arrive at this contradictory assumption: the number of proof sub-goals
|
||||
is zero. This means we're done with the base case!
|
||||
|
||||
The inductive case is the meat of this proof. Here's the corresponding part
|
||||
of the Coq source file:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 36 40 >}}
|
||||
|
||||
This time, the proof state is more complicated:
|
||||
|
||||
```
|
||||
a : nat
|
||||
is : list nat
|
||||
IHis : forall k x y : nat, find_matching is k x = Some y -> x + y = k
|
||||
k, x, y : nat
|
||||
Hev : find_matching (a :: is) k x = Some y
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
x + y = k
|
||||
```
|
||||
|
||||
Following the footsteps of our informal description of the inductive case,
|
||||
Coq has us prove our property for `(a :: is)`, or the list `is` to which
|
||||
`a` is being prepended. Like before, we assume that our property holds for `is`.
|
||||
This is represented in the __induction hypothesis__ `IHis`. It states that if
|
||||
`find_matching` finds a `y` in `is`, it must add up to `k`. However, `IHis`
|
||||
doesn't tell us anything about `a :: is`: that's our job. We also still have
|
||||
`Hev`, which is our assumption that `find_matching` finds a `y` in `(a :: is)`.
|
||||
Running `simpl in Hev` gives us the following:
|
||||
|
||||
```
|
||||
Hev : (if x + a =? k then Some a else find_matching is k x) = Some y
|
||||
```
|
||||
|
||||
The result of `find_matching` now depends on whether or not the new element `a`
|
||||
adds up to `k`. If it does, then `find_matching` will return `a`, which means
|
||||
that `y` is the same as `a`. If not, it must be that `find_matching` finds
|
||||
the `y` in the rest of the list, `is`. We're not sure which of the possibilities
|
||||
is the case. Fortunately, we don't need to be!
|
||||
If we can prove that the `y` that `find_matching` finds is correct regardless
|
||||
of whether `a` adds up to `k` or not, we're good to go! To do this,
|
||||
we perform case analysis using `destruct`.
|
||||
|
||||
Our particular use of `destruct` says: check any possible value for `x + a =? k`,
|
||||
and create an equation `Heq` that tells us what that value is. `=?` returns a boolean
|
||||
value, and so `destruct` generates two new goals: one where the function returns `true`,
|
||||
and one where it returns `false`. We start with the former. Here's the proof state:
|
||||
|
||||
```
|
||||
a : nat
|
||||
is : list nat
|
||||
IHis : forall k x y : nat, find_matching is k x = Some y -> x + y = k
|
||||
k, x, y : nat
|
||||
Heq : (x + a =? k) = true
|
||||
Hev : Some a = Some y
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
x + y = k
|
||||
```
|
||||
|
||||
There is a new hypothesis: `Heq`. It tells us that we're currently
|
||||
considering the case where `=?` evaluates to `true`. Also,
|
||||
`Hev` has been considerably simplified: now that we know the condition
|
||||
of the `if` expression, we can just replace it with the `then` branch.
|
||||
|
||||
Looking at `Hev`, we can see that our prediction was right: `a` is equal to `y`. After all,
|
||||
if they weren't, `Some a` wouldn't equal to `Some y`. To make Coq
|
||||
take this information into account, we use `injection`. This will create
|
||||
a new hypothesis, `a = y`. But if one is equal to the other, why don't we
|
||||
just use only one of these variables everywhere? We do exactly that by using
|
||||
`subst`, which replaces `a` with `y` everywhere in our proof.
|
||||
|
||||
The proof state is now:
|
||||
|
||||
```
|
||||
is : list nat
|
||||
IHis : forall k x y : nat, find_matching is k x = Some y -> x + y = k
|
||||
k, x, y : nat
|
||||
Heq : (x + y =? k) = true
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
x + y = k
|
||||
```
|
||||
|
||||
We're close, but there's one more detail to keep in mind. Our goal, `x + y = k`,
|
||||
is the __proposition__ that `x + y` is equal to `k`. However, `Heq` tells us
|
||||
that the __function__ `=?` evaluates to `true`. These are fundamentally different.
|
||||
One talks about mathematical equality, while the other about some function `=?`
|
||||
defined somewhere in Coq's standard library. Who knows - maybe there's a bug in
|
||||
Coq's implementation! Fortunately, Coq comes with a proof that if two numbers
|
||||
are equal according to `=?`, they are mathematically equal. This proof is
|
||||
called `eqb_nat_eq`. We tell Coq to use this with `apply`. Our proof goal changes to:
|
||||
|
||||
```
|
||||
true = (x + y =? k)
|
||||
```
|
||||
|
||||
This is _almost_ like `Heq`, but flipped. Instead of manually flipping it and using `apply`
|
||||
with `Heq`, I let Coq do the rest of the work using `auto`.
|
||||
|
||||
Phew! All this for the `true` case of `=?`. Next, what happens if `x + a` does not equal `k`?
|
||||
Here's the proof state at this time:
|
||||
|
||||
```
|
||||
a : nat
|
||||
is : list nat
|
||||
IHis : forall k x y : nat, find_matching is k x = Some y -> x + y = k
|
||||
k, x, y : nat
|
||||
Heq : (x + a =? k) = false
|
||||
Hev : find_matching is k x = Some y
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
x + y = k
|
||||
```
|
||||
|
||||
Since `a` was not what it was looking for, `find_matching` moved on to `is`. But hey,
|
||||
we're in the inductive case! We are assuming that `find_matching` will work properly
|
||||
with the list `is`. Since `find_matching` found its `y` in `is`, this should be all we need!
|
||||
We use our induction hypothesis `IHis` with `apply`. `IHis` itself does not know that
|
||||
`find_matching` moved on to `is`, so it asks us to prove it. Fortunately, `Hev` tells us
|
||||
exactly that, so we use `assumption`, and the proof is complete! Quod erat demonstrandum, QED!
|
||||
|
||||
### The Rest of the Owl
|
||||
Here are a couple of other properties of `find_matching`. For brevity's sake, I will
|
||||
not go through their proofs step-by-step. I find that the best way to understand
|
||||
Coq proofs is to actually step through them in the IDE!
|
||||
|
||||
First on the list is `find_matching_skip`. Here's the type:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 42 43 >}}
|
||||
|
||||
It reads: if we correctly find a number in a small list `is`, we can find that same number
|
||||
even if another number is prepended to `is`. That makes sense: _adding_ a number to
|
||||
a list doesn't remove whatever we found in it! I used this lemma to prove another,
|
||||
`find_matching_works`:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 53 54 >}}
|
||||
|
||||
This reads, if there _is_ an element `y` in `is` that adds up to `k` with `x`, then
|
||||
`find_matching` will find it. This is an important property. After all, if it didn't
|
||||
hold, it would mean that `find_matching` would occasionally fail to find a matching
|
||||
number, even though it's there! We can't have that.
|
||||
|
||||
Finally, we want to specify what it means for `find_sum`, our solution function, to actually
|
||||
work. The naive definition would be:
|
||||
|
||||
> Given a list of integers, `find_sum` always finds a pair of numbers that add up to `k`.
|
||||
|
||||
Unfortunately, this is not true. What if, for instance, we give `find_sum` an empty list?
|
||||
There are no numbers from that list to find and add together. Even a non-empty list
|
||||
may not include such a pair! We need a way to characterize valid input lists. I claim
|
||||
that all lists from this Advent of Code puzzle are guaranteed to have two numbers that
|
||||
add up to our goal, and that these numbers are not equal to each other. In Coq,
|
||||
we state this as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 8 9 >}}
|
||||
|
||||
This defines a new property, `has_pair t is` (read "`is` has a pair of numbers that add to `t`"),
|
||||
which means:
|
||||
|
||||
> There are two numbers `n1` and `n2` such that, they are not equal to each other (`n1<>n2`) __and__
|
||||
> the number `n1` is an element of `is` (`In n1 is`) __and__
|
||||
> the number `n2` is an element of `is` (`In n2 is`) __and__
|
||||
> the two numbers add up to `t` (`n1 + n2 = t`).
|
||||
|
||||
When making claims about the correctness of our algorithm, we will assume that this
|
||||
property holds. Finally, here's the theorem we want to prove:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 68 70 >}}
|
||||
|
||||
It reads, "for any total `k` and list `is`, if `is` has a pair of numbers that add to `k`,
|
||||
then `find_sum` will return a pair of numbers `x` and `y` that add to `k`".
|
||||
There's some nuance here. We hardly reference the `has_pair` property in this definition,
|
||||
and for good reason. Our `has_pair` hypothesis only says that there is _at least one_
|
||||
pair of numbers in `is` that meets our criteria. However, this pair need not be the only
|
||||
one, nor does it need to be the one returned by `find_sum`! However, if we have many pairs,
|
||||
we want to confirm that `find_sum` will find one of them. Finally, here is the proof.
|
||||
I will not be able to go through it in detail in this post, but I did comment it to
|
||||
make it easier to read:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day1.v" 71 106 >}}
|
||||
|
||||
Coq seems happy with it, and so am I! The bug I mentioned earlier popped up on line 96.
|
||||
I had accidentally made `find_sum` return `None` if it couldn't find a complement
|
||||
for the `x` it encountered. This meant that it never recursed into the remaining
|
||||
list `xs`, and thus, the pair was never found at all! It thus became impossible
|
||||
to prove that `find_sum` would return `Some y`, and I had to double back
|
||||
and check my definitions.
|
||||
|
||||
I hope you enjoyed this post! If you're interested to learn more about Coq, I strongly recommend
|
||||
checking out [Software Foundations](https://softwarefoundations.cis.upenn.edu/), a series
|
||||
of books on Coq written as comments in a Coq source file! In particular, check out
|
||||
[Logical Foundations](https://softwarefoundations.cis.upenn.edu/lf-current/index.html)
|
||||
for an introduction to using Coq. Thanks for reading!
|
||||
@@ -145,4 +145,5 @@ Here are the posts that I've written so far for this series:
|
||||
* [Polymorphism]({{< relref "10_compiler_polymorphism.md" >}})
|
||||
* [Polymorphic Data Types]({{< relref "11_compiler_polymorphic_data_types.md" >}})
|
||||
* [Let/In and Lambdas]({{< relref "12_compiler_let_in_lambda/index.md" >}})
|
||||
* [Cleanup]({{< relref "13_compiler_cleanup/index.md" >}})
|
||||
|
||||
|
||||
940
content/blog/01_aoc_coq.md
Normal file
@@ -0,0 +1,940 @@
|
||||
---
|
||||
title: "Advent of Code in Coq - Day 8"
|
||||
date: 2021-01-10T22:48:39-08:00
|
||||
tags: ["Advent of Code", "Coq"]
|
||||
---
|
||||
|
||||
Huh? We're on day 8? What happened to days 2 through 7?
|
||||
|
||||
Well, for the most part, I didn't think they were that interesting from the Coq point of view.
|
||||
Day 7 got close, but not close enough to inspire me to create a formalization. Day 8, on the other
|
||||
hand, is
|
||||
{{< sidenote "right" "pl-note" "quite interesting," >}}
|
||||
Especially to someone like me who's interested in programming languages!
|
||||
{{< /sidenote >}} and took quite some time to formalize.
|
||||
|
||||
As before, here's an (abridged) description of the problem:
|
||||
|
||||
> Given a tiny assembly-like language, determine the state of its accumulator
|
||||
> when the same instruction is executed twice.
|
||||
|
||||
Before we start on the Coq formalization, let's talk about an idea from
|
||||
Programming Language Theory (PLT), _big step operational semantics_.
|
||||
|
||||
### Big Step Operational Semantics
|
||||
What we have in Advent of Code's Day 8 is, undeniably, a small programming language.
|
||||
We are tasked with executing this language, or, in PLT lingo, defining its _semantics_.
|
||||
There are many ways of doing this - at university, I've been taught of [denotational](https://en.wikipedia.org/wiki/Denotational_semantics), [axiomatic](https://en.wikipedia.org/wiki/Axiomatic_semantics),
|
||||
and [operational](https://en.wikipedia.org/wiki/Operational_semantics) semantics.
|
||||
I believe that Coq's mechanism of inductive definitions lends itself very well
|
||||
to operational semantics, so we'll take that route. But even "operational semantics"
|
||||
doesn't refer to a concrete technique - we have a choice between small-step (structural) and
|
||||
big-step (natural) operational semantics. The former describe the minimal "steps" a program
|
||||
takes as it's being evaluated, while the latter define the final results of evaluating a program.
|
||||
I decided to go with big-step operational semantics, since they're more intuitive (natural!).
|
||||
|
||||
So, how does one go about "[defining] the final results of evaluating a program?" Most commonly,
|
||||
we go about using _inference rules_. Let's talk about those next.
|
||||
|
||||
#### Inference Rules
|
||||
Inference rules are a very general notion. They describe how we can determine (infer) a conclusion
|
||||
from a set of assumptions. It helps to look at an example. Here's a silly little inference rule:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{\text{I'm allergic to cats} \quad \text{My friend has a cat}}
|
||||
{\text{I will not visit my friend very much}}
|
||||
{{< /latex >}}
|
||||
|
||||
It reads, "if I'm allergic to cats, and if my friend has a cat, then I will not visit my friend very much".
|
||||
Here, "I'm allergic to cats" and "my friend has a cat" are _premises_, and "I will not visit my friend very much" is
|
||||
a _conclusion_. An inference rule states that if all its premises are true, then its conclusion must be true.
|
||||
Here's another inference rule, this time with some mathematical notation instead of words:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{n < m}
|
||||
{n + 1 < m + 1}
|
||||
{{< /latex >}}
|
||||
|
||||
This one reads, "if \\(n\\) is less than \\(m\\), then \\(n+1\\) is less than \\(m+1\\)". We can use inference
|
||||
rules to define various constructs. As an example, let's define what it means for a natural number to be even.
|
||||
It takes two rules:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{}
|
||||
{0 \; \text{is even}}
|
||||
\quad
|
||||
\frac
|
||||
{n \; \text{is even}}
|
||||
{n+2 \; \text{is even}}
|
||||
{{< /latex >}}
|
||||
|
||||
First of all, zero is even. We take this as fact - there are no premises for the first rule, so they
|
||||
are all trivially true. Next, if a number is even, then adding 2 to that number results in another
|
||||
even number. Using the two of these rules together, we can correctly determine whether any number
|
||||
is or isn't even. We start knowing that 0 is even. Adding 2 we learn that 2 is even, and adding 2
|
||||
again we see that 4 is even, as well. We can continue this to determine that 6, 8, 10, and so on
|
||||
are even too. Never in this process will we visit the numbers 1 or 3 or 5, and that's good - they're not even!
|
||||
|
||||
Let's now extend this notion to programming languages, starting with a simple arithmetic language.
|
||||
This language is made up of natural numbers and the \\(\square\\) operation, which represents the addition
|
||||
of two numbers. Again, we need two rules:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{n \in \mathbb{N}}
|
||||
{n \; \text{evaluates to} \; n}
|
||||
\quad
|
||||
\frac
|
||||
{e_1 \; \text{evaluates to} \; n_1 \quad e_2 \; \text{evaluates to} \; n_2}
|
||||
{e_1 \square e_2 \; \text{evaluates to} \; n_1 + n_2}
|
||||
{{< /latex >}}
|
||||
|
||||
First, let me explain myself. I used \\(\square\\) to demonstrate two important points. First, languages can be made of
|
||||
any kind of characters we want; it's the rules that we define that give these languages meaning.
|
||||
Second, while \\(\square\\) is the addition operation _in our language_, \\(+\\) is the _mathematical addition operator_.
|
||||
They are not the same - we use the latter to define how the former works.
|
||||
|
||||
Finally, writing "evaluates to" gets quite tedious, especially for complex languages. Instead,
|
||||
PLT people use notation to make their semantics more concise. The symbol \\(\Downarrow\\) is commonly
|
||||
used to mean "evaluates to"; thus, \\(e \Downarrow v\\) reads "the expression \\(e\\) evaluates to the value \\(v\\)".
|
||||
Using this notation, our rules start to look like the following:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{n \in \mathbb{N}}
|
||||
{n \Downarrow n}
|
||||
\quad
|
||||
\frac
|
||||
{e_1 \Downarrow n_1 \quad e_2 \Downarrow n_2}
|
||||
{e_1 \square e_2 \Downarrow n_1 + n_2}
|
||||
{{< /latex >}}
|
||||
|
||||
If nothing else, these are way more compact! Though these may look intimidating at first, it helps to
|
||||
simply read each symbol as its English meaning.
|
||||
|
||||
#### Encoding Inference Rules in Coq
|
||||
Now that we've seen what inference rules are, we can take a look at how they can be represented in Coq.
|
||||
We can use Coq's `Inductive` mechanism to define the rules. Let's start with our "is even" property.
|
||||
|
||||
```Coq
|
||||
Inductive is_even : nat -> Prop :=
|
||||
| zero_even : is_even 0
|
||||
| plustwo_even : is_even n -> is_even (n+2).
|
||||
```
|
||||
|
||||
The first line declares the property `is_even`, which, given a natural number, returns a proposition.
|
||||
This means that `is_even` is not a proposition itself, but `is_even 0`, `is_even 1`, and `is_even 2`
|
||||
are all propositions.
|
||||
|
||||
The following two lines each encode one of our aforementioned inference rules. The first rule, `zero_even`,
|
||||
is of type `is_even 0`. The `zero_even` rule doesn't require any arguments, and we can use it to create
|
||||
a proof that 0 is even. On the other hand, the `plustwo_even` rule _does_ require an argument, `is_even n`.
|
||||
To construct a proof that a number `n+2` is even using `plustwo_even`, we need to provide a proof
|
||||
that `n` itself is even. From this definition we can see a general principle: we encode each inference
|
||||
rule as a constructor of an inductive Coq type. Each rule encoded in this manner takes as arguments
|
||||
the proofs of its premises, and returns a proof of its conclusion.
|
||||
|
||||
For another example, let's encode our simple addition language. First, we have to define the language
|
||||
itself:
|
||||
|
||||
```Coq
|
||||
Inductive tinylang : Type :=
|
||||
| number (n : nat) : tinylang
|
||||
| box (e1 e2 : tinylang) : tinylang.
|
||||
```
|
||||
|
||||
This defines the two elements of our example language: `number n` corresponds to \\(n\\), and `box e1 e2` corresponds
|
||||
to \\(e_1 \square e_2\\). Finally, we define the inference rules:
|
||||
|
||||
```Coq {linenos=true}
|
||||
Inductive tinylang_sem : tinylang -> nat -> Prop :=
|
||||
| number_sem : forall (n : nat), tinylang_sem (number n) n
|
||||
| box_sem : forall (e1 e2 : tinylang) (n1 n2 : nat),
|
||||
tinylang_sem e1 n1 -> tinylang_sem e2 n2 ->
|
||||
tinylang_sem (box e1 e2) (n1 + n2).
|
||||
```
|
||||
|
||||
When we wrote our rules earlier, by using arbitrary variables like \\(e_1\\) and \\(n_1\\), we implicitly meant
|
||||
that our rules work for _any_ number or expression. When writing Coq we have to make this assumption explicit
|
||||
by using `forall`. For instance, the rule on line 2 reads, "for any number `n`, the expression `n` evaluates to `n`".
|
||||
|
||||
#### Semantics of Our Language
|
||||
|
||||
We've now written some example big-step operational semantics, both "on paper" and in Coq. Now, it's time to take a look at
|
||||
the specific semantics of the language from Day 8! Our language consists of a few parts.
|
||||
|
||||
First, there are three opcodes: \\(\texttt{jmp}\\), \\(\\texttt{nop}\\), and \\(\\texttt{add}\\). Opcodes, combined
|
||||
with an integer, make up an instruction. For example, the instruction \\(\\texttt{add} \\; 3\\) will increase the
|
||||
content of the accumulator by three. Finally, a program consists of a sequence of instructions; they're separated
|
||||
by newlines in the puzzle input, but we'll instead separate them by semicolons. For example, here's a complete program.
|
||||
|
||||
{{< latex >}}
|
||||
\texttt{add} \; 0; \; \texttt{nop} \; 2; \; \texttt{jmp} \; -2
|
||||
{{< /latex >}}
|
||||
|
||||
Now, let's try evaluating this program. Starting at the beginning and with 0 in the accumulator,
|
||||
it will add 0 to the accumulator (keeping it the same),
|
||||
do nothing, and finally jump back to the beginning. At this point, it will try to run the addition instruction again,
|
||||
which is not allowed; thus, the program will terminate.
|
||||
|
||||
Did you catch that? The semantics of this language will require more information than just our program itself (which we'll denote by \\(p\\)).
|
||||
* First, to evaluate the program we will need a program counter, \\(\\textit{c}\\). This program counter
|
||||
will tell us the position of the instruction to be executed next. It can also point past the last instruction,
|
||||
which means our program terminated successfully.
|
||||
* Next, we'll need the accumulator \\(a\\). Addition instructions can change the accumulator, and we will be interested
|
||||
in the number that ends up in the accumulator when our program finishes executing.
|
||||
* Finally, and more subtly, we'll need to keep track of the states we visited. For instance,
|
||||
in the course of evaluating our program above, we encounter the \\((c, a)\\) pair of \\((0, 0)\\) twice: once
|
||||
at the beginning, and once at the end. However, whereas at the beginning we have not yet encountered the addition
|
||||
instruction, at the end we have, so the evaluation behaves differently. To make the proofs work better in Coq,
|
||||
we'll use a set \\(v\\) of
|
||||
{{< sidenote "right" "allowed-note" "allowed (valid) program counters (as opposed to visited program counters)." >}}
|
||||
Whereas the set of "visited" program counters keeps growing as our evaluation continues,
|
||||
the set of "allowed" program counters keeps shrinking. Because the "allowed" set never stops shrinking,
|
||||
assuming we're starting with a finite set, our execution will eventually terminate.
|
||||
{{< /sidenote >}}
|
||||
|
||||
Now we have all the elements of our evaluation. Let's define some notation. A program starts at some state,
|
||||
and terminates in another, possibly different state. In the course of a regular evaluation, the program
|
||||
never changes; only the state does. So I propose this (rather unorthodox) notation:
|
||||
|
||||
{{< latex >}}
|
||||
(c, a, v) \Rightarrow_p (c', a', v')
|
||||
{{< /latex >}}
|
||||
|
||||
This reads, "after starting at program counter \\(c\\), accumulator \\(a\\), and set of valid addresses \\(v\\),
|
||||
the program \\(p\\) terminates with program counter \\(c'\\), accumulator \\(a'\\), and set of valid addresses \\(v'\\)".
|
||||
Before creating the inference rules for this evaluation relation, let's define the effect of evaluating a single
|
||||
instruction, using notation \\((c, a) \rightarrow_i (c', a')\\). An addition instruction changes the accumulator,
|
||||
and increases the program counter by 1.
|
||||
|
||||
{{< latex >}}
|
||||
\frac{}
|
||||
{(c, a) \rightarrow_{\texttt{add} \; n} (c+1, a+n)}
|
||||
{{< /latex >}}
|
||||
|
||||
A no-op instruction does even less. All it does is increment the program counter.
|
||||
|
||||
{{< latex >}}
|
||||
\frac{}
|
||||
{(c, a) \rightarrow_{\texttt{nop} \; n} (c+1, a)}
|
||||
{{< /latex >}}
|
||||
|
||||
Finally, a jump instruction leaves the accumulator intact, but adds a number to the program counter itself!
|
||||
|
||||
{{< latex >}}
|
||||
\frac{}
|
||||
{(c, a) \rightarrow_{\texttt{jmp} \; n} (c+n, a)}
|
||||
{{< /latex >}}
|
||||
|
||||
None of these rules have any premises, and they really are quite simple. Now, let's define the rules
|
||||
for evaluating a program. First of all, a program starting in a state that is not considered "valid"
|
||||
is done evaluating, and is in a "failed" state.
|
||||
|
||||
{{< latex >}}
|
||||
\frac{c \not \in v \quad c \not= \text{length}(p)}
|
||||
{(c, a, v) \Rightarrow_{p} (c, a, v)}
|
||||
{{< /latex >}}
|
||||
|
||||
We use \\(\\text{length}(p)\\) to represent the number of instructions in \\(p\\). Note the second premise:
|
||||
even if our program counter \\(c\\) is not included in the valid set, if it's "past the end of the program",
|
||||
the program terminates in an "ok" state.
|
||||
{{< sidenote "left" "avoid-c-note" "Here's a rule for terminating in the \"ok\" state:" >}}
|
||||
In the presented rule, we don't use the variable <code>c</code> all that much, and we know its concrete
|
||||
value (from the equality premise). We could thus avoid introducing the name \(c\) by
|
||||
replacing it with said known value:
|
||||
|
||||
{{< latex >}}
|
||||
\frac{}
|
||||
{(\text{length}(p), a, v) \Rightarrow_{p} (\text{length}(p), a, v)}
|
||||
{{< /latex >}}
|
||||
|
||||
This introduces some duplication, but that is really because all "base case" evaluation rules
|
||||
start and stop in the same state. To work around this, we could define a separate proposition
|
||||
to mean "program \(p\) is done in state \(s\)", then \(s\) will really only need to occur once,
|
||||
and so will \(\text{length}(p)\). This is, in fact, what we will do later on,
|
||||
since being able to talk about "programs being done" will help us with
|
||||
components of our proof.
|
||||
{{< /sidenote >}}
|
||||
|
||||
{{< latex >}}
|
||||
\frac{c = \text{length}(p)}
|
||||
{(c, a, v) \Rightarrow_{p} (c, a, v)}
|
||||
{{< /latex >}}
|
||||
|
||||
When our program counter reaches the end of the program, we are also done evaluating it. Even though
|
||||
both rules {{< sidenote "right" "redundant-note" "lead to the same conclusion," >}}
|
||||
In fact, if the end of the program is never included in the valid set, the second rule is completely redundant.
|
||||
{{< /sidenote >}}
|
||||
it helps to distinguish the two possible outcomes. Finally, if neither of the termination conditions are met,
|
||||
our program can take a step, and continue evaluating from there.
|
||||
|
||||
{{< latex >}}
|
||||
\frac{c \in v \quad p[c] = i \quad (c, a) \rightarrow_i (c', a') \quad (c', a', v - \{c\}) \Rightarrow_p (c'', a'', v'')}
|
||||
{(c, a, v) \Rightarrow_{p} (c'', a'', v'')}
|
||||
{{< /latex >}}
|
||||
|
||||
This is quite a rule. A lot of things need to work out for a program to evaluate from a state that isn't
|
||||
currently the final state:
|
||||
|
||||
* The current program counter \\(c\\) must be valid. That is, it must be an element of \\(v\\).
|
||||
* This program counter must correspond to an instruction \\(i\\) in \\(p\\), which we write as \\(p[c] = i\\).
|
||||
* This instruction must be executed, changing our program counter from \\(c\\) to \\(c'\\) and our
|
||||
accumulator from \\(a\\) to \\(a'\\). The set of valid instructions will no longer include \\(c\\),
|
||||
and will become \\(v - \\{c\\}\\).
|
||||
* Our program must then finish executing, starting at state
|
||||
\\((c', a', v - \\{c\\})\\), and ending in some (unknown) state \\((c'', a'', v'')\\).
|
||||
|
||||
If all of these conditions are met, our program, starting at \\((c, a, v)\\), will terminate in the state \\((c'', a'', v'')\\). This third rule completes our semantics; a program being executed will keep running instructions using the third rule, until it finally
|
||||
hits an invalid program counter (terminating with the first rule) or gets to the end of the program (terminating with the second rule).
|
||||
|
||||
#### Aside: Vectors and Finite \\(\mathbb{N}\\)
|
||||
We'll be getting to the Coq implementation of our semantics soon, but before we do:
|
||||
what type should \\(c\\) be? It's entirely possible for an instruction like \\(\\texttt{jmp} \\; -10000\\)
|
||||
to throw our program counter way before the first instruction of our program, so at first, it seems
|
||||
as though we should use an integer. But the prompt doesn't even specify what should happen in this
|
||||
case - it only says an instruction shouldn't be run twice. The "valid set", although it may help resolve
|
||||
this debate, is our invention, and isn't part of the original specification.
|
||||
|
||||
There is, however, something we can infer from this problem. Since the problem of jumping "too far behind" or
|
||||
"too far ahead" is never mentioned, we can assume that _all jumps will lead either to an instruction,
|
||||
or right to the end of a program_. This means that \\(c\\) is a natural number, with
|
||||
|
||||
{{< latex >}}
|
||||
0 \leq c \leq \text{length}(p)
|
||||
{{< /latex >}}
|
||||
|
||||
In a language like Coq, it's possible to represent such a number. Since we've gotten familiar with
|
||||
inference rules, let's present two rules that define such a number:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{n \in \mathbb{N}^+}
|
||||
{Z : \text{Fin} \; n}
|
||||
\quad
|
||||
\frac
|
||||
{f : \text{Fin} \; n}
|
||||
{S f : \text{Fin} \; (n+1)}
|
||||
{{< /latex >}}
|
||||
|
||||
This is a variation of the [Peano encoding](https://wiki.haskell.org/Peano_numbers) of natural numbers.
|
||||
It reads as follows: zero (\\(Z\\)) is a finite natural number less than any positive natural number \\(n\\). Then, if a finite natural number
|
||||
\\(f\\) is less than \\(n\\), then adding one to that number (using the successor function \\(S\\))
|
||||
will create a natural number less than \\(n+1\\). We encode this in Coq as follows
|
||||
([originally from here](https://coq.inria.fr/library/Coq.Vectors.Fin.html#t)):
|
||||
|
||||
```Coq
|
||||
Inductive t : nat -> Set :=
|
||||
| F1 : forall {n}, t (S n)
|
||||
| FS : forall {n}, t n -> t (S n).
|
||||
```
|
||||
|
||||
The `F1` constructor here is equivalent to our \\(Z\\), and `FS` is equivalent to our \\(S\\).
|
||||
To represent positive natural numbers \\(\\mathbb{N}^+\\), we simply take a regular natural
|
||||
number from \\(\mathbb{N}\\) and find its successor using `S` (simply adding 1). Again, we have
|
||||
to explicitly use `forall` in our type signatures.
|
||||
|
||||
We can use a similar technique to represent a list with a known number of elements, known
|
||||
in the Idris and Coq world as a vector. Again, we only need two inference rules to define such
|
||||
a vector:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{t : \text{Type}}
|
||||
{[] : \text{Vec} \; t \; 0}
|
||||
\quad
|
||||
\frac
|
||||
{x : \text{t} \quad \textit{xs} : \text{Vec} \; t \; n}
|
||||
{(x::\textit{xs}) : \text{Vec} \; t \; (n+1)}
|
||||
{{< /latex >}}
|
||||
|
||||
These rules read: the empty list \\([]\\) is zero-length vector of any type \\(t\\). Then,
|
||||
if we take an element \\(x\\) of type \\(t\\), and an \\(n\\)-long vector \\(\textit{xs}\\) of \\(t\\),
|
||||
then we can prepend \\(x\\) to \\(\textit{xs}\\) and get an \\((n+1)\\)-long vector of \\(t\\).
|
||||
In Coq, we write this as follows ([originally from here](https://coq.inria.fr/library/Coq.Vectors.VectorDef.html#t)):
|
||||
|
||||
```Coq
|
||||
Inductive t A : nat -> Type :=
|
||||
| nil : t A 0
|
||||
| cons : forall (h:A) (n:nat), t A n -> t A (S n).
|
||||
```
|
||||
|
||||
The `nil` constructor represents the empty list \\([]\\), and `cons` represents
|
||||
the operation of prepending an element (called `h` in the code and \\(x\\) in our inference rules)
|
||||
to another vector of length \\(n\\), which remains unnamed in the code but is called \\(\\textit{xs}\\) in our rules.
|
||||
|
||||
These two definitions work together quite well. For instance, suppose we have a vector of length \\(n\\).
|
||||
If we were to access its elements by indices starting at 0, we'd be allowed to access indices 0 through \\(n-1\\).
|
||||
These are precisely the values of the finite natural numbers less than \\(n\\), \\(\\text{Fin} \\; n \\).
|
||||
Thus, given such an index \\(\\text{Fin} \\; n\\) and a vector \\(\\text{Vec} \\; t \\; n\\), we are guaranteed
|
||||
to be able to retrieve the element at the given index! In our code, we will not have to worry about bounds checking.
|
||||
|
||||
Of course, if our program has \\(n\\) elements, our program counter will be a finite number less than \\(n+1\\),
|
||||
since there's always the possibility of it pointing past the instructions, indicating that we've finished
|
||||
running the program. This leads to some minor complications: we can't safely access the program instruction
|
||||
at index \\(\\text{Fin} \\; (n+1)\\). We can solve this problem by considering two cases:
|
||||
either our index points one past the end of the program (in which case its value is exactly the finite
|
||||
representation of \\(n\\)), or it's less than \\(n\\), in which case we can "tighten" the upper bound,
|
||||
and convert that index into a \\(\\text{Fin} \\; n\\). We formalize it in a lemma:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 80 82 >}}
|
||||
|
||||
There's a little bit of a gotcha here. Instead of translating our above statement literally,
|
||||
and returning a value that's the result of "tightening" our input `f`, we return a value
|
||||
`f'` that can be "weakened" to `f`. This is because "tightening" is not a total function -
|
||||
it's not always possible to convert a \\(\\text{Fin} \\; (n+1)\\) into a \\(\\text{Fin} \\; n\\).
|
||||
However, "weakening" \\(\\text{Fin} \\; n\\) _is_ a total function, since a number less than \\(n\\)
|
||||
is, by the transitive property of a total order, also less than \\(n+1\\).
|
||||
|
||||
The Coq proof for this claim is as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 88 97 >}}
|
||||
|
||||
The `Fin.rectS` function is a convenient way to perform inductive proofs over
|
||||
our finite natural numbers. Informally, our proof proceeds as follows:
|
||||
|
||||
* If the current finite natural number is zero, take a look at the "bound" (which
|
||||
we assume is nonzero, since there isn't a natural number less than zero).
|
||||
* If this "bounding number" is one, our `f` can't be tightened any further,
|
||||
since doing so would create a number less than zero. Fortunately, in this case,
|
||||
`n` must be `0`, so `f` is the finite representation of `n`.
|
||||
* Otherwise, `f` is most definitely a weakened version of another `f'`,
|
||||
since the tightest possible type for zero has a "bounding number" of one, and
|
||||
our "bounding number" is greater than that. We return a tighter version of our finite zero.
|
||||
* If our number is a successor of another finite number, we check if that other number
|
||||
can itself be tightened.
|
||||
* If it can't be tightened, then our smaller number is a finite representation of
|
||||
`n-1`. This, in turn, means that adding one to it will be the finite representation
|
||||
of `n` (if \\(x\\) is equal to \\(n-1\\), then \\(x+1\\) is equal to \\(n\\)).
|
||||
* If it _can_ be tightened, then so can the successor (if \\(x\\) is less
|
||||
than \\(n-1\\), then \\(x+1\\) is less than \\(n\\)).
|
||||
|
||||
Next, let's talk about addition, specifically the kind of addition done by the \\(\\texttt{jmp}\\) instruction.
|
||||
We can always add an integer to a natural number, but we can at best guarantee that the result
|
||||
will be an integer. For instance, we can add `-1000` to `1`, and get `-999`, which is _not_ a natural
|
||||
number. We implement this kind of addition in a function called `jump_t`:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 56 56 >}}
|
||||
|
||||
At the moment, its definition is not particularly important. What is important, though,
|
||||
is that it takes a bounded natural number `pc` (our program counter), an integer `off`
|
||||
(the offset provided by the jump instruction) and returns another integer representing
|
||||
the final offset. Why are integers of type `t`? Well, it so happens
|
||||
that Coq provides facilities for working with arbitrary implementations of integers,
|
||||
without relying on how they are implemented under the hood. This can be seen in its
|
||||
[`Coq.ZArith.Int`](https://coq.inria.fr/library/Coq.ZArith.Int.html) module,
|
||||
which describes what functions and types an implementation of integers should provide.
|
||||
Among those is `t`, the type of an integer in such an arbitrary implementation. We too
|
||||
will not make an assumption about how the integers are implemented, and simply
|
||||
use this generic `t` from now on.
|
||||
|
||||
Now, suppose we wanted to write a function that _does_ return a valid program
|
||||
counter after adding the offset to it. Since it's possible for this function to fail
|
||||
(for instance, if the offset is very negative), it has to return `option (fin (S n))`.
|
||||
That is, this function may either fail (returning `None`) or succeed, returning
|
||||
`Some f`, where `f` is of type `fin (S n)`, aka \\(\\text{Fin} \\; (n + 1)\\). Here's
|
||||
the function in Coq (again, don't worry too much about the definition):
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 61 61 >}}
|
||||
|
||||
We will make use of this function when we define and verify our semantics.
|
||||
Let's take a look at that next.
|
||||
|
||||
#### Semantics in Coq
|
||||
|
||||
Now that we've seen finite sets and vectors, it's time to use them to
|
||||
encode our semantics in Coq. Before we do anything else, we need
|
||||
to provide Coq definitions for the various components of our
|
||||
language, much like what we did with `tinylang`. We can start with opcodes:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 20 23 >}}
|
||||
|
||||
Now we can define a few other parts of our language and semantics, namely
|
||||
states, instructions and programs (which I called "inputs" since, well, they're
|
||||
our puzzle input). A state is simply the 3-tuple of the program counter, the set
|
||||
of valid program counters, and the accumulator. We write it as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 33 33 >}}
|
||||
|
||||
The star `*` is used here to represent a [product type](https://en.wikipedia.org/wiki/Product_type)
|
||||
rather than arithmetic multiplication. Our state type accepts an argument,
|
||||
`n`, much like a finite natural number or a vector. In fact, this `n` is passed on
|
||||
to the state's program counter and set types. Rightly, a state for a program
|
||||
of length \\(n\\) will not be of the same type as a state for a program of length \\(n+1\\).
|
||||
|
||||
An instruction is also a tuple, but this time containing only two elements: the opcode and
|
||||
the number. We write this as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 36 36 >}}
|
||||
|
||||
Finally, we have to define the type of a program. This type will also be
|
||||
indexed by `n`, the program's length. A program of length `n` is simply a
|
||||
vector of instructions `inst` of length `n`. This leads to the following
|
||||
definition:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 38 38 >}}
|
||||
|
||||
So far, so good! Finally, it's time to get started on the semantics themselves.
|
||||
We begin with the inductive definition of \\((\\rightarrow_i)\\).
|
||||
I think this is fairly straightforward. However, we do use
|
||||
`t` instead of \\(n\\) from the rules, and we use `FS`
|
||||
instead of \\(+1\\). Also, we make the formerly implicit
|
||||
assumption that \\(c+n\\) is valid explicit, by
|
||||
providing a proof that `valid_jump_t pc t = Some pc'`.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 103 110 >}}
|
||||
|
||||
Next, it will help us to combine the premises for
|
||||
"failed" and "ok" terminations into Coq data types.
|
||||
This will make it easier for us to formulate a lemma later on.
|
||||
Here are the definitions:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 112 117 >}}
|
||||
|
||||
Since all of our "termination" rules start and
|
||||
end in the same state, there's no reason to
|
||||
write that state twice. Thus, both `done`
|
||||
and `stuck` only take the input `inp`,
|
||||
and the state, which includes the accumulator
|
||||
`acc`, the set of allowed program counters `v`, and
|
||||
the program counter at which the program came to an end.
|
||||
When the program terminates successfully, this program
|
||||
counter will be equal to the length of the program `n`,
|
||||
so we use `nat_to_fin n`. On the other hand, if the program
|
||||
terminates in a stuck state, it must be that it terminated
|
||||
at a program counter that points to an instruction. Thus, this
|
||||
program counter is actually a \\(\\text{Fin} \\; n\\), and not
|
||||
a \\(\\text{Fin} \\; (n+1)\\), and is not in the set of allowed program counters.
|
||||
We use the same "weakening" trick we saw earlier to represent
|
||||
this.
|
||||
|
||||
Finally, we encode the three inference rules we came up with:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 119 126 >}}
|
||||
|
||||
Notice that we fused two of the premises in the last rule.
|
||||
Instead of naming the instruction at the current program
|
||||
counter (by writing \\(p[c] = i\\)) and using it in another premise, we simply use
|
||||
`nth inp pc`, which corresponds to \\(p[c]\\) in our
|
||||
"paper" semantics.
|
||||
|
||||
Before we go on writing some actual proofs, we have
|
||||
one more thing we have to address. Earlier, we said:
|
||||
|
||||
> All jumps will lead either to an instruction, or right to the end of a program.
|
||||
|
||||
To make Coq aware of this constraint, we'll have to formalize it. To
|
||||
start off, we'll define the notion of a "valid instruction", which is guaranteed
|
||||
to keep the program counter in the correct range.
|
||||
There are a couple of ways to do this, but we'll use yet another definition based
|
||||
on inference rules. First, though, observe that the same instruction may be valid
|
||||
for one program, and invalid for another. For instance, \\(\\texttt{jmp} \\; 100\\)
|
||||
is perfectly valid for a program with thousands of instructions, but if it occurs
|
||||
in a program with only 3 instructions, it will certainly lead to disaster. Specifically,
|
||||
the validity of an instruction depends on the length of the program in which it resides,
|
||||
and the program counter at which it's encountered.
|
||||
Thus, we refine our idea of validity to "being valid for a program of length \\(n\\) at program counter \\(f\\)".
|
||||
For this, we can use the following two inference rules:
|
||||
|
||||
{{< latex >}}
|
||||
\frac
|
||||
{c : \text{Fin} \; n}
|
||||
{\texttt{add} \; t \; \text{valid for} \; n, c }
|
||||
\quad
|
||||
\frac
|
||||
{c : \text{Fin} \; n \quad o \in \{\texttt{nop}, \texttt{jmp}\} \quad J_v(c, t) = \text{Some} \; c' }
|
||||
{o \; t \; \text{valid for} \; n, c }
|
||||
{{< /latex >}}
|
||||
|
||||
The first rule states that if a program has length \\(n\\), then \\(\\texttt{add}\\) is valid
|
||||
at any program counter whose value is less than \\(n\\). This is because running
|
||||
\\(\\texttt{add}\\) will increment the program counter \\(c\\) by 1,
|
||||
and thus, create a new program counter that's less than \\(n+1\\),
|
||||
which, as we discussed above, is perfectly valid.
|
||||
|
||||
The second rule works for the other two instructions. It has an extra premise:
|
||||
the result of `jump_valid_t` (written as \\(J_v\\)) has to be \\(\\text{Some} \\; c'\\),
|
||||
that is, `jump_valid_t` must succeed. Note that we require this even for no-ops,
|
||||
since it later turns out that one of them may be a jump after all.
|
||||
|
||||
We now have our validity rules. If an instruction satisfies them for a given program
|
||||
and at a given program counter, evaluating it will always result in a program counter that has a proper value.
|
||||
We encode the rules in Coq as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 152 157 >}}
|
||||
|
||||
Note that we have three rules instead of two. This is because we "unfolded"
|
||||
\\(o\\) from our second rule: rather than using set notation (or "or"), we
|
||||
just generated two rules that vary in nothing but the operation involved.
|
||||
|
||||
Of course, we must have that every instruction in a program is valid.
|
||||
We don't really need inference rules for this, as much as a "forall" quantifier.
|
||||
A program \\(p\\) of length \\(n\\) is valid if the following holds:
|
||||
|
||||
{{< latex >}}
|
||||
\forall (c : \text{Fin} \; n). p[c] \; \text{valid for} \; n, c
|
||||
{{< /latex >}}
|
||||
|
||||
That is, for every possible in-bounds program counter \\(c\\),
|
||||
the instruction at the program counter is valid. We can now
|
||||
encode this in Coq, too:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 160 161 >}}
|
||||
|
||||
In the above, `n` is made implicit where possible.
|
||||
Since \\(c\\) (called `pc` in the code) is of type \\(\\text{Fin} \\; n\\), there's no
|
||||
need to write \\(n\\) _again_. The curly braces tell Coq to infer that
|
||||
argument where possible.
|
||||
|
||||
### Proving Termination
|
||||
Here we go! It's finally time to make some claims about our
|
||||
definitions. Who knows - maybe we wrote down total garbage!
|
||||
We will be creating several related lemmas and theorems.
|
||||
All of them share two common assumptions:
|
||||
|
||||
* We have some valid program `inp` of length `n`.
|
||||
* This program is a valid input, that is, `valid_input` holds for it.
|
||||
There's no sense in arguing based on an invalid input program.
|
||||
|
||||
We represent these grouped assumptions by opening a Coq
|
||||
`Section`, which we call `ValidInput`, and listing our assumptions:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 163 166 >}}
|
||||
|
||||
We had to also explicitly mention the length `n` of our program.
|
||||
From now on, the variables `n`, `inp`, and `Hv` will be
|
||||
available to all of the proofs we write in this section.
|
||||
The first proof is rather simple. The claim is:
|
||||
|
||||
> For our valid program, at any program counter `pc`
|
||||
and accumulator `acc`, there must exist another program
|
||||
counter `pc'` and accumulator `acc'` such that the
|
||||
instruction evaluation relation \\((\\rightarrow_i)\\)
|
||||
connects the two. That is, valid addresses aside,
|
||||
we can always make a step.
|
||||
|
||||
Here is this claim encoded in Coq:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 168 169 >}}
|
||||
|
||||
We start our proof by introducing all the relevant variables into
|
||||
the global context. I've mentioned this when I wrote about
|
||||
day 1, but here's the gist: the `intros` keyword takes
|
||||
variables from a `forall`, and makes them concrete.
|
||||
In short, `intros x` is very much like saying "suppose
|
||||
we have an `x`", and going on with the proof.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 170 171 >}}
|
||||
|
||||
Here, we said "take any program counter `pc` and any
|
||||
accumulator `acc`". Now what? Well, first of all,
|
||||
we want to take a look at the instruction at the current
|
||||
`pc`. We know that this instruction is a combination
|
||||
of an opcode and a number, so we use `destruct` to get
|
||||
access to both of these parts:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 172 172 >}}
|
||||
|
||||
Now, Coq reports the following proof state:
|
||||
|
||||
```
|
||||
1 subgoal
|
||||
|
||||
n : nat
|
||||
inp : input n
|
||||
Hv : valid_input inp
|
||||
pc : Fin.t n
|
||||
acc : t
|
||||
o : opcode
|
||||
t0 : t
|
||||
Hop : nth inp pc = (o, t0)
|
||||
|
||||
========================= (1 / 1)
|
||||
|
||||
exists (pc' : fin (S n)) (acc' : t),
|
||||
step_noswap (o, t0) (pc, acc) (pc', acc')
|
||||
```
|
||||
|
||||
We have some opcode `o`, and some associated number
|
||||
`t0`, and we must show that there exist a `pc'`
|
||||
and `acc'` to which we can move on. To prove
|
||||
that something exists in Coq, we must provide
|
||||
an instance of that "something". If we claim
|
||||
that there exists a dog that's not a good boy,
|
||||
we better have this elusive creature in hand.
|
||||
In other words, proofs in Coq are [constructive](https://en.wikipedia.org/wiki/Constructive_proof).
|
||||
Without knowing the kind of operation we're dealing with, we can't
|
||||
say for sure how the step will proceed. Thus, we proceed by
|
||||
case analysis on `o`.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 173 173 >}}
|
||||
|
||||
There are three possible cases we have to consider,
|
||||
one for each type of instruction.
|
||||
|
||||
* If the instruction is \\(\\texttt{add}\\), we know
|
||||
that `pc' = pc + 1` and `acc' = acc + t0`. That is,
|
||||
the program counter is simply incremented, and the accumulator
|
||||
is modified with the number part of the instruction.
|
||||
* If the instruction is \\(\\texttt{nop}\\), the program
|
||||
coutner will again be incremented (`pc' = pc + 1`),
|
||||
but the accumulator will stay the same, so `acc' = acc`.
|
||||
* If the instruction is \\(\\texttt{jmp}\\), things are
|
||||
more complicated. We must rely on the assumption
|
||||
that our input is valid, which tells us that adding
|
||||
`t0` to our `pc` will result in `Some f`, and not `None`.
|
||||
Given this, we have `pc' = f`, and `acc' = acc`.
|
||||
|
||||
This is how these three cases are translated to Coq:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 174 177 >}}
|
||||
|
||||
For the first two cases, we simply provide the
|
||||
values we expect for `pc'` and `acc'`, and
|
||||
apply the corresponding inference rule that
|
||||
is satisfied by these values. For the third case, we have
|
||||
to invoke `Hv`, the hypothesis that our input is valid.
|
||||
In particular, we care about the instruction at `pc`,
|
||||
so we use `specialize` to plug `pc` into the more general
|
||||
hypothesis. We then replace `nth inp pc` with its known
|
||||
value, `(jmp, t0)`. This tells us the following, in Coq's words:
|
||||
|
||||
```
|
||||
Hv : valid_inst (jmp, t0) pc
|
||||
```
|
||||
|
||||
That is, `(jmp, t0)` is a valid instruction at `pc`. Then, using
|
||||
Coq's `inversion` tactic, we ask: how is this possible? There is
|
||||
only one inference rule that gives us such a conclusion, and it is named `valid_inst_jmp`
|
||||
in our Coq code. Since we have a proof that our `jmp` is valid,
|
||||
it must mean that this rule was used. Furthermore, since this
|
||||
rule requires that `valid_jump_t` evaluates to `Some f'`, we know
|
||||
that this must be the case here! Coq now adds the following
|
||||
two lines to our proof state:
|
||||
|
||||
```
|
||||
f' : fin (S n)
|
||||
H0 : valid_jump_t pc t0 = Some f'
|
||||
```
|
||||
|
||||
Finally, we specify, as mentioned earlier, that `pc' = f'` and `acc' = acc`.
|
||||
As before, we apply the corresponding step rule for `jmp`. When it asks
|
||||
for a proof that `valid_jump_t` produces a valid program counter,
|
||||
we hand it `H0` using `apply H0`. And with that, Coq is happy!
|
||||
|
||||
Next, we prove a claim that a valid program can always do _something_,
|
||||
and that something is one of three things:
|
||||
|
||||
* It can terminate in the "ok" state if the program counter
|
||||
reaches the program's end.
|
||||
* It can terminate with an error if it's currently at a program
|
||||
counter that is not included in the valid set.
|
||||
* Otherwise, it can run the current instruction and advance
|
||||
to a "next" state.
|
||||
|
||||
Alternatively, we could say that one of the inference rules
|
||||
for \\((\\Rightarrow_p)\\) must apply. This is not the case if the input
|
||||
is not valid, since, as I said
|
||||
before, an arbitrary input program can lead us to jump
|
||||
to a negative address (or to an address _way_ past the end of the program).
|
||||
Here's the claim, translated to Coq:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 181 186 >}}
|
||||
|
||||
Informally, we can prove this as follows:
|
||||
|
||||
* If the current program counter is equal to the length
|
||||
of the program, we've reached the end. Thus, the program
|
||||
can terminate in the "ok" state.
|
||||
* Otherwise, the current program counter must be
|
||||
less than the length of the program.
|
||||
* If we've already encountered this program counter (that is,
|
||||
if it's gone from the set of valid program counters),
|
||||
then the program will terminate in the "error" state.
|
||||
* Otherwise, the program counter is in the set of
|
||||
valid instructions. By our earlier theorem, in a valid
|
||||
program, the instruction at any program counter can be correctly
|
||||
executed, taking us to the next state. Now too
|
||||
our program can move to this next state.
|
||||
|
||||
Below is the Coq translation of the above.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 187 203 >}}
|
||||
|
||||
It doesn't seem like we're that far from being done now.
|
||||
A program can always take a step, and each time it does,
|
||||
the set of valid program counters decreases in size. Eventually,
|
||||
this set will become empty, so if nothing else, our program will
|
||||
eventually terminate in an "error" state. Thus, it will stop
|
||||
running no matter what.
|
||||
|
||||
This seems like a task for induction, in this case on the size
|
||||
of the valid set. In particular, strong mathematical induction
|
||||
{{< sidenote "right" "strong-induction-note" "seems to work best." >}}
|
||||
Why strong induction? If we remove a single element from a set,
|
||||
its size should decrease strictly by 1. Thus, why would we need
|
||||
to care about sets of <em>all</em> sizes less than the current
|
||||
set's size?<br>
|
||||
<br>
|
||||
Unfortunately, we're not working with purely mathematical sets.
|
||||
Coq's default facility for sets is simply a layer on top
|
||||
of good old lists, and makes no effort to be "correct by construction".
|
||||
It is thus perfectly possible to have a "set" which includes an element
|
||||
twice. Depending on the implementation of <code>set_remove</code>,
|
||||
we may end up removing the repeated element multiple times, thereby
|
||||
shrinking the length of our list by more than 1. I'd rather
|
||||
not worry about implementation details like that.
|
||||
{{< /sidenote >}}
|
||||
Someone on StackOverflow [implemented this](https://stackoverflow.com/questions/45872719/how-to-do-induction-on-the-length-of-a-list-in-coq),
|
||||
so I'll just use it. The Coq theorem corresponding to strong induction
|
||||
on the length of a list is as follows:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 205 207 >}}
|
||||
|
||||
It reads,
|
||||
|
||||
> If for some list `l`, the property `P` holding for all lists
|
||||
shorter than `l` means that it also holds for `l` itself, then
|
||||
`P` holds for all lists.
|
||||
|
||||
This is perhaps not particularly elucidating. We can alternatively
|
||||
think of this as trying to prove some property for all lists `l`.
|
||||
We start with all empty lists. Here, we have nothing else to rely
|
||||
on; there are no lists shorter than the empty list, and our property
|
||||
must hold for all empty lists. Then, we move on to proving
|
||||
the property for all lists of length 1, already knowing that it holds
|
||||
for all empty lists. Once we're done there, we move on to proving
|
||||
that `P` holds for all lists of length 2, now knowing that it holds
|
||||
for all empty lists _and_ all lists of length 1. We continue
|
||||
doing this, eventually covering lists of any length.
|
||||
|
||||
Before proving termination, there's one last thing we have to
|
||||
take care of. Coq's standard library does not come with
|
||||
a proof that removing an element from a set makes it smaller;
|
||||
we have to provide it ourselves. Here's the claim encoded
|
||||
in Coq:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 217 219 >}}
|
||||
|
||||
This reads, "if a set `s` contains a finite natural
|
||||
number `f`, removing `f` from `s` reduces the set's size".
|
||||
The details of the proof are not particularly interesting,
|
||||
and I hope that you understand intuitively why this is true.
|
||||
Finally, we make our termination claim.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 230 231 >}}
|
||||
|
||||
It's quite a strong claim - given _any_ program counter,
|
||||
set of valid addresses, and accumulator, a valid input program
|
||||
will terminate. Let's take a look at the proof.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 232 234 >}}
|
||||
|
||||
We use `intros` again. However, it brings in variables
|
||||
in order, and we really only care about the _second_ variable.
|
||||
We thus `intros` the first two, and then "put back" the first
|
||||
one using `generalize dependent`. Then, we proceed by
|
||||
induction on length, as seen above.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 235 236>}}
|
||||
|
||||
Now we're in the "inductive step". Our inductive hypothesis
|
||||
is that any set of valid addresses smaller than the current one will
|
||||
guarantee that the program will terminate. We must show
|
||||
that using our set, too, will guarantee termination. We already
|
||||
know that a valid input, given a state, can have one of three
|
||||
possible outcomes: "ok" termination, "failed" termination,
|
||||
or a "step". We use `destruct` to take a look at each of these
|
||||
in turn. The first two cases ("ok" termination and "failed" termination)
|
||||
are fairly trivial:
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 237 240 >}}
|
||||
|
||||
We basically connect the dots between the premises (in a form like `done`)
|
||||
and the corresponding inference rule (`run_noswap_ok`). The more
|
||||
interesting case is when we can take a step.
|
||||
|
||||
{{< codelines "Coq" "aoc-2020/day8.v" 241 253 >}}
|
||||
|
||||
Since we know we can take a step, we know that we'll be removing
|
||||
the current program counter from the set of valid addresses. This
|
||||
set must currently contain the present program counter (since otherwise
|
||||
we'd have "failed"), and thus will shrink when we remove it. This,
|
||||
in turn, lets us use the inductive hypothesis: it tells us that no matter the
|
||||
program counter or accumulator, if we start with this new "shrunk"
|
||||
set, we will terminate in some state. Coq's constructive
|
||||
nature helps us here: it doesn't just tell us that there is some state
|
||||
in which we terminate - it gives us that state! We use `edestruct` to get
|
||||
a handle on this final state, which Coq automatically names `x`. At this
|
||||
time Coq still isn't convinced that our new set is smaller, so we invoke
|
||||
our earlier `set_remove_length` theorem to placate it.
|
||||
|
||||
We now have all the pieces: we know that we can take a step, removing
|
||||
the current program counter from our current set. We also know that
|
||||
with that newly shrunken set, we'll terminate in some final state `x`.
|
||||
Thus, all that's left to say is to apply our "step" rule. It asks
|
||||
us for three things:
|
||||
|
||||
1. That the current program counter is in the set. We've long since
|
||||
established this, and `auto` takes care of that.
|
||||
2. That a step is possible. We've already established this, too,
|
||||
since we're in the "can take a step" case. We apply `Hst`,
|
||||
the hypothesis that confirms that we can, indeed, step.
|
||||
3. That we terminate after this. The `x` we got
|
||||
from our induction hypothesis came with a proof that
|
||||
running with the "next" program counter and accumulator
|
||||
will result in termination. We apply this proof, automatically
|
||||
named `H0` by Coq.
|
||||
|
||||
And that's it! We've proved that a program terminates no matter what.
|
||||
This has also (almost!) given us a solution to part 1. Consider the case
|
||||
in which we start with program counter 0, accumulator 0, and the "full"
|
||||
set of allowed program counters. Since our proof works for _all_ configurations,
|
||||
it will also work for this one. Furthermore, since Coq proofs are constructive,
|
||||
this proof will __return to us the final program counter and accumulator!__
|
||||
This is precisely what we'd need to solve part 1.
|
||||
|
||||
But wait, almost? What's missing? We're missing a few implementation details:
|
||||
* We've not provided a concrete implementation of integers. The simplest
|
||||
thing to do here would be to use [`Coq.ZArith.BinInt`](https://coq.inria.fr/library/Coq.ZArith.BinInt.html),
|
||||
for which there is a module [`Z_as_Int`](https://coq.inria.fr/library/Coq.ZArith.Int.html#Z_as_Int)
|
||||
that provides `t` and friends.
|
||||
* We assumed (reasonably, I would say) that it's possible to convert a natural
|
||||
number to an integer. If we're using the aforementioned `BinInt` module,
|
||||
we can use [`Z.of_nat`](https://coq.inria.fr/library/Coq.ZArith.BinIntDef.html#Z.of_nat).
|
||||
* We also assumed (still reasonably) that we can try to convert an integer
|
||||
back to a finite natural number, failing if it's too small or too large.
|
||||
There's no built-in function for this, but `Z`, for one, distinguishes
|
||||
between the "positive", "zero", and "negative" cases, and we have
|
||||
`Pos.to_nat` for the positive case.
|
||||
|
||||
Well, I seem to have covered all the implementation details. Why not just
|
||||
go ahead and solve the problem? I tried, and ran into two issues:
|
||||
|
||||
* Although this is "given", we assumed that our input program will be
|
||||
valid. For us to use the result of our Coq proof, we need to provide it
|
||||
a constructive proof that our program is valid. Creating this proof is tedious
|
||||
in theory, and quite difficult in practice: I've run into a
|
||||
strange issue trying to pattern match on finite naturals.
|
||||
* Even supposing we _do_ have a proof of validity, I'm not certain
|
||||
if it's possible to actually extract an answer from it. It seems
|
||||
that Coq distinguishes between proofs (things of type `Prop`) and
|
||||
values (things of type `Set`). Things of type `Prop` are supposed
|
||||
to be _erased_. This means that when you convert Coq code,
|
||||
to, say, Haskell, you will see no trace of any `Prop`s in that generated
|
||||
code. Unfortunately, this also means we
|
||||
[can't use our proofs to construct values](https://stackoverflow.com/questions/27322979/why-coq-doesnt-allow-inversion-destruct-etc-when-the-goal-is-a-type),
|
||||
even though our proof objects do indeed contain them.
|
||||
|
||||
So, we "theoretically" have a solution to part 1, down to the algorithm
|
||||
used to compute it and a proof that our algorithm works. In "reality", though, we
|
||||
can't actually use this solution to procure an answer. Like we did with day 1, we'll have
|
||||
to settle for only a proof.
|
||||
|
||||
Let's wrap up for this post. It would be more interesting to devise and
|
||||
formally verify an algorithm for part 2, but this post has already gotten
|
||||
quite long and contains a lot of information. Perhaps I will revisit this
|
||||
at a later time. Thanks for reading!
|
||||
@@ -3,6 +3,7 @@ title: Compiling a Functional Language Using C++, Part 10 - Polymorphism
|
||||
date: 2020-03-25T17:14:20-07:00
|
||||
tags: ["C and C++", "Functional Languages", "Compilers"]
|
||||
description: "In this post, we extend our compiler's typechecking algorithm to implement the Hindley-Milner type system, allowing for polymorphic functions."
|
||||
favorite: true
|
||||
---
|
||||
|
||||
[In part 8]({{< relref "08_compiler_llvm.md" >}}), we wrote some pretty interesting programs in our little language.
|
||||
|
||||
@@ -984,5 +984,6 @@ Before either of those things, though, I think that I want to go through
|
||||
the compiler and perform another round of improvements, similarly to
|
||||
[part 4]({{< relref "04_compiler_improvements" >}}). It's hard to do a lot
|
||||
of refactoring while covering new content, since major changes need to
|
||||
be explained and presented for the post to make sense. I hope to see
|
||||
you in these future posts!
|
||||
be explained and presented for the post to make sense.
|
||||
I do this in [part 13]({{< relref "13_compiler_cleanup/index.md" >}}) - cleanup.
|
||||
I hope to see you there!
|
||||
|
||||
964
content/blog/13_compiler_cleanup/index.md
Normal file
@@ -0,0 +1,964 @@
|
||||
---
|
||||
title: Compiling a Functional Language Using C++, Part 13 - Cleanup
|
||||
date: 2020-09-19T16:14:13-07:00
|
||||
tags: ["C and C++", "Functional Languages", "Compilers"]
|
||||
description: "In this post, we clean up our compiler."
|
||||
---
|
||||
|
||||
In [part 12]({{< relref "12_compiler_let_in_lambda" >}}), we added `let/in`
|
||||
and lambda expressions to our compiler. At the end of that post, I mentioned
|
||||
that before we move on to bigger and better things, I wanted to take a
|
||||
step back and clean up the compiler. Now is the time to do that.
|
||||
|
||||
In particular, I identified four things that could be improved
|
||||
or cleaned up:
|
||||
|
||||
* __Error handling__. We need to stop using `throw 0` and start
|
||||
using `assert`. We can also make our errors much more descriptive
|
||||
by including source locations in the output.
|
||||
* __Name mangling__. I don't think I got it quite right last
|
||||
time. Now is the time to clean it up.
|
||||
* __Code organization__. I think we can benefit from a top-level
|
||||
class, and a more clear "dependency order" between the various
|
||||
classes and structures we've defined.
|
||||
* __Code style__. In particular, I've been lazily using `struct`
|
||||
in a lot of places. That's not a good idea; it's better
|
||||
to use `class`, and only expose _some_ fields and methods
|
||||
to the rest of the code.
|
||||
|
||||
### Error Reporting and Handling
|
||||
The previous post was rather long, which led me to omit
|
||||
a rather important aspect of the compiler: proper error reporting.
|
||||
Once again our compiler has instances of `throw 0`, which is a cheap way
|
||||
of avoiding properly handling a runtime error. Before we move on,
|
||||
it's best to get rid of such blatantly lazy code.
|
||||
|
||||
Our existing exceptions (mostly type errors) can use some work, too.
|
||||
Even the most descriptive issues our compiler reports -- unification errors --
|
||||
don't include the crucial information of _where_ the error is. For large
|
||||
programs, this means having to painstakingly read through the entire file
|
||||
to try figure out which subexpression could possibly have an incorrect type.
|
||||
This is far from the ideal debugging experience.
|
||||
|
||||
Addressing all this is a multi-step change in itself. We want to:
|
||||
|
||||
* Replace all `throw 0` code with actual exceptions.
|
||||
* Replace some exceptions that shouldn't be possible for a user to trigger
|
||||
with assertions.
|
||||
* Keep track of source locations of each subexpression, so that we may
|
||||
be able to print it if it causes an error.
|
||||
* Be able to print out said source locations at will. This isn't
|
||||
a _necessity_, but virtually all "big" compilers do this. Instead
|
||||
of reporting that an error occurs on a particular line, we will
|
||||
actually print the line.
|
||||
|
||||
Let's start with gathering the actual location data.
|
||||
|
||||
#### Bison's Locations
|
||||
Bison actually has some rather nice support for location tracking. It can
|
||||
automatically assemble the "from" and "to" locations of a nonterminal
|
||||
from the locations of children, which would be very tedious to write
|
||||
by hand. We enable this feature using the following option:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parser.y" 46 46 >}}
|
||||
|
||||
There's just one hitch, though. Sure, Bison can compute bigger
|
||||
locations from smaller ones, but it must get the smaller ones
|
||||
from somewhere. Since Bison operates on _tokens_, rather
|
||||
than _characters_, it effectively doesn't interact with the source
|
||||
text at all, and can't determine from which line or column a token
|
||||
originated. The task of determining the locations of input tokens
|
||||
is delegated to the tokenizer -- Flex, in our case. Flex, on the
|
||||
other hand, doesn't have a built-in mechanism for tracking
|
||||
locations. Fortunately, Bison provides a `yy::location` class that
|
||||
includes most of the needed functionality.
|
||||
|
||||
A `yy::location` consists of two source positions, `begin` and `end`,
|
||||
which themselves are represented using lines and columns. It
|
||||
also has the following methods:
|
||||
|
||||
* `yy::location::columns(int)` advances the `end` position by
|
||||
the given number of columns, while `begin` stays the same.
|
||||
If `begin` and `end` both point to the beginning of a token,
|
||||
then `columns(token_length)` will move `end` to the token's end,
|
||||
and thus make the whole `location` contain the token.
|
||||
* `yy::location::lines(int)` behaves similarly to `columns`,
|
||||
except that it advances `end` by the given number of lines,
|
||||
rather than columns. It also resets the columns counter to `1`.
|
||||
* `yy::location::step()` moves `begin` to where `end` is. This
|
||||
is useful for when we've finished processing a token, and want
|
||||
to move on to the next one.
|
||||
|
||||
For Flex specifically, `yyleng` has the length of the token
|
||||
currently being processed. Rather than adding the calls
|
||||
to `columns` and `step` to every rule, we can define the
|
||||
`YY_USER_ACTION` macro, which is run before each token
|
||||
is processed.
|
||||
|
||||
{{< codelines "C++" "compiler/13/scanner.l" 12 14 >}}
|
||||
|
||||
We'll see why we are using `LOC` instead of something like `location` soon;
|
||||
for now, you can treat `LOC` as if it were a global variable declared
|
||||
in the tokenizer. Before processing each token, we ensure that
|
||||
the `yy::location` has its `begin` and `end` at the same position,
|
||||
and then advance `end` by `yyleng` columns. This is
|
||||
{{< sidenote "right" "sufficient-note" "sufficient" >}}
|
||||
This doesn't hold for all languages. It may be possible for a language
|
||||
to have tokens that contain <code>\n</code>, in which case,
|
||||
rather than just using <code>yyleng</code>, we'd need to
|
||||
add special logic to iterate over the token and detect the line
|
||||
breaks.<br>
|
||||
<br>
|
||||
Also, this requires that the <code>end</code> of the previous token was
|
||||
correctly computed.
|
||||
{{< /sidenote >}}
|
||||
to make `LOC` represent our token's source position. For
|
||||
the moment, don't worry too much about `drv`; this is the
|
||||
parsing driver, and we will talk about it shortly.
|
||||
|
||||
So now we have a "global" variable `LOC` that gives
|
||||
us the source position of the current token. To get it
|
||||
to Bison, we have to pass it as an argument to each
|
||||
of the `make_TOKEN` calls. Here are a few sample lines
|
||||
that should give you the general idea:
|
||||
|
||||
{{< codelines "C++" "compiler/13/scanner.l" 40 43 >}}
|
||||
|
||||
That last line is actually new. Previously, we somehow
|
||||
got away without explicitly sending the end-of-file token to Bison.
|
||||
I suspect that this was due to some kind of implicit conversion
|
||||
of the Flex macro `YY_NULL` into a token; now that we have
|
||||
to pass a position to every token constructor, such an implicit
|
||||
conversion is probably impossible.
|
||||
|
||||
Now we have Bison computing source locations for each nonterminal.
|
||||
However, at the moment, we still aren't using them. To change that,
|
||||
we need to add a `yy::location` argument to each of our `ast` nodes,
|
||||
as well as to the `pattern` subclasses, `definition_defn` and
|
||||
`definition_data`. To avoid breaking all the code that creates
|
||||
AST nodes and definitions outside of the parser, we'll make this
|
||||
argument optional. Inside of `ast.hpp`, we define a new field as follows:
|
||||
|
||||
{{< codelines "C++" "compiler/13/ast.hpp" 16 16 >}}
|
||||
|
||||
Then, we add a constructor to `ast` as follows:
|
||||
|
||||
{{< codelines "C++" "compiler/13/ast.hpp" 18 18 >}}
|
||||
|
||||
Note that it's not optional here, since `ast` itself is an
|
||||
abstract class, and thus will never be constructed directly.
|
||||
It is in the subclasses of `ast` that we provide a default
|
||||
value. The change is rather mechanical, but here's an example
|
||||
from `ast_binop`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/ast.hpp" 98 99 >}}
|
||||
|
||||
Finally, we tell Bison to pass the computed location
|
||||
data as an argument when constructing our data structures.
|
||||
This too is a mechanical change, and I think the following
|
||||
few lines demonstrate the general idea in sufficient
|
||||
detail:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parser.y" 92 96 >}}
|
||||
|
||||
Here, the `@$` character is used to reference the current
|
||||
nonterminal's location data.
|
||||
|
||||
#### Line Offsets, File Input, and the Parsing Driver
|
||||
There are three more challenges with printing out the line
|
||||
of code where an error occurred. First of all, to
|
||||
print out a line of code, we need to have that line of code
|
||||
available to us. We do not currently meet this requirement:
|
||||
our compiler reads code from `stdin` (as is default for Flex),
|
||||
and `stdin` doesn't always support rewinding. This, in turn,
|
||||
means that once Flex has read a character from the input,
|
||||
it may not be possible to go back and retrieve that character
|
||||
again.
|
||||
|
||||
Second, even if we do have the entire stream or buffer
|
||||
available to us, to retrieve an offset and length within
|
||||
that buffer from just a line and column number would be a lot
|
||||
of work. A naive approach would be to iterate through
|
||||
the input again, once more keeping track of lines and columns,
|
||||
and print the desired line once we reach it. However, this
|
||||
would lead us to redo a lot of work that our tokenizer
|
||||
is already doing.
|
||||
|
||||
Third, Flex's input mechanism, even if it's configured
|
||||
not to read from `stdin`, uses a global file descriptor called
|
||||
`yyin`. However, we're better off minimizing global state (especially
|
||||
if we want to read, parse, and compile multiple files in
|
||||
the future). While we're configuring Flex's input mechanism,
|
||||
we may as well fix this, too.
|
||||
|
||||
There are several approaches to fixing the first issue. One possible
|
||||
way is to store the content of `stdin` into a temporary file. Then,
|
||||
it's possible to read from the file multiple times by using
|
||||
the C functions `fseek` and `rewind`. However, since we're
|
||||
working with files, why not just work directly with the files
|
||||
created by the user? Instead of reading from `stdin`, we may
|
||||
as well take in a path to a file via `argv`, and read from there.
|
||||
Also, instead of `fseek` and `rewind`, we can just read the file
|
||||
into memory, and access it like a normal character buffer. This
|
||||
does mean that we can stick with `stdin`, but it's more conventional
|
||||
to read source code from files, anyway.
|
||||
|
||||
To address the second issue, we can keep a mapping of line numbers
|
||||
to their locations in the source buffer. This is rather easy to
|
||||
maintain using an array: the first element of the array is 0,
|
||||
which is the beginning of the first line in any source file. From there,
|
||||
every time we encounter the character `\n`, we can push
|
||||
the current source location to the top, marking it as
|
||||
the beginning of another line. Where exactly we store this
|
||||
array is as yet unclear, since we're trying to avoid global variables.
|
||||
|
||||
Finally, to begin addressing the third issue, we can use Flex's `reentrant`
|
||||
option, which makes it so that all of the tokenizer's state is stored in an
|
||||
opaque `yyscan_t` structure, rather than in global variables. This way,
|
||||
we can configure `yyin` without setting a global variable, which is a step
|
||||
in the right direction. We'll work on this momentarily.
|
||||
|
||||
Our tokenizing and parsing stack has more global variables
|
||||
than just those specific to Flex. Among these variables is `global_defs`,
|
||||
which receives all the top-level function and data type definitions. We
|
||||
will also need some way of accessing the `yy::location` instance, and
|
||||
a way of storing our file input in memory. Fortunately, we're not
|
||||
the only ones to have ever come across the issue of creating non-global
|
||||
state: the Bison documentation has a
|
||||
[section in its C++ guide](https://www.gnu.org/software/bison/manual/html_node/Calc_002b_002b-Parsing-Driver.html)
|
||||
that describes a technique for manipulating
|
||||
state -- "parsing context", in their words. This technique involves the
|
||||
creation of a _parsing driver_.
|
||||
|
||||
The parsing driver is a class (or struct) that holds all the parse-related
|
||||
state. We can arrange for this class to be available to our tokenizing
|
||||
and parsing functions, which will allow us to use it pretty much like we'd
|
||||
use a global variable. This is the `drv` that we saw in `YY_USER_ACTION`.
|
||||
We can define it as follows:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.hpp" 36 54 >}}
|
||||
|
||||
There aren't many fields here. The `file_name` string represents
|
||||
the file that we'll be reading code from. The `location` field
|
||||
will be accessed by Flex via `get_current_location`. Bison will
|
||||
store the function and data type definitions it reads into `global_defs`
|
||||
via `get_global_defs`. Finally, `file_m` will be used to keep track
|
||||
of the content of the file we're reading, as well as the line offsets
|
||||
within that file. Notice that a couple of these fields are pointers
|
||||
that we take by reference in the constructor. The `parse_driver` doesn't
|
||||
_own_ the global definitions, nor the file manager. They exist outside
|
||||
of it, and will continue to be used in other ways the `parse_driver`
|
||||
does not need to know about. Also, the `LOC` variable in Flex is
|
||||
actually a call to `get_current_location`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/scanner.l" 15 15 >}}
|
||||
|
||||
The methods of `parse_driver` are rather simple. The majority of
|
||||
them deals with giving access to the parser's members: the `yy::location`,
|
||||
the `definition_group`, and the `file_mgr`. The only exception
|
||||
to this is `operator()`, which we use to actually trigger the parsing process.
|
||||
We'll make this method return `true` if parsing succeeded, and `false`
|
||||
otherwise (if, say, the file we tried to read doesn't exist).
|
||||
Here's its implementation:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 48 60 >}}
|
||||
|
||||
We try to open the user-specified file, and return `false` if we can't.
|
||||
After this, we start doing the setup specific to a reentrant
|
||||
Flex scanner. We declare a `yyscan_t` variable, which
|
||||
will contain all of Flex's state. Then, we initialize
|
||||
it using `yylex_init`. Finally, since we can no longer
|
||||
touch the `yyin` global variable (it doesn't exist),
|
||||
we have to resort to using a setter function provided by Flex
|
||||
to configure the tokenizer's input stream.
|
||||
|
||||
Next, we construct our Bison-generated parser. Note that
|
||||
unlike before, we have to pass in two arguments:
|
||||
`scanner` and `*this`, the latter being of type `parse_driver&`.
|
||||
We'll come back to how this works in a moment. With
|
||||
the scanner and parser initialized, we invoke `parser::operator()`,
|
||||
which actually runs the Flex- and Bison-generated code.
|
||||
To clean up, we run `yylex_destroy` and `fclose`. Finally,
|
||||
we call `file_mgr::finalize`, and return. But what
|
||||
_is_ `file_mgr`?
|
||||
|
||||
The `file_mgr` class does two things: it stores the part of the file
|
||||
that has already been read by Flex in memory, and it keeps track of
|
||||
where each line in our source file begins within the text. Here is its
|
||||
definition:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.hpp" 14 34 >}}
|
||||
|
||||
In this class, the `string_stream` member is used to construct
|
||||
an `std::string` from the bits of text that Flex reads,
|
||||
processes, and feeds to the `file_mgr` using the `write` method.
|
||||
It's more efficient to use a string stream than to concatenate
|
||||
strings repeatedly. Once Flex is finished processing the file,
|
||||
the final contents of the `string_stream` are transferred into
|
||||
the `file_contents` string using the `finalize` method. The `offset`
|
||||
and `line_offsets` fields will be used as we described earlier: each time Flex
|
||||
encounters the `\n` character, the `offset` variable will be pushed
|
||||
on top of the `line_offsets` vector, marking the beginning of
|
||||
the corresponding line. The methods of the class are as follows:
|
||||
|
||||
* `write` will be called from Flex, and will allow us to
|
||||
record the content of the file we're processing to the `string_stream`.
|
||||
We've already seen it used in the `YY_USER_ACTION` macro.
|
||||
* `mark_line` will also be called from Flex, and will mark the current
|
||||
`file_offset` as the beginning of a line by pushing it into `line_offsets`.
|
||||
* `finalize` will be called by the `parse_driver` when the parsing
|
||||
finishes. At this time, the `string_stream` should contain all of
|
||||
the input file, and this data is transferred to `file_contents`, as
|
||||
we mentioned above.
|
||||
* `get_index` and `get_line_end` will be used for converting
|
||||
`yy::location` instances to offsets within the source code buffer.
|
||||
* `print_location` will be used for printing errors.
|
||||
It will print the lines spanned by the given location, with the
|
||||
location itself colored and underlined if the last argument is `true`.
|
||||
This will make our errors easier on the eyes.
|
||||
|
||||
Let's take a look at their implementations. First, `write`.
|
||||
For the most part, this method is a proxy for the `write`
|
||||
method of our `string_stream`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 9 12 >}}
|
||||
|
||||
We do, however, also keep track of the `file_offset` variable
|
||||
here, which ensures we have up-to-date information
|
||||
regarding our position in the source file. The implementation
|
||||
of `mark_line` uses this information:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 14 16 >}}
|
||||
|
||||
The `finalize` method is trivial, and requires little additional
|
||||
discussion:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 18 20 >}}
|
||||
|
||||
Once we have the line offsets, `get_index` becomes very simple:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 22 25 >}}
|
||||
|
||||
Here, we use an assertion for the first time. Calling
|
||||
`get_index` with a negative or zero line doesn't make
|
||||
any sense, since Bison starts tracking line numbers
|
||||
at 1. Similarly, asking for a line for which we don't
|
||||
have a recorded offset is invalid. Neither
|
||||
of these nonsensical calls to `get_index` can
|
||||
be caused by the user under normal circumstances,
|
||||
and indicate the method's misuse by the author of
|
||||
the compiler (us!). Thus, we terminate the program.
|
||||
|
||||
Finally, the implementation of `line_end` just finds the
|
||||
beginning of the next line. We stick to the C convention
|
||||
of marking 'end' indices exclusive (pointing just past
|
||||
the end of the array):
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 27 30 >}}
|
||||
|
||||
Since `line_offsets` has as many elements as there are lines,
|
||||
the last line number would be equal to the vector's size.
|
||||
When looking up the end of the last line, we can't look for
|
||||
the beginning of the next line, so instead we return the end of the file.
|
||||
|
||||
Next, the `print_location` method prints three sections
|
||||
of the source file. These are the text "before" the error,
|
||||
the error itself, and, finally, the text "after" the error.
|
||||
For example, if an error began on the fifth column of the third
|
||||
line, and ended on the eighth column of the fourth line, the
|
||||
"before" section would include the first four columns of the third
|
||||
line, and the "after" section would be the ninth column onward
|
||||
on the fourth line. Before and after the error itself,
|
||||
if the `highlight` argument is true,
|
||||
we sprinkle the ANSI escape codes to enable and disable
|
||||
special formatting, respectively. For now, the special
|
||||
formatting involves underlining the text and making it red.
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 32 46 >}}
|
||||
|
||||
Finally, to get the forward declarations for the `yy*` functions
|
||||
and types, we set the `header-file` option in Flex:
|
||||
|
||||
{{< codelines "C++" "compiler/13/scanner.l" 3 3 >}}
|
||||
|
||||
We also include this `scanner.hpp` file in our `parse_driver.cpp`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.cpp" 2 2 >}}
|
||||
|
||||
#### Adding the Driver to Flex and Bison
|
||||
Bison's C++ language template generates a class called
|
||||
`yy::parser`. We don't really want to modify this class
|
||||
in any way: not only is it generated code, but it's
|
||||
also rather complex. Instead, Bison provides us
|
||||
with a mechanism to pass more data in to the parser.
|
||||
This data is made available to all the actions
|
||||
that the parser runs. Better yet, Bison also attempts
|
||||
to pass this data on to the tokenizer, which in our
|
||||
case would mean that whatever data we provide Bison
|
||||
will also be available to Flex. This is how we'll
|
||||
allow the two components to access our new `parse_driver`
|
||||
class. This is also how we'll pass in the `yyscan_t`
|
||||
that Flex now needs to run its tokenizing code. To
|
||||
do all this, we use Bison's `%param` option. I'm
|
||||
going to include a few more lines from `parser.y`,
|
||||
since they contain the necessary `#include` directives
|
||||
and a required type definition:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parser.y" 1 18 >}}
|
||||
|
||||
The `%param` option effectively adds the parameter listed
|
||||
between the curly braces to the constructor of the generated
|
||||
`yy::parser`. We've already seen this in the implementation
|
||||
of our driver, where we passed `scanner` and `*this` as
|
||||
arguments when creating the parser. The parameters we declare are also passed to the
|
||||
`yylex` function, which is expected to accept them in the same order.
|
||||
|
||||
Since we're adding `parse_driver` as an argument we have to
|
||||
declare it. However, we can't include the `parse_driver` header
|
||||
right away because `parse_driver` itself includes the `parser` header:
|
||||
we'd end up with a circular dependency. Instead, we resort to
|
||||
forward-declaring the driver class, as well as the `yyscan_t`
|
||||
structure containing Flex's state.
|
||||
|
||||
Adding a parameter to Bison doesn't automatically affect
|
||||
Flex. To let Flex know that its `yylex` function must now accept
|
||||
the state and the parsing driver, we have to define the
|
||||
`YY_DECL` macro. We do this in `parse_driver.hpp`, since
|
||||
this forward declaration will be used by both Flex
|
||||
and Bison:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parse_driver.hpp" 56 58 >}}
|
||||
|
||||
#### Improving Exceptions
|
||||
Now, it's time to add location data (and a little bit more) to our
|
||||
exceptions. We want to make it possible for exceptions to include
|
||||
data about where the error occurred, and to print this data to the user.
|
||||
However, it's also possible for us to have exceptions that simply
|
||||
do not have that location data. Furthermore, we want to know
|
||||
whether or not an exception has an associated location; we'd
|
||||
rather not print an invalid or "default" location when an error
|
||||
occurs.
|
||||
|
||||
In the old days of programming, we could represent the absence
|
||||
of location data with a `nullptr`, or `NULL`. But not only
|
||||
does this approach expose us to all kinds of `NULL`-safety
|
||||
bugs, but it also requires heap allocation! This doesn't
|
||||
make it sound all that appealing; instead, I think we should
|
||||
opt for using `std::optional`.
|
||||
|
||||
Though `std::optional` is standard (as may be obvious from its
|
||||
namespace), it's a rather recent addition to the C++ STL.
|
||||
In order to gain access to it, we need to ensure that our
|
||||
project is compiled using C++17. To this end, we add
|
||||
the following two lines to our CMakeLists.txt:
|
||||
|
||||
{{< codelines "CMake" "compiler/13/CMakeLists.txt" 5 6 >}}
|
||||
|
||||
Now, let's add a new base class for all of our compiler errors,
|
||||
unsurprisingly called `compiler_error`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/error.hpp" 10 26 >}}
|
||||
|
||||
We'll put some 'common' exception functionality
|
||||
into the `print_location` and `print_about` methods. If the error
|
||||
has an associated location, the former method will print that
|
||||
location to the screen. We don't always want to highlight
|
||||
the part of the code that caused the error: for instance,
|
||||
an invalid data type definition may span several lines,
|
||||
and coloring that whole section of text red would be
|
||||
too much. To address this, we add the `highlight`
|
||||
boolean argument, which can be used to switch the
|
||||
colors on and off. The `print_about` method
|
||||
will simply print the `what()` message of the exception,
|
||||
in addition to the "specific" error that occurred (stored
|
||||
in `description`). Here are the implementations of the
|
||||
functions:
|
||||
|
||||
{{< codelines "C++" "compiler/13/error.cpp" 3 16 >}}
|
||||
|
||||
We will also add a `pretty_print` method to all of
|
||||
our exceptions. This method will handle
|
||||
all the exception-specific printing logic.
|
||||
For the generic compiler error, this means
|
||||
simply printing out the error text and the location:
|
||||
|
||||
{{< codelines "C++" "compiler/13/error.cpp" 18 21 >}}
|
||||
|
||||
For `type_error`, this logic slightly changes,
|
||||
enabling colors when printing the location:
|
||||
|
||||
{{< codelines "C++" "compiler/13/error.cpp" 27 30 >}}
|
||||
|
||||
Finally, for `unification_error`, we also include
|
||||
the code to print out the two types that our
|
||||
compiler could not unify:
|
||||
|
||||
{{< codelines "C++" "compiler/13/error.cpp" 32 41 >}}
|
||||
|
||||
There's a subtle change here. Compared to the previous
|
||||
type-printing code (which we had in `main`), what
|
||||
we wrote here deals with "expected" and "actual" types.
|
||||
The `left` type passed to the exception is printed
|
||||
first, and is treated like the "correct" type. The
|
||||
`right` type, on the other hand, is treated
|
||||
like the "wrong" type that should have been
|
||||
unifiable with `left`. This will affect the
|
||||
calling conventions of our unification code.
|
||||
|
||||
Now, we can go through and find all the places where
|
||||
we `throw 0`. One such place was in the data type
|
||||
definition code, where declaring the same type parameter
|
||||
twice is invalid. We replace the `0` with a
|
||||
`compiler_error`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/definition.cpp" 66 69 >}}
|
||||
|
||||
Not all `throw 0` statements should become exceptions.
|
||||
For example, here's code from the previous version of
|
||||
the compiler:
|
||||
|
||||
{{< codelines "C++" "compiler/12/definition.cpp" 123 127 >}}
|
||||
|
||||
If a definition `def_defn` has a dependency on a "nearby" (declared
|
||||
in the same group) definition called `dependency`, and if
|
||||
`dependency` does not exist within the same definition group,
|
||||
we throw an exception. But this error is impossible
|
||||
for a user to trigger: the only reason for a variable to appear
|
||||
in the `nearby_variables` vector is that it was previously
|
||||
found in the definition group. Here's the code that proves this
|
||||
(from the current version of the compiler):
|
||||
|
||||
{{< codelines "C++" "compiler/13/definition.cpp" 102 106 >}}
|
||||
|
||||
Not being able to find the variable in the definition group
|
||||
is a compiler bug, and should never occur. So, instead
|
||||
of throwing an exception, we'll use an assertion:
|
||||
|
||||
{{< codelines "C++" "compiler/13/definition.cpp" 128 128 >}}
|
||||
|
||||
For more complicated error messages, we can use a `stringstream`.
|
||||
Here's an example from `parsed_type`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/parsed_type.cpp" 16 23 >}}
|
||||
|
||||
In general, this change is also rather mechanical. Before we
|
||||
move on, to maintain a balance between exceptions and assertions, here
|
||||
are a couple more assertions from `type_env`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type_env.cpp" 81 82 >}}
|
||||
|
||||
Once again, it should not be possible for the compiler
|
||||
to try generalize the type of a variable that doesn't
|
||||
exist, and nor should generalization occur twice.
|
||||
|
||||
While we're on the topic of types, let's talk about
|
||||
`type_mgr::unify`. In practice, I suspect that a lot of
|
||||
errors in our compiler will originate from this method.
|
||||
However, at present, this method does not in any way
|
||||
track the locations of where a unification error occurred.
|
||||
To fix this, we add a new `loc` parameter to `unify`,
|
||||
which we make optional to allow for unification without
|
||||
a known location. Here's the declaration:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type.hpp" 92 92 >}}
|
||||
|
||||
The change to the implementation is mechanical and repetitive,
|
||||
so instead of showing you the whole method, I'll settle for
|
||||
a couple of lines:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type.cpp" 121 122 >}}
|
||||
|
||||
We want to make sure that a location provided to the
|
||||
top-level call to `unify` is also forwarded to the
|
||||
recursive calls, so we have to explicitly add it
|
||||
to the call.
|
||||
|
||||
We'll also have to update the 'main' code to call the
|
||||
`pretty_print` methods, but there's another big change
|
||||
that we're going to make before then. However, once that
|
||||
change is made, our errors will look a lot better.
|
||||
Here is what's printed out to the user when a type error
|
||||
occurs:
|
||||
|
||||
```
|
||||
an error occured while checking the types of the program: failed to unify types
|
||||
occuring on line 2:
|
||||
3 + False
|
||||
the expected type was:
|
||||
Int
|
||||
while the actual type was:
|
||||
Bool
|
||||
```
|
||||
|
||||
Here's an error that was previously a `throw 0` statement in our code:
|
||||
|
||||
```
|
||||
an error occured while compiling the program: type variable a used twice in data type definition.
|
||||
occuring on line 1:
|
||||
data Pair a a = { MkPair a a }
|
||||
```
|
||||
|
||||
Now, not only have we eliminated the lazy uses of `throw 0` in our
|
||||
code, but we've also improved the presentation of the errors
|
||||
to the user!
|
||||
|
||||
### Rethinking Name Mangling
|
||||
In the previous post, I said the following:
|
||||
|
||||
> One more thing. Let’s adopt the convention of storing mangled names into the compilation environment. This way, rather than looking up mangled names only for global functions, which would be a ‘gotcha’ for anyone working on the compiler, we will always use the mangled names during compilation.
|
||||
|
||||
Now that I've had some more time to think about it
|
||||
(and now that I've returned to the compiler after
|
||||
a brief hiatus), I think that this was not the right call.
|
||||
Mangled names make sense when translating to LLVM; we certainly
|
||||
don't want to declare two LLVM functions
|
||||
{{< sidenote "right" "mangling-note" "with the same name." >}}
|
||||
By the way, LLVM has its own name mangling functionality. If you
|
||||
declare two functions with the same name, they'll appear as
|
||||
<code>function</code> and <code>function.0</code>. Since LLVM
|
||||
uses the <code>Function*</code> C++ values to refer to functions,
|
||||
as long as we keep them separate on <em>our</em> end, things will
|
||||
work.<br>
|
||||
<br>
|
||||
However, in our compiler, name mangling occurs before LLVM is
|
||||
introduced, at translation time. We could create LLVM functions
|
||||
at that time, too, and associate them with variables. But then,
|
||||
our G-machine instructions will be coupled to LLVM, which
|
||||
would not be as clean.
|
||||
{{< /sidenote >}}
|
||||
But things are different for local variables. Our local variables
|
||||
are graphs on a stack, and are not actually compiled to LLVM
|
||||
definitions. It doesn't make sense to mangle their names, since
|
||||
their names aren't present anywhere in the final executable.
|
||||
It's not even "consistent" to mangle them, since global definitions
|
||||
are compiled directly to __PushGlobal__ instructions, while local
|
||||
variables are only referenced through the current `env`.
|
||||
So, I opted to reverse my decision. We will go back to
|
||||
placing variable names directly into `env_var`. Here's
|
||||
an example of this from `global_scope.cpp`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/global_scope.cpp" 6 8 >}}
|
||||
|
||||
Now that we've started using assertions, I also think it's worth
|
||||
putting our new invariant -- "only global definitions have mangled
|
||||
names" -- into code:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type_env.cpp" 36 45 >}}
|
||||
|
||||
Furthermore, we'll _require_ that a global definition
|
||||
has a mangled name. This way, we can be more confident
|
||||
that a variable from a __PushGlobal__ instruction
|
||||
is referencing the right function. To achieve
|
||||
this, we change `get_mangled_name` to stop
|
||||
returning the input string if a mangled name was not
|
||||
found; doing so makes it impossible to check if a mangled
|
||||
name was explicitly defined. Instead,
|
||||
we add two assertions. First, if an environment scope doesn't
|
||||
contain a variable, then it _must_ have a parent.
|
||||
If it does contain the variable, that variable _must_ have
|
||||
a mangled name. We end up with the following:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type_env.cpp" 47 55 >}}
|
||||
|
||||
For this to work, we make one more change. Now that we've
|
||||
enabled C++17, we have access to `std::optional`. We
|
||||
can thus represent the presence or absence of mangled
|
||||
names using an optional field, rather than with the empty string `""`.
|
||||
I hear that C++ compilers have pretty good
|
||||
[empty string optimizations](https://www.youtube.com/watch?v=kPR8h4-qZdk),
|
||||
but nonetheless, I think it makes more sense semantically
|
||||
to use "absent" (`nullopt`) instead of "empty" (`""`).
|
||||
Here's the definition of `type_env::variable_data` now:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type_env.hpp" 16 25 >}}
|
||||
|
||||
Since looking up a mangled name for non-global variable
|
||||
{{< sidenote "right" "unrepresentable-note" "will now result in an assertion failure," >}}
|
||||
A very wise human at the very dawn of our species once said,
|
||||
"make illegal states unrepresentable". Their friends and family were a little
|
||||
busy making a fire, and didn't really understand what the heck they meant. Now,
|
||||
we kind of do.<br>
|
||||
<br>
|
||||
It's <em>possible</em> for our <code>type_env</code> to include a
|
||||
<code>variable_data</code> entry that is both global and has no mangled
|
||||
name. But it doesn't have to be this way. We could define two subclasses
|
||||
of <code>variable_data</code>, one global and one local,
|
||||
where only the global one has a <code>mangled_name</code>
|
||||
field. It would be impossible to reach this assertion failure then.
|
||||
{{< /sidenote >}} we have to change
|
||||
`ast_lid::compile` to only call `get_mangled_name` once
|
||||
it ensures that the variable being compiled is, in fact,
|
||||
global:
|
||||
|
||||
{{< codelines "C++" "compiler/13/ast.cpp" 58 63 >}}
|
||||
|
||||
Since all global functions now need to have mangled
|
||||
names, we run into a bit of a problem. What are
|
||||
the mangled names of `(+)`, `(-)`, and so on? We could
|
||||
continue to hardcode them as `plus`, `minus`, etc., but this can
|
||||
(and currently does!) lead to errors. Consider the following
|
||||
piece of code:
|
||||
|
||||
```
|
||||
defn plus x y = { x + y }
|
||||
defn main = { plus 320 6 }
|
||||
```
|
||||
|
||||
We've hardcoded the mangled name of `(+)` to be `plus`. However,
|
||||
`global_scope` doesn't know about this, so when the actual
|
||||
`plus` function gets translated, it also gets assigned the
|
||||
mangled name `plus`. The name is also overwritten in the
|
||||
`llvm_context`, which effectively means that `(+)` is
|
||||
now compiled to a call of the user-defined `plus` function.
|
||||
If we didn't overwrite the name, we would've run into an assertion
|
||||
failure in this scenario anyway. In short, this example illustrates
|
||||
an important point: mangling information needs to be available
|
||||
outside of a `global_scope`. We don't want to do this by having
|
||||
every function take in a `global_scope` to access the mangling
|
||||
information; instead, we'll store the mangling information in
|
||||
a new `mangler` class, which `global_scope` will take as an argument.
|
||||
The new class is very simple:
|
||||
|
||||
{{< codelines "C++" "compiler/13/mangler.hpp" 5 11 >}}
|
||||
|
||||
As with `parse_driver`, `global_scope` takes `mangler` by reference
|
||||
and stores a pointer:
|
||||
|
||||
{{< codelines "C++" "compiler/13/global_scope.hpp" 50 50 >}}
|
||||
|
||||
The implementation of `new_mangled_name` doesn't change, so I'm
|
||||
not going to show it here. With this new mangling information
|
||||
in hand, we can now correctly set the mangled names of binary
|
||||
operators:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.cpp" 22 27 >}}
|
||||
|
||||
Wait a moment, what's a `compiler`? Let's talk about that next.
|
||||
|
||||
### A Top-Level Class
|
||||
Now that we've moved name mangling out of `global_scope`, we have
|
||||
to put it somewhere. The same goes for global definition group
|
||||
and the file manager that are given to `parse_driver`. The two
|
||||
classes _make use_ of the other data, but they don't _own it_.
|
||||
That's why they take it by reference, and store it as a pointer.
|
||||
They're just temporarily allowed access.
|
||||
|
||||
So, what should be the owner of all of these disparate components?
|
||||
Thus far, that has been the `main` function, or the utility
|
||||
functions that it calls out to. However, this is sloppy:
|
||||
we have related data and operations on it, but we don't group
|
||||
them into an object. We can group all of the components of our
|
||||
compiler into a `compiler` object, and leave `main.cpp` with
|
||||
exception printing code.
|
||||
|
||||
The definition of the `compiler` class begins with all of the data
|
||||
structures that we use in the process of compilation:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.hpp" 12 20 >}}
|
||||
|
||||
There's a loose ordering to these fields. In C++, class members are
|
||||
initialized in the order they are declared; we therefore want to make
|
||||
sure that fields that are depended on by other fields are initialized first.
|
||||
Otherwise, I tried to keep the order consistent with the conceptual path
|
||||
of the code through the compiler.
|
||||
* Parsing happens first, so we begin with `parse_driver`, which needs a
|
||||
`file_manager` (to populate with line information) and a `definition_group`
|
||||
(to receive the global definitions from the parser).
|
||||
* We then proceed to typechecking, for which we use a global `type_env_ptr`
|
||||
(to define the built-in functions and constructors) and a `type_mgr` (to
|
||||
manage the assignments of type variables).
|
||||
* Once a program is typechecked, we transform it, eliminating local
|
||||
function definitions and lambda functions. This is done by storing
|
||||
newly-emitted global functions into the `global_scope`, which requires a
|
||||
`mangler` to generate new names for the target functions.
|
||||
* Finally, to generate LLVM IR, we need our `llvm_context` class.
|
||||
|
||||
The methods of the compiler are arranged similarly:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.hpp" 22 31 >}}
|
||||
|
||||
The methods go as follows:
|
||||
|
||||
* `add_default_types` adds the built-in types to the `global_env`.
|
||||
At this point, these types only include `Int`.
|
||||
* `add_binop_type` adds a single binary operator to the global
|
||||
type environment. We saw its implementation earlier: it deals
|
||||
with both binding a type, and setting a mangled name.
|
||||
* `add_default_types` adds the types for each binary operator.
|
||||
* `parse`, `typecheck`, `translate` and `compile` all do exactly
|
||||
what they say. In this case, compilation refers to creating G-machine
|
||||
instructions.
|
||||
* `create_llvm_binop` creates an internal function that forces the
|
||||
evaluation of its two arguments, and actually applies the given binary
|
||||
operator. Recall that the `(+)` in user code constructs a call to this
|
||||
function, but leaves it unevaluated until it's needed.
|
||||
* `generate_llvm` converts all the definitions in `global_scope`, which
|
||||
are at this point compiled into G-machine `instruction`s, into LLVM IR.
|
||||
* `output_llvm` contains all the code to actually generate an object
|
||||
file from the LLVM IR.
|
||||
|
||||
These functions are mostly taken from part 12's `main.cpp`, and adjusted
|
||||
to use the `compiler`'s members rather than local definitions or arguments.
|
||||
You should compare part 12's
|
||||
[`main.cpp`](https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/compiler/12/main.cpp)
|
||||
file with the
|
||||
[`compiler.cpp`](https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/compiler/13/compiler.cpp)
|
||||
file that we end up with at the end of this post.
|
||||
|
||||
Next, we have the compiler's constructor, and its `operator()`. The
|
||||
latter, analogously to our parsing driver, will trigger the compilation
|
||||
process. Their implementations are straightforward:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.cpp" 131 145 >}}
|
||||
|
||||
We also add a couple of methods to give external code access to
|
||||
some of the compiler's data structures. I omit their (trivial)
|
||||
implementations, but they have the following signatures:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.hpp" 35 36 >}}
|
||||
|
||||
With all the compilation code tucked into our new `compiler` class,
|
||||
`main` becomes very simple. We also finally get to use our exception
|
||||
pretty printing code:
|
||||
|
||||
{{< codelines "C++" "compiler/13/main.cpp" 11 27 >}}
|
||||
|
||||
With this, we complete our transition to a compiler object.
|
||||
All that's left is to clean up the code style.
|
||||
|
||||
### Keeping Things Private
|
||||
Hand-writing or generating hundreds of trivial getters and setters
|
||||
for the fields of a data class (which is standard in the world of Java) seems
|
||||
absurd to me. So, for most of this project, I stuck with
|
||||
`struct`s, rather than classes. But this is not a good policy
|
||||
to apply _everywhere_. I still think it makes sense to make
|
||||
data structures like `ast` and `type` public-by-default;
|
||||
however, I _don't_ think that way about classes like `type_mgr`,
|
||||
`llvm_context`, `type_env`, and `env`. All of these have information
|
||||
that we should never be accessing directly. Some guard this
|
||||
information with assertions. In short, it should be protected.
|
||||
|
||||
For most classes, the changes are mechanical. For instance, we
|
||||
can make `type_env` a class simply by changing its declaration,
|
||||
and marking all of its functions public. This requires a slight
|
||||
refactoring of a line that used its `parent` field. Here's
|
||||
what it used to be (in context):
|
||||
|
||||
{{< codelines "C++" "compiler/12/main.cpp" 57 60 >}}
|
||||
|
||||
And here's what it is now:
|
||||
|
||||
{{< codelines "C++" "compiler/13/compiler.cpp" 55 58 >}}
|
||||
|
||||
Rather than traversing the chain of environments from
|
||||
the body of the definition, we just use the definition's
|
||||
own `env_ptr`. This is cleaner and more explicit, and
|
||||
it helps us not use the private members of `type_env`!
|
||||
|
||||
The deal with `env` is about as simple. We just make
|
||||
it and its two descendants classes, and mark their
|
||||
methods and constructors public. The same
|
||||
goes for `global_scope`. To make `type_mgr`
|
||||
a class, we have to add a new method: `lookup`.
|
||||
Here's its implementation:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type.cpp" 81 85 >}}
|
||||
|
||||
It's used in `type_var::print` as follows:
|
||||
|
||||
{{< codelines "C++" "compiler/13/type.cpp" 28 35 >}}
|
||||
|
||||
We can't use `resolve` here because it takes (and returns)
|
||||
a `type_ptr`. If we make it _take_ a `type*`, it won't
|
||||
be able to return its argument if it's already resolved. If we
|
||||
allow it to _return_ `type*`, we won't have an owning
|
||||
reference. We also don't want to duplicate the
|
||||
method just for this one call. Notice, though, how similar
|
||||
`type_var::print`/`lookup` and `resolve` are in terms of execution.
|
||||
|
||||
The change for `llvm_context` requires a little more work.
|
||||
Right now, `ctx.builder` is used a _lot_ in `instruction.cpp`.
|
||||
Since we don't want to forward each of the LLVM builder methods,
|
||||
and since it feels weird to make `llvm_context` extend `llvm::IRBuilder`,
|
||||
we'll just provide a getter for the `builder` field. The
|
||||
same goes for `module`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/llvm_context.hpp" 46 47 >}}
|
||||
|
||||
Here's what some of the code from `instruction.cpp` looks like now:
|
||||
|
||||
{{< codelines "C++" "compiler/13/instruction.cpp" 144 145 >}}
|
||||
|
||||
Right now, the `ctx` field of the `llvm_context` (which contains
|
||||
the `llvm::LLVMContext`) is only externally used to create
|
||||
instances of `llvm::BasicBlock`. We'll add a proxy method
|
||||
for this functionality:
|
||||
|
||||
{{< codelines "C++" "compiler/13/llvm_context.cpp" 174 176 >}}
|
||||
|
||||
Finally, `instruction_pushglobal` needs to access the
|
||||
`llvm::Function` instances that we create in the process
|
||||
of compilation. We add a new `get_custom_function` method
|
||||
to support this, which automatically prefixes the function
|
||||
name with `f_`, much like `create_custom_function`:
|
||||
|
||||
{{< codelines "C++" "compiler/13/llvm_context.cpp" 292 294 >}}
|
||||
|
||||
I think that's enough. If we chose to turn more compiler
|
||||
data structures into classes, I think we would've quickly drowned
|
||||
in one-line getter and setter methods.
|
||||
|
||||
That's all for the cleanup! We've added locations and more errors
|
||||
to the compiler, stopped throwing `0` in favor of proper exceptions
|
||||
or assertions, made name mangling more reasonable, fixed a bug with
|
||||
accidentally shadowing default functions, organized our compilation
|
||||
process into a `compiler` class, and made more things into classes.
|
||||
In the next post, I hope to tackle __strings__ and __Input/Output__.
|
||||
I also think that implementing __modules__ would be a good idea,
|
||||
though at the moment I don't know too much on the subject. I hope
|
||||
you'll join me in my future writing!
|
||||
|
||||
### Appendix: Optimization
|
||||
When I started working on the compiler after the previous post,
|
||||
I went a little overboard. I started working on optimizing the generated programs,
|
||||
but eventually decided I wasn't doing a
|
||||
{{< sidenote "right" "good-note" "good enough" >}}
|
||||
I think authors should feel a certain degree of responsibility
|
||||
for the content they create. If I do something badly, somebody
|
||||
else trusts me and learns from it, who knows how much damage I've done.
|
||||
I try not to do damage.<br>
|
||||
<br>
|
||||
If anyone reads what I write, anyway!
|
||||
{{< /sidenote >}} job to present it to others,
|
||||
and scrapped that part of the compiler altogether. I'm not
|
||||
sure if I will try again in the near future. But,
|
||||
if you're curious about optimization, here are a few avenues
|
||||
I've explored or thought about:
|
||||
|
||||
* __Unboxing numbers__. Right now, numbers are allocated and garbage
|
||||
collected just like the rest of the graph nodes. This is far from ideal.
|
||||
We could use pointers to represent numbers, by tagging their most significant
|
||||
bits on 64-bit CPUs. Rather than allocating a node, the runtime will just
|
||||
cast a number to a pointer, tag it, and push it on the stack.
|
||||
* __Converting enumeration data types to numbers__. If no constructor
|
||||
of a data type takes any arguments, then the tag uniquely identifies
|
||||
each constructor. Combined with unboxed numbers, this can save unnecessary
|
||||
allocations and memory accesses.
|
||||
* __Special treatment for global constants__. It makes sense for
|
||||
global functions to be converted into LLVM functions, but the
|
||||
same is not the case for
|
||||
{{< sidenote "right" "constant-note" "constants." >}}
|
||||
Yeah, yeah, a constant is just a nullary function. Get
|
||||
out of here with your pedantry!
|
||||
{{< /sidenote >}} We can find a way to
|
||||
initialize global constants once, which would save some work. To
|
||||
make more constants suitable for this, we could employ
|
||||
[monomorphism restriction](https://wiki.haskell.org/Monomorphism_restriction).
|
||||
* __Optimizing stack operations.__ If you read through the LLVM IR
|
||||
we produce, you can see a lot of code that peeks at something twice,
|
||||
or pops-then-pushes the same value, or does other absurd things. LLVM
|
||||
isn't aware of the semantics of our stacks, but perhaps we could write an
|
||||
optimization pass to deal with some of the more blatant instances of
|
||||
this issue.
|
||||
|
||||
If you attempt any of these, let me know how it goes, please!
|
||||
304
content/blog/backend_math_rendering.md
Normal file
@@ -0,0 +1,304 @@
|
||||
---
|
||||
title: Rendering Mathematics On The Back End
|
||||
date: 2020-07-21T14:54:26-07:00
|
||||
tags: ["Website", "Nix", "Ruby", "KaTeX"]
|
||||
---
|
||||
|
||||
Due to something of a streak of bad luck when it came to computers, I spent a
|
||||
significant amount of time using a Linux-based Chromebook, and then a
|
||||
Pinebook Pro. It was, in some way, enlightening. The things that I used to take
|
||||
for granted with a 'powerful' machine now became a rare luxury: StackOverflow,
|
||||
and other relatively static websites, took upwards of ten seconds to finish
|
||||
loading. On Slack, each of my keypresses could take longer than 500ms to
|
||||
appear on the screen, and sometimes, it would take several seconds. Some
|
||||
websites would present me with a white screen, and remain that way for much
|
||||
longer than I had time to wait. It was awful.
|
||||
|
||||
At one point, I installed uMatrix, and made it the default policy to block
|
||||
all JavaScript. For the most part, this worked well. Of course, I had to
|
||||
enable JavaScript for applications that needed to be interactive, like
|
||||
Slack, and Discord. But for the most part, I was able to browse the majority
|
||||
of the websites I normally browse. This went on until I started working
|
||||
on the [compiler series]({{< relref "00_compiler_intro.md" >}}) again,
|
||||
and discovered that the LaTeX math on my page, which was required
|
||||
for displaying things like inference rules, didn't work without
|
||||
JavaScript. I was left with two options:
|
||||
|
||||
* Allow JavaScript, and continue using MathJax to render my math.
|
||||
* Make it so that the mathematics are rendered on the back end.
|
||||
|
||||
I've [previously written about math rendering]({{< relref "math_rendering_is_wrong.md" >}}),
|
||||
and made the observation that MathJax's output for LaTeX is __identical__
|
||||
on every computer. From the MathJax 2.6 change log:
|
||||
|
||||
> _Improved CommonHTML output_. The CommonHTML output now provides the same layout quality and MathML support as the HTML-CSS and SVG output. It is on average 40% faster than the other outputs and the markup it produces are identical on all browsers and thus can also be pre-generated on the server via MathJax-node.
|
||||
|
||||
It seems absurd, then, to offload this kind of work onto the users, to
|
||||
be done over and over again. As should be clear from the title of
|
||||
this post, this made me settle for the second option: it was
|
||||
__obviously within reach__, especially for a statically-generated website
|
||||
like mine, to render math on the backend.
|
||||
|
||||
I settled on the following architecture:
|
||||
|
||||
* As before, I would generate my pages using Hugo.
|
||||
* I would use the KaTeX NPM package to render math.
|
||||
* To build the website no matter what system I was on, I would use Nix.
|
||||
|
||||
It so happens that Nix isn't really required for using my approach in general.
|
||||
I will give my setup here, but feel free to skip ahead.
|
||||
|
||||
### Setting Up A Nix Build
|
||||
My `default.nix` file looks like this:
|
||||
|
||||
```Nix {linenos=table}
|
||||
{ stdenv, hugo, fetchgit, pkgs, nodejs, ruby }:
|
||||
|
||||
let
|
||||
url = "https://dev.danilafe.com/Web-Projects/blog-static.git";
|
||||
rev = "<commit>";
|
||||
sha256 = "<hash>";
|
||||
requiredPackages = import ./required-packages.nix {
|
||||
inherit pkgs nodejs;
|
||||
};
|
||||
in
|
||||
stdenv.mkDerivation {
|
||||
name = "blog-static";
|
||||
version = rev;
|
||||
src = fetchgit {
|
||||
inherit url rev sha256;
|
||||
};
|
||||
builder = ./builder.sh;
|
||||
converter = ./convert.rb;
|
||||
buildInputs = [
|
||||
hugo
|
||||
requiredPackages.katex
|
||||
(ruby.withPackages (ps: [ ps.nokogiri ]))
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
I'm using `node2nix` to generate the `required-packages.nix` file, which allows me,
|
||||
even from a sandboxed Nix build, to download and install `npm` packages. This is needed
|
||||
so that I have access to the `katex` binary at build time. I fed the following JSON file
|
||||
to `node2nix`:
|
||||
|
||||
```JSON {linenos=table}
|
||||
[
|
||||
"katex"
|
||||
]
|
||||
```
|
||||
|
||||
The Ruby script I wrote for this (more on that soon) required the `nokogiri` gem, which
|
||||
I used for traversing the HTML generated for my site. Hugo was obviously required to
|
||||
generate the HTML.
|
||||
|
||||
### Converting LaTeX To HTML
|
||||
After my first post complaining about the state of mathematics on the web, I received
|
||||
the following email (which the author allowed me to share):
|
||||
|
||||
> Sorry for having a random stranger email you, but in your blog post
|
||||
[(link)](https://danilafe.com/blog/math_rendering_is_wrong) you seem to focus on MathJax's
|
||||
difficulty in rendering things server-side, while quietly ignoring that KaTeX's front
|
||||
page advertises server-side rendering. Their documentation [(link)](https://katex.org/docs/options.html)
|
||||
even shows (at least as of the time this email was sent) that it renders both HTML
|
||||
(to be arranged nicely with their CSS) for visuals and MathML for accessibility.
|
||||
|
||||
The author of the email then kindly provided a link to a page they generated using KaTeX and
|
||||
some Bash scripts. The math on this page was rendered at the time it was generated.
|
||||
|
||||
This is a great point, and KaTeX is indeed usable for server-side rendering. But I've
|
||||
seen few people who do actually use it. Unfortunately, as I pointed out in my previous post on the subject,
|
||||
few tools actually take your HTML page and replace LaTeX with rendered math.
|
||||
Here's what I wrote about this last time:
|
||||
|
||||
> [In MathJax,] The bigger issue, though, was that the `page2html`
|
||||
program, which rendered all the mathematics in a single HTML page,
|
||||
was gone. I found `tex2html` and `text2htmlcss`, which could only
|
||||
render equations without the surrounding HTML. I also found `mjpage`,
|
||||
which replaced mathematical expressions in a page with their SVG forms.
|
||||
|
||||
This is still the case, in both MathJax and KaTeX. The ability
|
||||
to render math in one step is the main selling point of front-end LaTeX renderers:
|
||||
all you have to do is drop in a file from a CDN, and voila, you have your
|
||||
math. There are no such easy answers for back-end rendering. In fact,
|
||||
as we will soon see, it's not possible to just search-and-replace occurrences
|
||||
of mathematics on your page, either. To actually get KaTeX working
|
||||
on the backend, you need access to tools that handle the potential variety
|
||||
of edge cases associated with HTML. Such tools, to my knowledge, do not
|
||||
currently exist.
|
||||
|
||||
I decided to write my own Ruby script to get the job done. From this script, I
|
||||
would call the `katex` command-line program, which would perform
|
||||
the heavy lifting of rendering the mathematics.
|
||||
|
||||
There are two types of math on my website: inline math and display math.
|
||||
On the command line ([here are the docs](https://katex.org/docs/cli.html)),
|
||||
the distinction is made using the `--display-mode` argument. So, the general algorithm
|
||||
is to replace the code inside the `$$...$$` with their display-rendered version,
|
||||
and the code inside the `\(...\)` with the inline-rendered version. I came up with
|
||||
the following Ruby function:
|
||||
|
||||
```Ruby {linenos=table}
|
||||
def render_cached(cache, command, string, render_comment = nil)
|
||||
cache.fetch(string) do |new|
|
||||
puts " Rendering #{render_comment || new}"
|
||||
cache[string] = Open3.popen3(command) do |i, o, e, t|
|
||||
i.write new
|
||||
i.close
|
||||
o.read.force_encoding(Encoding::UTF_8).strip
|
||||
end
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
Here, the `cache` argument is used to prevent re-running the `katex` command
|
||||
on an equation that was already rendered before (the output is the same, after all).
|
||||
The `command` is the specific shell command that we want to invoke; this would
|
||||
be either `katex` or `katex -d`. The `string` is the math equation to render,
|
||||
and the `render_comment` is the string to print to the console instead of the equation
|
||||
(so that long, display math equations are not printed out to standard out).
|
||||
|
||||
Then, given a substring of the HTML file, we use regular expressions
|
||||
to find the `\(...\)` and `$$...$$`s, and use the `render_cached` method
|
||||
on the LaTeX code inside.
|
||||
|
||||
```Ruby {linenos=table}
|
||||
def perform_katex_sub(inline_cache, display_cache, content)
|
||||
rendered = content.gsub /\\\(((?:[^\\]|\\[^\)])*)\\\)/ do |match|
|
||||
render_cached(inline_cache, "katex", $~[1])
|
||||
end
|
||||
rendered = rendered.gsub /\$\$((?:[^\$]|$[^\$])*)\$\$/ do |match|
|
||||
render_cached(display_cache, "katex -d", $~[1], "display")
|
||||
end
|
||||
return rendered
|
||||
end
|
||||
```
|
||||
|
||||
There's a bit of a trick to the final layer of this script. We want to be
|
||||
really careful about where we replace LaTeX, and where we don't. In
|
||||
particular, we _don't_ want to go into the `code` tags. Otherwise,
|
||||
it wouldn't be possible to talk about LaTeX code! I also suspect that
|
||||
some captions, alt texts, and similar elements should also be left alone.
|
||||
However, I don't have those on my website (yet), and I won't worry about
|
||||
them now. Either way, because of the code tags,
|
||||
we can't just search-and-replace over the entire page; we need to be context
|
||||
aware. This is where `nokogiri` comes in. We parse the HTML, and iterate
|
||||
over all of the 'text' nodes, calling `perform_katex_sub` on all
|
||||
of those that _aren't_ inside code tags.
|
||||
|
||||
Fortunately, this kind of iteration is pretty easy to specify thanks to something called XPath.
|
||||
This was my first time encountering it, but it seems extremely useful: it's
|
||||
a sort of language for selecting XML nodes. First, you provide an 'axis',
|
||||
which is used to specify the positions of the nodes you want to look at
|
||||
relative to the root node. The axis `/` looks at the immediate children
|
||||
(this would be the `html` tag in a properly formatted document, I would imagine).
|
||||
The axis `//` looks at all the transitive children. That is, it will look at the
|
||||
children of the root, then its children, and so on. There's also the `self` axis,
|
||||
which looks at the node itself.
|
||||
|
||||
After you provide an axis, you need to specify the type of node that you want to
|
||||
select. We can write `code`, for instance, to pick only the `<code>....</code>` tags
|
||||
from the axis we've chosen. We can also use `*` to select any node, and we can
|
||||
use `text()` to select text nodes, such as the `Hello` inside of `<b>Hello</b>`.
|
||||
|
||||
We can also apply some more conditions to the nodes we pick using `[]`.
|
||||
For us, the relevant feature here is `not(...)`, which allows us to
|
||||
select nodes that do __not__ match a particular condition. This is all
|
||||
we need to know.
|
||||
|
||||
We write:
|
||||
|
||||
* `//`, starting to search for nodes everywhere, not just the root of the document.
|
||||
* `*`, to match _any_ node. We want to replace math inside of `div`s, `span`s, `nav`s,
|
||||
all of the `h`s, and so on.
|
||||
* `[not(self::code)]`, cutting out all the `code` tags.
|
||||
* `/`, now selecting the nodes that are immediate descendants of the nodes we've selected.
|
||||
* `text()`, giving us the text contents of all the nodes we've selected.
|
||||
|
||||
All in all:
|
||||
|
||||
```
|
||||
//*[not(self::code)]/text()
|
||||
```
|
||||
|
||||
Finally, we use this XPath from `nokogiri`:
|
||||
|
||||
```Ruby {linenos=table}
|
||||
files = ARGV[0..-1]
|
||||
inline_cache, display_cache = {}, {}
|
||||
|
||||
files.each do |file|
|
||||
puts "Rendering file: #{file}"
|
||||
document = Nokogiri::HTML.parse(File.open(file))
|
||||
document.search('//*[not(self::code)]/text()').each do |t|
|
||||
t.replace(perform_katex_sub(inline_cache, display_cache, t.content))
|
||||
end
|
||||
File.write(file, document.to_html)
|
||||
end
|
||||
```
|
||||
|
||||
I named this script `convert.rb`; it's used from inside of the Nix expression
|
||||
and its builder, which we will cover below.
|
||||
|
||||
### Tying it All Together
|
||||
Finally, I wanted an end-to-end script to generate HTML pages and render the LaTeX in them.
|
||||
I used Nix for this, but the below script will largely be compatible with a non-Nix system.
|
||||
I came up with the following, commenting on Nix-specific commands:
|
||||
|
||||
```Bash {linenos=table}
|
||||
# Nix-specific; set up paths.
|
||||
source $stdenv/setup
|
||||
|
||||
# Build site with Hugo
|
||||
# The cp is Nix-specific; it copies the blog source into the current directory.
|
||||
cp -r $src/* .
|
||||
hugo --baseUrl="https://danilafe.com"
|
||||
|
||||
# Render math in HTML and XML files.
|
||||
# $converter is Nix-specific; you can just use convert.rb.
|
||||
find public/ -regex "public/.*\.html" | xargs ruby $converter
|
||||
|
||||
# Output result
|
||||
# $out is Nix-specific; you can replace it with your destination folder.
|
||||
mkdir $out
|
||||
cp -r public/* $out/
|
||||
```
|
||||
|
||||
This is it! Using the two scripts, `convert.rb` and `builder.sh`, I
|
||||
was able to generate my blog with the math rendered on the back-end.
|
||||
Please note, though, that I had to add the KaTeX CSS to my website's
|
||||
`<head>`.
|
||||
|
||||
### Caveats
|
||||
The main caveat of my approach is performance. For every piece of
|
||||
mathematics that I render, I invoke the `katex` command. This incurs
|
||||
the penalty of Node's startup time, every time, and makes my approach
|
||||
take a few dozen seconds to run on my relatively small site. The
|
||||
better approach would be to use a NodeJS script, rather than a Ruby one,
|
||||
to perform the conversion. KaTeX also provides an API, so such a NodeJS
|
||||
script can find the files, parse the HTML, and perform the substitutions.
|
||||
I did quite like using `nokogiri` here, though, and I hope that an equivalently
|
||||
pleasant solution exists in JavaScript.
|
||||
|
||||
Re-rendering the whole website is also pretty wasteful. I rarely change the
|
||||
mathematics on more than one page at a time, but every time I do so, I have
|
||||
to re-run the script, and therefore re-render every page. This makes sense
|
||||
for me, since I use Nix, and my builds are pretty much always performed
|
||||
from scratch. On the other hand, for others, this may not be the best solution.
|
||||
|
||||
### Alternatives
|
||||
The same person who sent me the original email above also pointed out
|
||||
[this `pandoc` filter for KaTeX](https://github.com/Zaharid/pandoc_static_katex).
|
||||
I do not use Pandoc, but from what I can see, this filter relies on
|
||||
Pandoc's `Math` AST nodes, and applies KaTeX to each of those. This
|
||||
should work, but wasn't applicable in my case, since Hugo's shortcodes
|
||||
don't mix well with Pandoc. However, it certainly seems like a workable
|
||||
solution.
|
||||
|
||||
### Conclusion
|
||||
With the removal of MathJax from my site, it is now completely JavaScript free,
|
||||
and contains virtually the same HTML that it did beforehand. This, I hope,
|
||||
makes it work better on devices where computational power is more limited.
|
||||
I also hope that it illustrates a general principle - it's very possible,
|
||||
and plausible, to render LaTeX on the back-end for a static site.
|
||||
312
content/blog/boolean_values.md
Normal file
@@ -0,0 +1,312 @@
|
||||
---
|
||||
title: "How Many Values Does a Boolean Have?"
|
||||
date: 2020-08-21T23:05:55-07:00
|
||||
tags: ["Java", "Haskell", "C and C++"]
|
||||
favorite: true
|
||||
---
|
||||
|
||||
A friend of mine recently had an interview for a software
|
||||
engineering position. They later recounted to me the content
|
||||
of the technical questions that they had been asked. Some had
|
||||
been pretty standard:
|
||||
|
||||
* __"What's the difference between concurrency
|
||||
and parallelism?"__ -- a reasonable question given that Go was
|
||||
the company's language of choice.
|
||||
* __"What's the difference between a method and a function?"__ --
|
||||
a little more strange, in my opinion, since the difference
|
||||
is of little _practical_ use.
|
||||
|
||||
But then, they recounted a rather interesting question:
|
||||
|
||||
> How many values does a bool have?
|
||||
|
||||
Innocuous at first, isn't it? Probably a bit simpler, in fact,
|
||||
than the questions about methods and functions, concurrency
|
||||
and parallelism. It's plausible that a candidate
|
||||
has not done much concurrent or parallel programming in their
|
||||
life, or that they came from a language in which functions
|
||||
were rare and methods were ubiquitous. It's not plausible,
|
||||
on the other hand, that a candidate applying to a software
|
||||
engineering position has not encountered booleans.
|
||||
|
||||
If you're genuinely unsure about the answer to the question,
|
||||
I think there's no reason for me to mess with you. The
|
||||
simple answer to the question -- as far as I know -- is that a boolean
|
||||
has two values. They are `true` and `false` in Java, or `True` and `False`
|
||||
in Haskell, and `1` and `0` in C. A boolean value is either true or false.
|
||||
|
||||
So, what's there to think about? There are a few things, _ackshually_.
|
||||
Let's explore them, starting from the theoretical perspective.
|
||||
|
||||
### Types, Values, and Expressions
|
||||
Boolean, or `bool`, is a type. Broadly speaking, a type
|
||||
is a property of _something_ that defines what the _something_
|
||||
means and what you can do with it. That _something_ can be
|
||||
several things; for our purposes, it can either be an
|
||||
_expression_ in a programming language (like those in the form `fact(n)`)
|
||||
or a value in that same programming language (like `5`).
|
||||
|
||||
Dealing with values is rather simple. Most languages have finite numbers,
|
||||
usually with \\(2^{32}\\) values, which have type `int`,
|
||||
`i32`, or something in a similar vein. Most languages also have
|
||||
strings, of which there are as many as you have memory to contain,
|
||||
and which have the type `string`, `String`, or occasionally
|
||||
the more confusing `char*`. Most languages also have booleans,
|
||||
as we discussed above.
|
||||
|
||||
The deal with expressions is more interesting. Presumably
|
||||
expressions evaluate to values, and the type of an expression
|
||||
is then the type of values it can yield. Consider the following
|
||||
snippet in C++:
|
||||
|
||||
```C
|
||||
int square(int x) {
|
||||
return x * x;
|
||||
}
|
||||
```
|
||||
|
||||
Here, the expression `x` is known to have type `int` from
|
||||
the type signature provided by the user. Multiplication
|
||||
of integers yields an integer, and so the type of `x*x` is also
|
||||
of type `int`. Since `square(x)` returns `x*x`, it is also
|
||||
of type `int`. So far, so good.
|
||||
|
||||
Okay, how about this:
|
||||
|
||||
```C++
|
||||
int meaningOfLife() {
|
||||
return meaningOfLife();
|
||||
}
|
||||
```
|
||||
|
||||
No, wait, don't say "stack overflow" just yet. That's no fun.
|
||||
And anyway, this is technically a tail call, so maybe our
|
||||
C++ compiler can avoid growing the stack. And indeed,
|
||||
flicking on the `-O2` flag in this [compiler explorer example](https://godbolt.org/z/9cv4nY),
|
||||
we can see that no stack growth is necessary: it's just
|
||||
an infinite loop. But `meaningOfLife` will never return a value. One could say
|
||||
this computation _diverges_.
|
||||
|
||||
Well, if it diverges, just throw the expression out of the window! That's
|
||||
no `int`! We only want _real_ `int`s!
|
||||
|
||||
And here, we can do that. But what about the following:
|
||||
|
||||
```C++
|
||||
inf_int collatz(inf_int x) {
|
||||
if(x == 1) return 1;
|
||||
if(x % 2 == 0) return collatz(x/2);
|
||||
return collatz(x * 3 + 1);
|
||||
}
|
||||
```
|
||||
|
||||
Notice that I've used the fictitious type
|
||||
`inf_int` to represent integers that can hold
|
||||
arbitrarily large integer values, not just the 32-bit ones.
|
||||
That is important for this example, and I'll explain why shortly.
|
||||
|
||||
The code in the example is a simulation of the process described
|
||||
in the [Collatz conjecture](https://en.wikipedia.org/wiki/Collatz_conjecture).
|
||||
Given an input number `x`, if the number is even, it's divided in half,
|
||||
and the process continues with the halved number. If, on the other
|
||||
hand, the number is odd, it's multiplied by 3, 1 is added to it,
|
||||
and the process continues with _that_ number. The only way for the
|
||||
process to terminate is for the computation to reach the value 1.
|
||||
|
||||
Why does this matter? Because as of right now, __nobody knows__
|
||||
whether or not the process terminates for all possible input numbers.
|
||||
We have a strong hunch that it does; we've checked a __lot__
|
||||
of numbers and found that the process terminates for them.
|
||||
This is why 32-bit integers are not truly sufficient for this example;
|
||||
we know empirically that the function will terminate for them.
|
||||
|
||||
But why does _this_ matter? Well, it matters because we don't know
|
||||
whether or not this function will diverge, and thus, we can't
|
||||
'throw it out of the window' like we wanted to with `meaningOfLife`!
|
||||
In general, it's _impossible to tell_ whether or not a program will
|
||||
terminate; that is the [halting problem](https://en.wikipedia.org/wiki/Halting_problem).
|
||||
So, what do we do?
|
||||
|
||||
It turns out to be convenient -- formally -- to treat the result of a diverging computation
|
||||
as its own value. This value is usually called 'bottom', and written as \\(\\bot\\).
|
||||
Since in most programming languages, you can write a nonterminating expression or
|
||||
function of any type, this 'bottom' is included in _all_ types. So in fact, the
|
||||
possible values of `unsigned int` are \\(\\bot, 0, 1, 2, ...\\) and so on.
|
||||
As you may have by now guessed, the same is true for a boolean: we have \\(\\bot\\), `true`, and `false`.
|
||||
|
||||
### Haskell and Bottom
|
||||
You may be thinking:
|
||||
|
||||
> Now he's done it; he's gone off the deep end with all that programming language
|
||||
theory. Tell me, Daniel, where the heck have you ever encountered \\(\\bot\\) in
|
||||
code? This question was for a software engineering interview, after all!
|
||||
|
||||
You're right; I haven't _specifically_ seen the symbol \\(\\bot\\) in my time
|
||||
programming. But I have frequently used an equivalent notation for the same idea:
|
||||
`undefined`. In fact, here's a possible definition of `undefined` in Haskell:
|
||||
|
||||
```
|
||||
undefined = undefined
|
||||
```
|
||||
|
||||
Just like `meaningOfLife`, this is a divergent computation! What's more is that
|
||||
the type of this computation is, in Haskell, `a`. More explicitly -- and retreating
|
||||
to more mathematical notation -- we can write this type as: \\(\\forall \\alpha . \\alpha\\).
|
||||
That is, for any type \\(\\alpha\\), `undefined` has that type! This means
|
||||
`undefined` can take on _any_ type, and so, we can write:
|
||||
|
||||
```Haskell
|
||||
myTrue :: Bool
|
||||
myTrue = True
|
||||
|
||||
myFalse :: Bool
|
||||
myFalse = False
|
||||
|
||||
myBool :: Bool
|
||||
myBool = undefined
|
||||
```
|
||||
|
||||
In Haskell, this is quite useful. For instance, if one's in the middle
|
||||
of writing a complicated function, and wants to check their work so far,
|
||||
they can put 'undefined' for the part of the function they haven't written.
|
||||
They can then compile their program; the typechecker will find any mistakes
|
||||
they've made so far, but, since the type of `undefined` can be _anything_,
|
||||
that part of the program will be accepted without second thought.
|
||||
|
||||
The language Idris extends this practice with the idea of typed holes,
|
||||
where you can leave fragments of your program unwritten, and ask the
|
||||
compiler what kind of _thing_ you need to write to fill that hole.
|
||||
|
||||
### Java and `null`
|
||||
Now you may be thinking:
|
||||
|
||||
> This whole deal with Haskell's `undefined` is beside the point; it doesn't
|
||||
really count as a value, since it's just a nonterminating
|
||||
expression. What you're doing is a kind of academic autofellatio.
|
||||
|
||||
Alright, I can accept this criticism. Perhaps just calling a nonterminating
|
||||
function a value _is_ far-fetched (even though in [denotational semantics](https://en.wikipedia.org/wiki/Denotational_semantics)
|
||||
we _do_ extend types with \\(\\bot\\)). But denotational semantics are not
|
||||
the only place where types are implicitly extended with an extra value;
|
||||
let's look at Java.
|
||||
|
||||
In Java, we have `null`. At the
|
||||
core language level, any function or method that accepts a class can also take `null`;
|
||||
if `null` is not to that function or method's liking, it has to
|
||||
explicitly check for it using `if(x == null)`.
|
||||
|
||||
This `null` value does not at first interact with booleans.
|
||||
After all, Java's booleans are not classes. Unlike classes, which you have
|
||||
to allocate using `new`, you can just throw around `true` and `false` as you see
|
||||
fit. Also unlike classes, you simply can't assign `null` to a boolean value.
|
||||
|
||||
The trouble is, the parts of Java dealing with _generics_, which allow you to write
|
||||
polymorphic functions, can't handle 'primitives' like `bool`. If you want to have an `ArrayList`
|
||||
of something, that something _must_ be a class.
|
||||
But what if you really _do_ want an `ArrayList` of booleans? Java solves this problem by introducing
|
||||
'boxed' booleans: they're primitives wrapped in a class, called `Boolean`. This class
|
||||
can then be used for generics.
|
||||
|
||||
But see, this is where `null` has snuck in again. By allowing `Boolean` to be a class
|
||||
(thereby granting it access to generics), we've also given it the ability to be null.
|
||||
This example is made especially compelling because Java supports something
|
||||
they call [autoboxing](https://docs.oracle.com/javase/tutorial/java/data/autoboxing.html):
|
||||
you can directly assign a primitive to a variable of the corresponding boxed type.
|
||||
Consider the example:
|
||||
|
||||
```Java
|
||||
Boolean myTrue = true;
|
||||
Boolean myFalse = false;
|
||||
Boolean myBool = null;
|
||||
```
|
||||
|
||||
Beautiful, isn't it? Better yet, unlike Haskell, where you can't _really_
|
||||
check if your `Bool` is `undefined` (because you can't tell whether
|
||||
a non-terminating computation is as such), you can very easily
|
||||
check if your `Boolean` is `true`, `false`, or `null`:
|
||||
|
||||
```Java
|
||||
assert myTrue != myFalse;
|
||||
assert myFalse != myBool;
|
||||
assert myTrue != myBool;
|
||||
```
|
||||
|
||||
We're okay to use `!=` here, instead of `equals`, because it so happens
|
||||
each boxed instance of a `boolean` value
|
||||
[refers to the same `Boolean` object](https://stackoverflow.com/questions/28636738/equality-of-boxed-boolean).
|
||||
In fact, this means that a `Boolean` variable can have __exactly__ 3 values!
|
||||
|
||||
### C and Integers
|
||||
Oh the luxury of having a type representing booleans in your language!
|
||||
It's almost overly indulgent compared to the spartan minimalism of C.
|
||||
In C, boolean conditions are represented as numbers. You can perhaps get
|
||||
away with throwing around `char` or `short int`, but even then,
|
||||
these types allow far more values than two!
|
||||
|
||||
```C
|
||||
unsigned char test = 255;
|
||||
while(test) test -= 1;
|
||||
```
|
||||
|
||||
This loop will run 255 times, thereby demonstrating
|
||||
that C has at least 255 values that can be used
|
||||
to represent the boolean `true`.
|
||||
|
||||
There are other languages
|
||||
with this notion of 'truthy' and 'falsey' values, in which
|
||||
something not exactly `true` or `false` can be used as a condition. However,
|
||||
some of them differ from C in that they also extend this idea
|
||||
to equality. In JavaScript:
|
||||
|
||||
```JavaScript
|
||||
console.assert(true == 1)
|
||||
console.assert(false == 0)
|
||||
```
|
||||
|
||||
Then, there are still exactly two distinct boolean values
|
||||
modulo `==`. No such luck in C, though! We have 256 values that fit in `unsigned char`,
|
||||
all of which are also distinct modulo `==`. Our boolean
|
||||
variable can contain all of these values. And there is no
|
||||
respite to be found with `enum`s, either. We could try to define:
|
||||
|
||||
```C
|
||||
enum bool { TRUE, FALSE };
|
||||
```
|
||||
|
||||
Unfortunately, all this does is define `bool` to be a numeric
|
||||
type that can hold at least 2 distinct values, and define
|
||||
numeric constants `TRUE` and `FALSE`. So in fact, you can
|
||||
_still_ write the following code:
|
||||
|
||||
```C
|
||||
enum bool b1 = TRUE;
|
||||
enum bool b2 = FALSE;
|
||||
enum bool b3 = 15;
|
||||
```
|
||||
|
||||
And so, no matter how hard you try, your 'boolean'
|
||||
variable can have many, many values!
|
||||
|
||||
### Conclusion
|
||||
I think that 'how many values does a boolean have' is a strange
|
||||
question. Its purpose can be one of two things:
|
||||
|
||||
* The interviewer expected a long-form response such as this one.
|
||||
This is a weird expectation for a software engineering candidate -
|
||||
how does knowing about \\(\\bot\\), `undefined`, or `null` help in
|
||||
creating software, especially if this information is irrelevant
|
||||
to the company's language of choice?
|
||||
* The interviewer expected the simple answer. In that case,
|
||||
my previous observation applies: what software engineering
|
||||
candidate has _not_ seen a boolean in their time programming?
|
||||
Surely candidates are better screened before they are offered
|
||||
an interview?
|
||||
|
||||
Despite the question's weirdness, I think that the resulting
|
||||
investigation of the matter -- outside of the interview setting --
|
||||
is useful, and perhaps, in a way, enlightening. It may help
|
||||
one understand the design choices made in _their_ language of choice,
|
||||
and how those choices shape the code that they write.
|
||||
|
||||
That's all I have! I hope that you found it interesting.
|
||||
BIN
content/blog/codelines/example.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
268
content/blog/codelines/index.md
Normal file
@@ -0,0 +1,268 @@
|
||||
---
|
||||
title: "Pleasant Code Includes with Hugo"
|
||||
date: 2021-01-13T21:31:29-08:00
|
||||
tags: ["Hugo"]
|
||||
---
|
||||
|
||||
Ever since I started [the compiler series]({{< relref "00_compiler_intro.md" >}}),
|
||||
I began to include more and more fragments of code into my blog.
|
||||
I didn't want to be copy-pasting my code between my project
|
||||
and my Markdown files, so I quickly wrote up a Hugo [shortcode](https://gohugo.io/content-management/shortcodes/)
|
||||
to pull in other files in the local directory. I've since improved on this
|
||||
some more, so I thought I'd share what I created with others.
|
||||
|
||||
### Including Entire Files and Lines
|
||||
My needs for snippets were modest at first. For the most part,
|
||||
I had a single code file that I wanted to present, so it was
|
||||
acceptable to plop it in the middle of my post in one piece.
|
||||
The shortcode for that was quite simple:
|
||||
|
||||
```
|
||||
{{ highlight (readFile (printf "code/%s" (.Get 1))) (.Get 0) "" }}
|
||||
```
|
||||
|
||||
This leverages Hugo's built-in [`highlight`](https://gohugo.io/functions/highlight/)
|
||||
function to provide syntax highlighting to the included snippet. Hugo
|
||||
doesn't guess at the language of the code, so you have to manually provide
|
||||
it. Calling this shortcode looks as follows:
|
||||
|
||||
```
|
||||
{{</* codeblock "C++" "compiler/03/type.hpp" */>}}
|
||||
```
|
||||
|
||||
Note that this implicitly adds the `code/` prefix to all
|
||||
the files I include. This is a personal convention: I want
|
||||
all my code to be inside a dedicated directory.
|
||||
|
||||
Of course, including entire files only takes you so far.
|
||||
What if you only need to discuss a small part of your code?
|
||||
Alternatively, what if you want to present code piece-by-piece,
|
||||
in the style of literate programming? I quickly ran into the
|
||||
need to do this, for which I wrote another shortcode:
|
||||
|
||||
```
|
||||
{{ $s := (readFile (printf "code/%s" (.Get 1))) }}
|
||||
{{ $t := split $s "\n" }}
|
||||
{{ if not (eq (int (.Get 2)) 1) }}
|
||||
{{ .Scratch.Set "u" (after (sub (int (.Get 2)) 1) $t) }}
|
||||
{{ else }}
|
||||
{{ .Scratch.Set "u" $t }}
|
||||
{{ end }}
|
||||
{{ $v := first (add (sub (int (.Get 3)) (int (.Get 2))) 1) (.Scratch.Get "u") }}
|
||||
{{ if (.Get 4) }}
|
||||
{{ .Scratch.Set "opts" (printf ",%s" (.Get 4)) }}
|
||||
{{ else }}
|
||||
{{ .Scratch.Set "opts" "" }}
|
||||
{{ end }}
|
||||
{{ highlight (delimit $v "\n") (.Get 0) (printf "linenos=table,linenostart=%d%s" (.Get 2) (.Scratch.Get "opts")) }}
|
||||
```
|
||||
|
||||
This shortcode takes a language and a filename as before, but it also takes
|
||||
the numbers of the first and last lines indicating the part of the code that should be included. After
|
||||
splitting the contents of the file into lines, it throws away all lines before and
|
||||
after the window of code that you want to include. It seems to me (from my commit history)
|
||||
that Hugo's [`after`](https://gohugo.io/functions/after/) function (which should behave
|
||||
similarly to Haskell's `drop`) doesn't like to be given an argument of `0`.
|
||||
I had to add a special case for when this would occur, where I simply do not invoke `after` at all.
|
||||
The shortcode can be used as follows:
|
||||
|
||||
```
|
||||
{{</* codelines "C++" "compiler/04/ast.cpp" 19 22 */>}}
|
||||
```
|
||||
|
||||
To support a fuller range of Hugo's functionality, I also added an optional argument that
|
||||
accepts Hugo's Chroma settings. This way, I can do things like highlight certain
|
||||
lines in my code snippet, which is done as follows:
|
||||
|
||||
```
|
||||
{{</* codelines "Idris" "typesafe-interpreter/TypesafeIntrV3.idr" 31 39 "hl_lines=7 8 9" */>}}
|
||||
```
|
||||
|
||||
Note that the `hl_lines` field doesn't seem to work properly with `linenostart`, which means
|
||||
that the highlighted lines are counted from 1 no matter what. This is why in the above snippet,
|
||||
although I include lines 31 through 39, I feed lines 7, 8, and 9 to `hl_lines`. It's unusual,
|
||||
but hey, it works!
|
||||
|
||||
### Linking to Referenced Code
|
||||
Some time after implementing my initial system for including lines of code,
|
||||
I got an email from a reader who pointed out that it was hard for them to find
|
||||
the exact file I was referencing, and to view the surrounding context of the
|
||||
presented lines. To address this, I decided that I'd include the link
|
||||
to the file in question. After all, my website and all the associated
|
||||
code is on a [Git server I host](https://dev.danilafe.com/Web-Projects/blog-static),
|
||||
so any local file I'm referencing should -- assuming it was properly committed --
|
||||
show up there, too. I hardcoded the URL of the `code` directory on the web interface,
|
||||
and appended the relative path of each included file to it. The shortcode came out as follows:
|
||||
|
||||
```
|
||||
{{ $s := (readFile (printf "code/%s" (.Get 1))) }}
|
||||
{{ $t := split $s "\n" }}
|
||||
{{ if not (eq (int (.Get 2)) 1) }}
|
||||
{{ .Scratch.Set "u" (after (sub (int (.Get 2)) 1) $t) }}
|
||||
{{ else }}
|
||||
{{ .Scratch.Set "u" $t }}
|
||||
{{ end }}
|
||||
{{ $v := first (add (sub (int (.Get 3)) (int (.Get 2))) 1) (.Scratch.Get "u") }}
|
||||
{{ if (.Get 4) }}
|
||||
{{ .Scratch.Set "opts" (printf ",%s" (.Get 4)) }}
|
||||
{{ else }}
|
||||
{{ .Scratch.Set "opts" "" }}
|
||||
{{ end }}
|
||||
<div class="highlight-group">
|
||||
<div class="highlight-label">From <a href="https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/{{ .Get 1 }}">{{ path.Base (.Get 1) }}</a>,
|
||||
{{ if eq (.Get 2) (.Get 3) }}line {{ .Get 2 }}{{ else }} lines {{ .Get 2 }} through {{ .Get 3 }}{{ end }}</div>
|
||||
{{ highlight (delimit $v "\n") (.Get 0) (printf "linenos=table,linenostart=%d%s" (.Get 2) (.Scratch.Get "opts")) }}
|
||||
</div>
|
||||
```
|
||||
|
||||
This results in code blocks like the one in the image below. The image
|
||||
is the result of the `codelines` call for the Idris language, presented above.
|
||||
|
||||
{{< figure src="example.png" caption="An example of how the code looks." class="medium" >}}
|
||||
|
||||
I got a lot of mileage out of this setup . . . until I wanted to include code from _other_ git repositories.
|
||||
For instance, I wanted to talk about my [Advent of Code](https://adventofcode.com/) submissions,
|
||||
without having to copy-paste the code into my blog repository!
|
||||
|
||||
### Code from Submodules
|
||||
My first thought when including code from other repositories was to use submodules.
|
||||
This has the added advantage of "pinning" the version of the code I'm talking about,
|
||||
which means that even if I push significant changes to the other repository, the code
|
||||
in my blog will remain the same. This, in turn, means that all of my `codelines`
|
||||
shortcodes will work as intended.
|
||||
|
||||
The problem is, most Git web interfaces (my own included) don't display paths corresponding
|
||||
to submodules. Thus, even if all my code is checked out and Hugo correctly
|
||||
pulls the selected lines into its HTML output, the _links to the file_ remain
|
||||
broken!
|
||||
|
||||
There's no easy way to address this, particularly because _different submodules
|
||||
can be located on different hosts_! The Git URL used for a submodule is
|
||||
not known to Hugo (since, to the best of my knowledge, it can't run
|
||||
shell commands), and it could reside on `dev.danilafe.com`, or `github.com`,
|
||||
or elsewhere. Fortunately, it's fairly easy to tell when a file is part
|
||||
of a submodule, and which submodule that is. It's sufficient to find
|
||||
the longest submodule path that matches the selected file. If no
|
||||
submodule path matches, then the file is part of the blog repository,
|
||||
and no special action is needed.
|
||||
|
||||
Of course, this means that Hugo needs to be made aware of the various
|
||||
submodules in my repository. It also needs to be aware of the submodules
|
||||
_inside_ those submodules, and so on: it needs to be recursive. Git
|
||||
has a command to list all submodules recursively:
|
||||
|
||||
```Bash
|
||||
git submodule status --recursive
|
||||
```
|
||||
|
||||
However, this only prints the commit, submodule path, and the upstream branch.
|
||||
I don't think there's a way to list the remotes' URLs with this command; however,
|
||||
we do _need_ the URLs, since that's how we create links to the Git web interfaces.
|
||||
|
||||
There's another issue: how do we let Hugo know about the various submodules,
|
||||
even if we can find them? Hugo can read files, but doing any serious
|
||||
text processing is downright impractical. However, Hugo
|
||||
itself is not able to run commands, so it needs to be able to read in
|
||||
the output of another command that _can_ find submodules.
|
||||
|
||||
I settled on using Hugo's `params` configuration option. This
|
||||
allows users to communicate arbitrary properties to Hugo themes
|
||||
and templates. In my case, I want to communicate a collection
|
||||
of submodules. I didn't know about TOML's inline tables, so
|
||||
I decided to represent this collection as a map of (meaningless)
|
||||
submodule names to tables:
|
||||
|
||||
```TOML
|
||||
[params]
|
||||
[params.submoduleLinks]
|
||||
[params.submoduleLinks.aoc2020]
|
||||
url = "https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020/src/commit/7a8503c3fe1aa7e624e4d8672aa9b56d24b4ba82"
|
||||
path = "aoc-2020"
|
||||
```
|
||||
|
||||
Since it was seemingly impossible to wrangle Git into outputting
|
||||
all of this information using one command, I decided
|
||||
to write a quick Ruby script to generate a list of submodules
|
||||
as follows. I had to use `cd` in one of my calls to Git
|
||||
because Git's `--git-dir` option doesn't seem to work
|
||||
with submodules, treating them like a "bare" checkout.
|
||||
I also chose to use an allowlist of remote URLs,
|
||||
since the URL format for linking to files in a
|
||||
particular repository differs from service to service.
|
||||
For now, I only use my own Git server, so only `dev.danilafe.com`
|
||||
is allowed; however, just by adding `elsif`s to my code,
|
||||
I can add other services in the future.
|
||||
|
||||
```Ruby
|
||||
puts "[params]"
|
||||
puts " [params.submoduleLinks]"
|
||||
|
||||
def each_submodule(base_path)
|
||||
`cd #{base_path} && git submodule status`.lines do |line|
|
||||
hash, path = line[1..].split " "
|
||||
full_path = "#{base_path}/#{path}"
|
||||
url = `git config --file #{base_path}/.gitmodules --get 'submodule.#{path}.url'`.chomp.delete_suffix(".git")
|
||||
safe_name = full_path.gsub(/\/|-|_\./, "")
|
||||
|
||||
if url =~ /dev.danilafe.com/
|
||||
file_url = "#{url}/src/commit/#{hash}"
|
||||
else
|
||||
raise "Submodule URL #{url.dump} not in a known format!"
|
||||
end
|
||||
|
||||
yield ({ :path => full_path, :url => file_url, :name => safe_name })
|
||||
each_submodule(full_path) { |m| yield m }
|
||||
end
|
||||
end
|
||||
|
||||
each_submodule(".") do |m|
|
||||
next unless m[:path].start_with? "./code/"
|
||||
puts " [params.submoduleLinks.#{m[:name].delete_prefix(".code")}]"
|
||||
puts " url = #{m[:url].dump}"
|
||||
puts " path = #{m[:path].delete_prefix("./code/").dump}"
|
||||
end
|
||||
```
|
||||
|
||||
I pipe the output of this script into a separate configuration file
|
||||
called `config-gen.toml`, and then run Hugo as follows:
|
||||
|
||||
```
|
||||
hugo --config config.toml,config-gen.toml
|
||||
```
|
||||
|
||||
Finally, I had to modify my shortcode to find and handle the longest submodule prefix.
|
||||
Here's the relevant portion, and you can
|
||||
[view the entire file here](https://dev.danilafe.com/Web-Projects/blog-static/src/commit/bfeae89ab52d1696c4a56768b7f0c6682efaff82/themes/vanilla/layouts/shortcodes/codelines.html).
|
||||
|
||||
```
|
||||
{{ .Scratch.Set "bestLength" -1 }}
|
||||
{{ .Scratch.Set "bestUrl" (printf "https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/%s" (.Get 1)) }}
|
||||
{{ $filePath := (.Get 1) }}
|
||||
{{ $scratch := .Scratch }}
|
||||
{{ range $module, $props := .Site.Params.submoduleLinks }}
|
||||
{{ $path := index $props "path" }}
|
||||
{{ $bestLength := $scratch.Get "bestLength" }}
|
||||
{{ if and (le $bestLength (len $path)) (hasPrefix $filePath $path) }}
|
||||
{{ $scratch.Set "bestLength" (len $path) }}
|
||||
{{ $scratch.Set "bestUrl" (printf "%s%s" (index $props "url") (strings.TrimPrefix $path $filePath)) }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
```
|
||||
|
||||
And that's what I'm using at the time of writing!
|
||||
|
||||
### Conclusion
|
||||
My current system for code includes allows me to do the following
|
||||
things:
|
||||
|
||||
* Include entire files or sections of files into the page. This
|
||||
saves me from having to copy and paste code manually, which
|
||||
is error prone and can cause inconsistencies.
|
||||
* Provide links to the files I reference on my Git interface.
|
||||
This allows users to easily view the entire file that I'm talking about.
|
||||
* Correctly link to files in repositories other than my blog
|
||||
repository, when they are included using submodules. This means
|
||||
I don't need to manually copy and update code from other projects.
|
||||
|
||||
I hope some of these shortcodes and scripts come in handy for someone else.
|
||||
Thank you for reading!
|
||||
BIN
content/blog/dell_is_horrible/brokenkey.jpg
Normal file
|
After Width: | Height: | Size: 94 KiB |
BIN
content/blog/dell_is_horrible/brokenlcd.jpg
Normal file
|
After Width: | Height: | Size: 476 KiB |
BIN
content/blog/dell_is_horrible/dm_1.png
Normal file
|
After Width: | Height: | Size: 158 KiB |
BIN
content/blog/dell_is_horrible/dm_2.png
Normal file
|
After Width: | Height: | Size: 204 KiB |
BIN
content/blog/dell_is_horrible/dm_3.png
Normal file
|
After Width: | Height: | Size: 81 KiB |
BIN
content/blog/dell_is_horrible/dm_4.png
Normal file
|
After Width: | Height: | Size: 94 KiB |
BIN
content/blog/dell_is_horrible/dm_5.png
Normal file
|
After Width: | Height: | Size: 102 KiB |
381
content/blog/dell_is_horrible/index.md
Normal file
@@ -0,0 +1,381 @@
|
||||
---
|
||||
title: DELL Is A Horrible Company And You Should Avoid Them At All Costs
|
||||
date: 2020-07-23T13:40:05-07:00
|
||||
tags: ["Electronics"]
|
||||
---
|
||||
|
||||
I really do not want this to be a consumer electronics blog. Such things
|
||||
aren't interesting to me, and nor do I have much knowledge
|
||||
about them. However, sometimes, ripples from these areas make their way
|
||||
into my life, and this is one such instance. Let me tell you
|
||||
{{< sidenote "right" "source-note" "a story" >}}
|
||||
I originally wrote about this in
|
||||
<a href="https://www.dell.com/community/XPS/Ridiculously-Bad-Support-Experience/td-p/7554383">a thread on DELL's support website</a>. Some of this post is
|
||||
going to be adapted from the support website, but some things have happened
|
||||
since. You will probably notice the change between the terse language I used
|
||||
in the original post and the fresh text that I'm writing now.
|
||||
{{< /sidenote >}} of
|
||||
my experience with DELL and their XPS 2-in-1 laptop, which has gone on since
|
||||
around January of 2020, and is still going at the time of writing, in July
|
||||
2020, half a year later.
|
||||
|
||||
I was, until recently, an undergraduate student in Computer Science. I will
|
||||
soon be starting my Masters in Computer Science, too. I say this to make one
|
||||
thing clear: I need a computer. Not only is it a necessity for my major,
|
||||
but the majority of my hobbies -- including this blog -- are digital, too.
|
||||
Since my university is a couple of hours from my home, I travel back and forth
|
||||
a lot. I also have a cozy little spot in the
|
||||
{{< sidenote "right" "offices-note" "graduate student offices" >}}
|
||||
They're a bunch of cubicles in a keycard-protected room, really. Nothing fancy.
|
||||
{{< /sidenote >}}at my university, but travel by bus, so I find myself spending
|
||||
roughly equal portions of my work time at home and 'elsewhere'. A laptop
|
||||
as my primary machine, I thought, made sense. But it had to be a decent one.
|
||||
Persuaded by one of my instructors, who stressed the importance of vision and
|
||||
a decent screen, I settled on a DELL XPS, which at the time came with a 4k
|
||||
display.
|
||||
|
||||
As is commonplace, things went great at first. The screen _was_ really nice,
|
||||
all of my code compiled swiftly, and even the games I occasionally played ran
|
||||
at a solid 60fps. I was happy with my purchase.
|
||||
|
||||
There was one hiccup before things went really downhill, a sort of
|
||||
foreshadowing of things to come. My trackpad didn't work at peculiar times.
|
||||
|
||||
### Prologue: Trackpad Hiccups
|
||||
While working, booted into Linux, I noticed that my trackpad was having some
|
||||
trouble. It was stuttering, and occasionally wouldn't work at all for seconds
|
||||
at a time. I assumed that this was a problem with the trackpad drivers on
|
||||
Linux, or perhaps the whole system was freezing up. I rebooted, and the
|
||||
problem went away.
|
||||
|
||||
Until it came back.
|
||||
|
||||
A few days later, my trackpad was freezing virtually every minute.
|
||||
It was strange, but fortunately, I'm used to a keyboard-based workflow, and
|
||||
the malfunctions did not affect me too much. It was just a little troubling.
|
||||
What soon made it more troubling, was that I noticed this exact same issue
|
||||
occurring on Windows. To me, this meant one dreadful thing: it was a hardware
|
||||
issue.
|
||||
|
||||
I poked and prodded for a little bit, and finally discovered the cause:
|
||||
whenever I put my hand on the left palmrest, the trackpad would reliably stop
|
||||
working. Knowing what the issue was, I called DELL. I spoke to a guy on the
|
||||
other end, who had me run through diagnostics, driver updates, and BIOS
|
||||
settings (I imagined this was procedure, so I didn't mind doing the extra
|
||||
work to make the other guy's job easier). Finally, he scheduled a repair
|
||||
appointment. A technician came into my house, took off the laptop cover,
|
||||
and said something along the lines of:
|
||||
|
||||
> Now look. They gave me a whole new motherboard and case to replace yours,
|
||||
but in my personal opinion, this is a bad idea. Things are bound to break
|
||||
when you do this. See how the replacement case has an insulating piece
|
||||
of fabric under the left palmrest, and yours doesn't? Why don't we rip
|
||||
the fabric off the replacement case, and tape it in place on your machine,
|
||||
without any reassembly?
|
||||
|
||||
This man was wiser than any of the other DELL technicians, I now understand.
|
||||
The repair went without a hitch. He grilled me for going to college instead of
|
||||
just picking up a trade, which was cheaper and offered more job security.
|
||||
In the end, I felt a little weird about having a piece of fabric duct taped
|
||||
inside my computer, but the trackpad had no more issues ever since. All was
|
||||
well.
|
||||
|
||||
### Service Request 1: Broken D Key
|
||||
All was well, that is, until the middle of winter term. I was typing up an
|
||||
assignment for a university class. I was working as usual, when I suddenly
|
||||
noticed that the "d" key stopped working - it had to be pressed rather weird
|
||||
to register on the computer. I looked down, and discovered that the key had
|
||||
snapped in half. The top part of the key fell off shortly thereafter.
|
||||
|
||||
{{< figure src="brokenkey.jpg" caption="The broken D key shortly after the above events." >}}
|
||||
|
||||
At that point, I was more surprised than anything. I hadn't heard of something
|
||||
like this ever happening, especially under circumstances as normal as typing.
|
||||
Regardless, I contacted support, and set up a repair appointment. Things only
|
||||
went downhill from there.
|
||||
|
||||
Again, the appointment was scheduled, and only a few days later, another
|
||||
technician arrived at my house. The only way to repair the key, he said,
|
||||
was to replace the whole keyboard. The keyboard happens to be located
|
||||
underneath all the other hardware, and so, the entire laptop had to be
|
||||
disassembled and reassembled from scratch. He worked for about an hour, and
|
||||
eventually, he put the machine together. The words of the previous
|
||||
technician, who wanted to avoid doing exactly what had just been done, echoed
|
||||
in my head:
|
||||
|
||||
> Things are bound to break when you do this.
|
||||
|
||||
I asked him to test it, just to make sure everything works. Sure enough,
|
||||
not everything did work: the machine no longer had sound!
|
||||
|
||||
### Service Request 2: No sound
|
||||
During diagnostics, the laptop did not emit the "beep" it usually does. This
|
||||
was the first sign. Booting into Windows, the sound icon was crossed out in
|
||||
red, and no sound was present. Booting into Linux led to similar results.
|
||||
The microphone on the machine did not seem to work either. The service
|
||||
technician said that he didn't have the parts to repair it, told me he'd call
|
||||
it in, and left. Soon after, I got an email asking for times I'm available to
|
||||
call: I said "any time except for 1-4 pacific time". DELL support proceeded
|
||||
to call me at 3pm pacific time, when I had no service. Unable to reach me,
|
||||
they promptly notified me that they are archiving my service request.
|
||||
|
||||
This all occurred near finals week at my university, so I had to put the issue
|
||||
on hold. I had to maintain my grades, and I had to grade heaps of assignments
|
||||
from other students. Though the lack of sound was annoying, it wasn't as
|
||||
pressing as preparing for exams, so it was during spring break that I finally
|
||||
called again, and scheduled the service appointment. By then,
|
||||
{{< sidenote "right" "pandemic-note" "the pandemic was in full swing," >}}
|
||||
Just for posterity, in 2020, there had been an outbreak of COVID-19,
|
||||
a Coronavirus. Many states in the U.S., including my own, issued
|
||||
the orders for lockdown and social distancing, which meant the closing
|
||||
of schools, restaurants, and, apparently, the cessation of in-person
|
||||
repairs.
|
||||
{{< /sidenote >}}and DELL told me they'd mail me a box to put my laptop in, and
|
||||
I'd have to mail it off to their service center. Sure, I thought, that's
|
||||
fine. If it's at the service center, they won't ever "not have the required
|
||||
parts". I told the tech support person my address, he read it back to me, and
|
||||
so it was settled.
|
||||
|
||||
Until, that is, the box arrived at the wrong address.
|
||||
|
||||
I had received the machine as a gift from my family, who purchased the
|
||||
computer to arrive at their address. The box arrived at that address too,
|
||||
despite my explicit instructions to have it delivered to my current residence.
|
||||
Since my family and I live 2 hours apart, it took 4 total hours to get the box
|
||||
to me (a drive that couldn't be made right away!), and by the time I had it,
|
||||
DELL was already threatening me again with closing the service request.
|
||||
Eventually, I was able to mail the machine off, and about 5 business days
|
||||
later (business days during which I did not have a working machine, which is
|
||||
very necessary for my school and job) I received it back. I was excited to
|
||||
have the machine back, but that didn't last very long. As I was using the
|
||||
computer with Wolfram Mathematica (a rather heavy piece of software running
|
||||
under Linux), I noticed that it was discharging even while plugged in. I
|
||||
booted into Windows, and was greeted with a warning, something along the
|
||||
lines of: "you are using a slow charger. Please use the official adapter".
|
||||
But I was using the official adapter! I also tried to plug my mouse into the
|
||||
relevant USB-C port, only to discover that it did not work. I had to make
|
||||
another service request.
|
||||
|
||||
### Service Request 3: Broken Charging Port
|
||||
This time, I made sure to tell the person on the other end of the support
|
||||
call to please send it to my address. I asked if there was anything I can do,
|
||||
or anyone I can contact, and was told "no, just mail the computer in again."
|
||||
I obliged. The box arrived at the right address this time, so I was able to
|
||||
ship it off.
|
||||
|
||||
In the "describe your issue" field on the provided form, I begged the
|
||||
technicians to send me a working machine. "Please", I wrote "Last time I got
|
||||
a machine back from support, it was still broken. I really need it for school
|
||||
and work!". 5 business days later, I received the machine back. I plugged it
|
||||
in to make sure it worked, only to find out . . . that the very same charging
|
||||
port that I requested be repaired, is still broken! It would've been funny,
|
||||
if it wasn't infuriating. How is it possible for me to receive a machine from
|
||||
repairs, without the thing I asked to repair being as much as improved?!
|
||||
|
||||
Worse, a day after I received the machine back (I was able to keep using it
|
||||
thanks to it having two USB-C ports capable of charging), the LCD suddenly
|
||||
flashed, and started flickering. Thinking it was a software glitch, I
|
||||
restarted the machine, only to discover the same flickering during the boot
|
||||
animation and menu. Not only was the charging port not repaired, but now my
|
||||
LCD was broken! (in the below picture, the screen is meant to be blue, but
|
||||
the bottom part of the display is purple and flickering).
|
||||
|
||||
{{< figure src="brokenlcd.jpg" caption="The broken LCD." >}}
|
||||
|
||||
### Service Request 4: Broken LCD
|
||||
I called in to support again, and they once again told me to ship the machine
|
||||
off. What's worse, they accused me of breaking the port myself, and told me
|
||||
this was no longer covered under basic warranty. I had to explain all over
|
||||
again that the port worked fine before the fateful day the D-key snapped. They
|
||||
told me they'd "look into it". Eventually, I received a box in the mail. I
|
||||
wasn't told I would be receiving a box, but that wasn't a big deal. I mailed
|
||||
off the machine.
|
||||
|
||||
The UPS shipping was always the most streamlined part of the process. A day
|
||||
later, I was told my machine was received intact. Another day, and I was
|
||||
informed that the technicians are starting to work on it. And then,
|
||||
a few hours later:
|
||||
|
||||
> __Current Status:__
|
||||
> The part(s) needed to repair your system are not currently in stock.
|
||||
> __What's Next:__
|
||||
> In most cases the parts are available is less than five days.
|
||||
|
||||
A few days is no big deal, and it made sense that DELL wouldn't just
|
||||
have screens lying around. So I waited. And waited. And waited. Two weeks
|
||||
later, I got a little tired of waiting, and called the repair center.
|
||||
An automated message told me:
|
||||
|
||||
> We're currently experiencing heavy call volumes. Please try again later. Goodbye.
|
||||
|
||||
And the call was dropped. This happened every time I tried to call, no matter
|
||||
the hour. The original status update -- the one that notified me about the
|
||||
part shortage -- came on May 8th, but the machine finally arrived to me
|
||||
(without prior warning) on June 2nd, almost a month later.
|
||||
|
||||
The charging port worked. Sound
|
||||
worked. The screen wasn't flickering. I was happy for the brief moments that
|
||||
my computer was loading. As soon as I started vim, though, I noticed something
|
||||
was off: the fonts looked more pixelated. The DPI settings I'd painstakingly
|
||||
tweaked were wrong. Now that I thought about it, even the GRUB menu was
|
||||
larger. My suspicion growing, I booted into Windows, and looked at the display
|
||||
settings. Noticeably fewer resolutions were listed in the drop-down menu;
|
||||
worse, the highest resolution was 1080p. After almost a month of waiting,
|
||||
DELL replaced my 4k laptop display with a 1080p one.
|
||||
|
||||
### System Replacement: Worse LCD Screen
|
||||
|
||||
I admit, I was angry. At the same time, the absurdity of it all was also
|
||||
unbearable. Was this constant loop of hardware damage, the endless number of
|
||||
support calls filled with hoarse jazz music, part of some kind of Kafkaesque
|
||||
dream? I didn't know. I was at the end of my wits as to what to do. As a last
|
||||
resort, I made [a tweet](https://twitter.com/DanilaTheWalrus/status/1268056637383692289)
|
||||
from my almost-abandoned account. DELL Support's Twitter
|
||||
account [quickly responded](https://twitter.com/DellCares/status/1268064691416334344), eager as always to destroy any semblance of
|
||||
transparency by switching to private messages. I let them know my thoughts on the matter. I wanted a new machine.
|
||||
|
||||
{{< figure src="dm_1.png" caption="The first real exchange between me and DELL support." >}}
|
||||
|
||||
Of course we can proceed further. I wanted to know what kind of machine I was getting,
|
||||
though. As long as it was the same model that I originally bought,
|
||||
{{< sidenote "right" "replacement-note" "it would be better than what I have." >}}
|
||||
At least in principle, it would be. Perhaps the wear and tear on the replacement
|
||||
parts would be greater, but at least I would have, presumably, a machine
|
||||
in good condition that had the 4k screen that made me buy it in the first place.
|
||||
{{< /sidenote >}}
|
||||
Despite this, I knew that the machine I was getting was likely refurbished.
|
||||
This _had_ to mean that some of the parts would come from other, used, machines.
|
||||
This irked me, because, well, I paid for a new machine.
|
||||
|
||||
{{< figure src="dm_2.png" caption="Ah, the classic use of canned responses." >}}
|
||||
|
||||
Their use of the canned response, and their unwillingness to answer this simple
|
||||
question, was transparent. Indeed, the machine would be made of used
|
||||
parts. I still wanted to proceed. DELL requested that I send an image of
|
||||
my machine which included its service tag, together with a piece of
|
||||
paper which included my name and email address. I obliged, and quickly got a response:
|
||||
|
||||
{{< figure src="dm_3.png" caption="If it was me who was silent for 4 days, my request would've long been cancelled. " >}}
|
||||
|
||||
Thanks, Kalpana. You will never hear this name again, not in this post.
|
||||
Only one or two messages from DELL support are ever from the same person.
|
||||
About a week later, I get the following beauty:
|
||||
|
||||
{{< figure src="dm_4.png" caption="Excuse me? What's going on?" >}}
|
||||
|
||||
My initial request was cancelled? Why wasn't I told? What was the reason?
|
||||
What the heck was going on at DELL Support? Should I be worried?
|
||||
My question of "Why" was answered with the apt response of "Yes",
|
||||
and a message meant to pacify me. While this was going on, I ordered
|
||||
a
|
||||
{{< sidenote "right" "pinebook-note" "Pinebook Pro." >}}
|
||||
The Pinebook – a $200 machine – has, thus far, worked more reliably than any DELL product
|
||||
I've had the misfortune of owning.
|
||||
{{< /sidenote >}} It was not a replacement for the DELL machine, but rather
|
||||
the first step towards migrating my setup to a stationary computer,
|
||||
and a small, lightweight SSH device. At this point,
|
||||
there was no more faith in DELL left in my mind.
|
||||
|
||||
Soon, DELL required my attention, only to tell me that they put in
|
||||
a request to see the status of my request. How bureaucratic. Two
|
||||
more names -- Kareem and JKC -- flickered through the chats,
|
||||
also never to be seen again.
|
||||
|
||||
{{< figure src="dm_5.png" caption="Not much of a conversation, really." >}}
|
||||
|
||||
Finally, on July 9th (a month and six days after my first real message to DELL
|
||||
support), I was notified by my roommates that FedEx tried to deliver a package
|
||||
to our house, but gave up when no one came to sign for it. On one hand, this
|
||||
is great: FedEx didn't just leave my laptop on the porch. On the other hand,
|
||||
though, this was the first time I heard about receiving the machine. I got
|
||||
to the house the next day, unpacked the new computer, and tested all the things
|
||||
that had, at one point, failed. Everything seemed to work. I transferred all my
|
||||
files, wiped the old computer clean, and mailed it off. I also spent some
|
||||
time dealing with the fallout of DELL PremierColor starting on its own,
|
||||
and permanently altering the color profile of my display. I don't have the
|
||||
special, physical calibration device, and therefore still suspect that my
|
||||
screen is somewhat green.
|
||||
|
||||
Today, I discovered that the microphone of the replacement machine didn't work.
|
||||
|
||||
### Am I The Problem?
|
||||
When the mysterious FedEx package arrived at my door on July 9th, I did some
|
||||
digging to verify my suspicion that it was from DELL. I discovered their
|
||||
HQ in Lebanon, TN. This gave me an opportunity to
|
||||
{{< sidenote "right" "reviews-note" "see" >}}
|
||||
See, of course, modulo whatever bias arises when only those who feel strongly leave reviews.
|
||||
{{< /sidenote >}} whether or not I was alone in this situation. I was genuinely
|
||||
worried that I was suffering from the technical variant of
|
||||
[Munchausen Syndrome](https://www.webmd.com/mental-health/munchausen-syndrome#1),
|
||||
and that I was compulsively breaking my electronics. These worries were
|
||||
dispelled by the reviews on Google:
|
||||
|
||||
{{< figure src="reviews_1.png" caption="Most of the reviews are pretty terse, but the ratings convey the general idea." >}}
|
||||
|
||||
There were even some that were shockingly similar in terms of the apparent
|
||||
incompetence of the DELL technicians:
|
||||
|
||||
{{< figure src="reviews_2.png" caption="Now, now, Maggie, I wouldn't go as far as recommending Apple." >}}
|
||||
|
||||
So, this is not uncommon. This is how DELL deals with customers now. It's
|
||||
awfully tiring, really; I've been in and out of repairs continuously for
|
||||
almost half a year, now. That's 2.5% of my life at the time of writing,
|
||||
all non-stop since the D-key. And these people probably have spent considerable
|
||||
amounts of time, too.
|
||||
|
||||
### It's About the Principle
|
||||
The microphone on my machine is rather inconsequential to me. I can, and regularly do,
|
||||
teleconference from my phone (a habit that I developed thanks to DELL, since
|
||||
my computer was so often unavailable). I don't need to dictate anything. Most
|
||||
of my communication is via chat.
|
||||
|
||||
Really, compared to the other issues (keyboard, sound, charging, USB ports, the broken or low-resolution screen)
|
||||
the microphone is a benign problem. As I have now learned, things could be worse.
|
||||
|
||||
But why should the thought, _"It could be worse"_, even cross my mind
|
||||
when dealing with such a matter? Virtually every issue that has
|
||||
occurred with my computer thus far could -- should! -- have been diagnosed
|
||||
at the repair center. The 'slow charger' warning shows up in BIOS,
|
||||
so just turning the computer on while plugged in should make it obvious something
|
||||
is wrong; doubly so when the very reason that the laptop was in repairs
|
||||
in the first place was because of the faulty charger. I refuse to believe
|
||||
that screens with different resolutions have the same part identifier,
|
||||
either. How have the standards of service from DELL fallen so low?
|
||||
How come this absurd scenario plays out not just for me, but
|
||||
for others as well? It would be comforting, in a way, to think
|
||||
that I was just the 'exceptional case'. But apparently, I'm not.
|
||||
This is standard practice.
|
||||
|
||||
### Tl;DR
|
||||
Here are the problems I've had with DELL:
|
||||
|
||||
* The machine shipped, apparently, with a missing piece of insulation.
|
||||
* The "D" key on the keyboard snapped after only a few months of use.
|
||||
* While repairing the "D" key, the DELL technician broke the computer's sound and microphone.
|
||||
* While repairing the sound and microphone, the DELL technicians broke a charging port.
|
||||
* The DELL technicians failed to repair the charging port, mailing me back a machine
|
||||
exhibiting the same issues, in addition to a broken LCD screen.
|
||||
* The repair of the LCD screen took almost a month, and concluded
|
||||
with me receiving a worse quality screen than I originally had.
|
||||
* The system replacement that followed the botched LCD repair took
|
||||
over a month to go through.
|
||||
* The replaced system was made partially of used parts, which
|
||||
DELL refused to admit.
|
||||
* The microphone on the replacement system was broken.
|
||||
|
||||
### Closing Thoughts
|
||||
I will not be sending my system in again. It doesn't make sense to do so -
|
||||
after mailing my system in for repairs three times, I've measured empirically that
|
||||
the chance of failure is 100%. Every service request is a lottery, dutifully
|
||||
giving out a random prize of another broken part. I no longer wish to play;
|
||||
as any person who gambles should, I will quit while I'm ahead, and cut my losses.
|
||||
However, I hope for this story, which may be unusual in its level of detail,
|
||||
but not its content, to be seen by someone. I hope to prevent
|
||||
someone out there from feeling the frustration, and anger, and peculiar amusement
|
||||
that I felt during this process. I hope for someone else to purchase a computer
|
||||
with money, and not with their sanity. A guy can hope.
|
||||
|
||||
If you're reading this, please take this as a warning. __DELL is a horrible
|
||||
company. They have the lowest standards of customer support of any
|
||||
U.S. company that I've encountered. Their technicians are largely incompetent.
|
||||
Their quality assurance is non-existent. Stay away from them.__
|
||||