Compare commits
67 Commits
search
...
4d24e7095b
| Author | SHA1 | Date | |
|---|---|---|---|
| 4d24e7095b | |||
| 6c1940f5d2 | |||
| 30c395151d | |||
| d72e64c7f9 | |||
| abdc8e5056 | |||
| bc754c7a7d | |||
| 84ad8d43b5 | |||
| e440630497 | |||
| 71689fce79 | |||
| e7185ff460 | |||
| 18f493675a | |||
| 0c004b2e85 | |||
| c214d9ee37 | |||
| 72259c16a9 | |||
| 66b656ada5 | |||
| 46e4ca3948 | |||
| f2bf2fb025 | |||
| 50d48deec1 | |||
| 3c905aa1d7 | |||
| d5f478b3c6 | |||
| 0f96b93532 | |||
| 5449affbc8 | |||
| 2cf19900db | |||
| efe5d08430 | |||
| 994e9ed8d2 | |||
| 72af5cb7f0 | |||
| 308ee34025 | |||
| 9839befdf1 | |||
| d688df6c92 | |||
| 24eef25984 | |||
| 77ae0be899 | |||
| ca939da28e | |||
| 5d0920cb6d | |||
| d1ea7b5364 | |||
| ebdb986e2a | |||
| 4bb6695c2e | |||
| a6c5a42c1d | |||
| c44c718d06 | |||
| 5e4097453b | |||
| bfeae89ab5 | |||
| 755364c0df | |||
| dcb1e9a736 | |||
| c8543961af | |||
| cbad3b76eb | |||
| b3ff2fe135 | |||
| 6a6f25547e | |||
| 43dfee56cc | |||
| 6f9a2ce092 | |||
| 06014eade9 | |||
| 6f92a50c83 | |||
| 60eb50737d | |||
| 250746e686 | |||
| 3bac151b08 | |||
| c61d9ccb99 | |||
| 56ad03b833 | |||
| 2f9e6278ba | |||
| 17e0fbc6fb | |||
| 7ee7feadf3 | |||
| b36ea558a3 | |||
| 17d6a75465 | |||
| d5541bc985 | |||
| 98a46e9fd4 | |||
| 2e3074df00 | |||
| b3dc3e690b | |||
| b1943ede2f | |||
| 0467e4e12f | |||
| 8164624cee |
9
.gitmodules
vendored
Normal file
9
.gitmodules
vendored
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
[submodule "code/aoc-2020"]
|
||||||
|
path = code/aoc-2020
|
||||||
|
url = https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020.git
|
||||||
|
[submodule "code/libabacus"]
|
||||||
|
path = code/libabacus
|
||||||
|
url = https://dev.danilafe.com/Experiments/libabacus
|
||||||
|
[submodule "themes/vanilla"]
|
||||||
|
path = themes/vanilla
|
||||||
|
url = https://dev.danilafe.com/Web-Projects/vanilla-hugo.git
|
||||||
56
assets/scss/donate.scss
Normal file
56
assets/scss/donate.scss
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
@import "../../themes/vanilla/assets/scss/mixins.scss";
|
||||||
|
|
||||||
|
.donation-methods {
|
||||||
|
padding: 0;
|
||||||
|
border: none;
|
||||||
|
border-spacing: 0 0.5rem;
|
||||||
|
|
||||||
|
td {
|
||||||
|
padding: 0;
|
||||||
|
overflow: hidden;
|
||||||
|
|
||||||
|
&:first-child {
|
||||||
|
@include bordered-block;
|
||||||
|
text-align: right;
|
||||||
|
border-right: none;
|
||||||
|
border-top-right-radius: 0;
|
||||||
|
border-bottom-right-radius: 0;
|
||||||
|
padding-left: 0.5em;
|
||||||
|
padding-right: 0.5rem;
|
||||||
|
|
||||||
|
@include below-container-width {
|
||||||
|
@include bordered-block;
|
||||||
|
text-align: center;
|
||||||
|
border-bottom: none;
|
||||||
|
border-bottom-left-radius: 0;
|
||||||
|
border-bottom-right-radius: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
&:last-child {
|
||||||
|
@include bordered-block;
|
||||||
|
border-top-left-radius: 0;
|
||||||
|
border-bottom-left-radius: 0;
|
||||||
|
|
||||||
|
@include below-container-width {
|
||||||
|
@include bordered-block;
|
||||||
|
border-top-left-radius: 0;
|
||||||
|
border-top-right-radius: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tr {
|
||||||
|
@include below-container-width {
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code {
|
||||||
|
width: 100%;
|
||||||
|
box-sizing: border-box;
|
||||||
|
border: none;
|
||||||
|
display: inline-block;
|
||||||
|
padding: 0.25rem;
|
||||||
|
}
|
||||||
|
}
|
||||||
1
code/aoc-2020
Submodule
1
code/aoc-2020
Submodule
Submodule code/aoc-2020 added at 7a8503c3fe
@@ -1,102 +0,0 @@
|
|||||||
Require Import Coq.Lists.List.
|
|
||||||
Require Import Omega.
|
|
||||||
|
|
||||||
Definition has_pair (t : nat) (is : list nat) : Prop :=
|
|
||||||
exists n1 n2 : nat, n1 <> n2 /\ In n1 is /\ In n2 is /\ n1 + n2 = t.
|
|
||||||
|
|
||||||
Fixpoint find_matching (is : list nat) (total : nat) (x : nat) : option nat :=
|
|
||||||
match is with
|
|
||||||
| nil => None
|
|
||||||
| cons y ys =>
|
|
||||||
if Nat.eqb (x + y) total
|
|
||||||
then Some y
|
|
||||||
else find_matching ys total x
|
|
||||||
end.
|
|
||||||
|
|
||||||
Fixpoint find_sum (is : list nat) (total : nat) : option (nat * nat) :=
|
|
||||||
match is with
|
|
||||||
| nil => None
|
|
||||||
| cons x xs =>
|
|
||||||
match find_matching xs total x with
|
|
||||||
| None => find_sum xs total (* Was buggy! *)
|
|
||||||
| Some y => Some (x, y)
|
|
||||||
end
|
|
||||||
end.
|
|
||||||
|
|
||||||
Lemma find_matching_correct : forall is k x y,
|
|
||||||
find_matching is k x = Some y -> x + y = k.
|
|
||||||
Proof.
|
|
||||||
intros is. induction is;
|
|
||||||
intros k x y Hev.
|
|
||||||
- simpl in Hev. inversion Hev.
|
|
||||||
- simpl in Hev. destruct (Nat.eqb (x+a) k) eqn:Heq.
|
|
||||||
+ injection Hev as H; subst.
|
|
||||||
apply EqNat.beq_nat_eq. auto.
|
|
||||||
+ apply IHis. assumption.
|
|
||||||
Qed.
|
|
||||||
|
|
||||||
Lemma find_matching_skip : forall k x y i is,
|
|
||||||
find_matching is k x = Some y -> find_matching (cons i is) k x = Some y.
|
|
||||||
Proof.
|
|
||||||
intros k x y i is Hsmall.
|
|
||||||
simpl. destruct (Nat.eqb (x+i) k) eqn:Heq.
|
|
||||||
- apply find_matching_correct in Hsmall.
|
|
||||||
symmetry in Heq. apply EqNat.beq_nat_eq in Heq.
|
|
||||||
assert (i = y). { omega. } rewrite H. reflexivity.
|
|
||||||
- assumption.
|
|
||||||
Qed.
|
|
||||||
|
|
||||||
Lemma find_matching_works : forall is k x y, In y is /\ x + y = k ->
|
|
||||||
find_matching is k x = Some y.
|
|
||||||
Proof.
|
|
||||||
intros is. induction is;
|
|
||||||
intros k x y [Hin Heq].
|
|
||||||
- inversion Hin.
|
|
||||||
- inversion Hin.
|
|
||||||
+ subst a. simpl. Search Nat.eqb.
|
|
||||||
destruct (Nat.eqb_spec (x+y) k).
|
|
||||||
* reflexivity.
|
|
||||||
* exfalso. apply n. assumption.
|
|
||||||
+ apply find_matching_skip. apply IHis.
|
|
||||||
split; assumption.
|
|
||||||
Qed.
|
|
||||||
|
|
||||||
Theorem find_sum_works :
|
|
||||||
forall k is, has_pair k is ->
|
|
||||||
exists x y, (find_sum is k = Some (x, y) /\ x + y = k).
|
|
||||||
Proof.
|
|
||||||
intros k is. generalize dependent k.
|
|
||||||
induction is; intros k [x' [y' [Hneq [Hinx [Hiny Hsum]]]]].
|
|
||||||
- (* is is empty. But x is in is! *)
|
|
||||||
inversion Hinx.
|
|
||||||
- (* is is not empty. *)
|
|
||||||
inversion Hinx.
|
|
||||||
+ (* x is the first element. *)
|
|
||||||
subst a. inversion Hiny.
|
|
||||||
* (* y is also the first element; but this is impossible! *)
|
|
||||||
exfalso. apply Hneq. apply H.
|
|
||||||
* (* y is somewhere in the rest of the list.
|
|
||||||
We've proven that we will find it! *)
|
|
||||||
exists x'. simpl.
|
|
||||||
erewrite find_matching_works.
|
|
||||||
{ exists y'. split. reflexivity. assumption. }
|
|
||||||
{ split; assumption. }
|
|
||||||
+ (* x is not the first element. *)
|
|
||||||
inversion Hiny.
|
|
||||||
* (* y is the first element,
|
|
||||||
so x is somewhere in the rest of the list.
|
|
||||||
Again, we've proven that we can find it. *)
|
|
||||||
subst a. exists y'. simpl.
|
|
||||||
erewrite find_matching_works.
|
|
||||||
{ exists x'. split. reflexivity. rewrite plus_comm. assumption. }
|
|
||||||
{ split. assumption. rewrite plus_comm. assumption. }
|
|
||||||
* (* y is not the first element, either.
|
|
||||||
Of course, there could be another matching pair
|
|
||||||
starting with a. Otherwise, the inductive hypothesis applies. *)
|
|
||||||
simpl. destruct (find_matching is k a) eqn:Hf.
|
|
||||||
{ exists a. exists n. split.
|
|
||||||
reflexivity.
|
|
||||||
apply find_matching_correct with is. assumption. }
|
|
||||||
{ apply IHis. unfold has_pair. exists x'. exists y'.
|
|
||||||
repeat split; assumption. }
|
|
||||||
Qed.
|
|
||||||
179
code/dawn/Dawn.v
Normal file
179
code/dawn/Dawn.v
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
Require Import Coq.Lists.List.
|
||||||
|
From Ltac2 Require Import Ltac2.
|
||||||
|
|
||||||
|
Inductive intrinsic :=
|
||||||
|
| swap
|
||||||
|
| clone
|
||||||
|
| drop
|
||||||
|
| quote
|
||||||
|
| compose
|
||||||
|
| apply.
|
||||||
|
|
||||||
|
Inductive expr :=
|
||||||
|
| e_int (i : intrinsic)
|
||||||
|
| e_quote (e : expr)
|
||||||
|
| e_comp (e1 e2 : expr).
|
||||||
|
|
||||||
|
Definition e_compose (e : expr) (es : list expr) := fold_left e_comp es e.
|
||||||
|
|
||||||
|
Inductive IsValue : expr -> Prop :=
|
||||||
|
| Val_quote : forall {e : expr}, IsValue (e_quote e).
|
||||||
|
|
||||||
|
Definition value := { v : expr & IsValue v }.
|
||||||
|
Definition value_stack := list value.
|
||||||
|
|
||||||
|
Definition v_quote (e : expr) := existT IsValue (e_quote e) Val_quote.
|
||||||
|
|
||||||
|
Inductive Sem_int : value_stack -> intrinsic -> value_stack -> Prop :=
|
||||||
|
| Sem_swap : forall (v v' : value) (vs : value_stack), Sem_int (v' :: v :: vs) swap (v :: v' :: vs)
|
||||||
|
| Sem_clone : forall (v : value) (vs : value_stack), Sem_int (v :: vs) clone (v :: v :: vs)
|
||||||
|
| Sem_drop : forall (v : value) (vs : value_stack), Sem_int (v :: vs) drop vs
|
||||||
|
| Sem_quote : forall (v : value) (vs : value_stack), Sem_int (v :: vs) quote ((v_quote (projT1 v)) :: vs)
|
||||||
|
| Sem_compose : forall (e e' : expr) (vs : value_stack), Sem_int (v_quote e' :: v_quote e :: vs) compose (v_quote (e_comp e e') :: vs)
|
||||||
|
| Sem_apply : forall (e : expr) (vs vs': value_stack), Sem_expr vs e vs' -> Sem_int (v_quote e :: vs) apply vs'
|
||||||
|
|
||||||
|
with Sem_expr : value_stack -> expr -> value_stack -> Prop :=
|
||||||
|
| Sem_e_int : forall (i : intrinsic) (vs vs' : value_stack), Sem_int vs i vs' -> Sem_expr vs (e_int i) vs'
|
||||||
|
| Sem_e_quote : forall (e : expr) (vs : value_stack), Sem_expr vs (e_quote e) (v_quote e :: vs)
|
||||||
|
| Sem_e_comp : forall (e1 e2 : expr) (vs1 vs2 vs3 : value_stack),
|
||||||
|
Sem_expr vs1 e1 vs2 -> Sem_expr vs2 e2 vs3 -> Sem_expr vs1 (e_comp e1 e2) vs3.
|
||||||
|
|
||||||
|
Definition false : expr := e_quote (e_int drop).
|
||||||
|
Definition false_v : value := v_quote (e_int drop).
|
||||||
|
|
||||||
|
Definition true : expr := e_quote (e_comp (e_int swap) (e_int drop)).
|
||||||
|
Definition true_v : value := v_quote (e_comp (e_int swap) (e_int drop)).
|
||||||
|
|
||||||
|
Theorem false_correct : forall (v v' : value) (vs : value_stack), Sem_expr (v' :: v :: vs) (e_comp false (e_int apply)) (v :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v v' vs.
|
||||||
|
eapply Sem_e_comp.
|
||||||
|
- apply Sem_e_quote.
|
||||||
|
- apply Sem_e_int. apply Sem_apply. apply Sem_e_int. apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem true_correct : forall (v v' : value) (vs : value_stack), Sem_expr (v' :: v :: vs) (e_comp true (e_int apply)) (v' :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v v' vs.
|
||||||
|
eapply Sem_e_comp.
|
||||||
|
- apply Sem_e_quote.
|
||||||
|
- apply Sem_e_int. apply Sem_apply. eapply Sem_e_comp.
|
||||||
|
* apply Sem_e_int. apply Sem_swap.
|
||||||
|
* apply Sem_e_int. apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Definition or : expr := e_comp (e_int clone) (e_int apply).
|
||||||
|
|
||||||
|
Theorem or_false_v : forall (v : value) (vs : value_stack), Sem_expr (false_v :: v :: vs) or (v :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v vs.
|
||||||
|
eapply Sem_e_comp...
|
||||||
|
- apply Sem_clone.
|
||||||
|
- apply Sem_apply... apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem or_true : forall (v : value) (vs : value_stack), Sem_expr (true_v :: v :: vs) or (true_v :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v vs.
|
||||||
|
eapply Sem_e_comp...
|
||||||
|
- apply Sem_clone...
|
||||||
|
- apply Sem_apply. eapply Sem_e_comp...
|
||||||
|
* apply Sem_swap.
|
||||||
|
* apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Definition or_false_false := or_false_v false_v.
|
||||||
|
Definition or_false_true := or_false_v true_v.
|
||||||
|
Definition or_true_false := or_true false_v.
|
||||||
|
Definition or_true_true := or_true true_v.
|
||||||
|
|
||||||
|
Fixpoint quote_n (n : nat) :=
|
||||||
|
match n with
|
||||||
|
| O => e_int quote
|
||||||
|
| S n' => e_compose (quote_n n') (e_int swap :: e_int quote :: e_int swap :: e_int compose :: nil)
|
||||||
|
end.
|
||||||
|
|
||||||
|
Theorem quote_2_correct : forall (v1 v2 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v2 :: v1 :: vs) (quote_n 1) (v_quote (e_comp (projT1 v1) (projT1 v2)) :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v1 v2 vs. simpl.
|
||||||
|
repeat (eapply Sem_e_comp)...
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem quote_3_correct : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (quote_n 2) (v_quote (e_comp (projT1 v1) (e_comp (projT1 v2) (projT1 v3))) :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v1 v2 v3 vs. simpl.
|
||||||
|
repeat (eapply Sem_e_comp)...
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Ltac2 rec solve_basic () := Control.enter (fun _ =>
|
||||||
|
match! goal with
|
||||||
|
| [|- Sem_int ?vs1 swap ?vs2] => apply Sem_swap
|
||||||
|
| [|- Sem_int ?vs1 clone ?vs2] => apply Sem_clone
|
||||||
|
| [|- Sem_int ?vs1 drop ?vs2] => apply Sem_drop
|
||||||
|
| [|- Sem_int ?vs1 quote ?vs2] => apply Sem_quote
|
||||||
|
| [|- Sem_int ?vs1 compose ?vs2] => apply Sem_compose
|
||||||
|
| [|- Sem_int ?vs1 apply ?vs2] => apply Sem_apply
|
||||||
|
| [|- Sem_expr ?vs1 (e_comp ?e1 ?e2) ?vs2] => eapply Sem_e_comp; solve_basic ()
|
||||||
|
| [|- Sem_expr ?vs1 (e_int ?e) ?vs2] => apply Sem_e_int; solve_basic ()
|
||||||
|
| [|- Sem_expr ?vs1 (e_quote ?e) ?vs2] => apply Sem_e_quote
|
||||||
|
| [_ : _ |- _] => ()
|
||||||
|
end).
|
||||||
|
|
||||||
|
Theorem quote_2_correct' : forall (v1 v2 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v2 :: v1 :: vs) (quote_n 1) (v_quote (e_comp (projT1 v1) (projT1 v2)) :: vs).
|
||||||
|
Proof. intros. simpl. solve_basic (). Qed.
|
||||||
|
|
||||||
|
Theorem quote_3_correct' : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (quote_n 2) (v_quote (e_comp (projT1 v1) (e_comp (projT1 v2) (projT1 v3))) :: vs).
|
||||||
|
Proof. intros. simpl. solve_basic (). Qed.
|
||||||
|
|
||||||
|
Definition rotate_n (n : nat) := e_compose (quote_n n) (e_int swap :: e_int quote :: e_int compose :: e_int apply :: nil).
|
||||||
|
|
||||||
|
Lemma eval_value : forall (v : value) (vs : value_stack),
|
||||||
|
Sem_expr vs (projT1 v) (v :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v vs.
|
||||||
|
destruct v. destruct i.
|
||||||
|
simpl. apply Sem_e_quote.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem rotate_3_correct : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (rotate_n 1) (v1 :: v3 :: v2 :: vs).
|
||||||
|
Proof.
|
||||||
|
intros. unfold rotate_n. simpl. solve_basic ().
|
||||||
|
repeat (eapply Sem_e_comp); apply eval_value.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem rotate_4_correct : forall (v1 v2 v3 v4 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v4 :: v3 :: v2 :: v1 :: vs) (rotate_n 2) (v1 :: v4 :: v3 :: v2 :: vs).
|
||||||
|
Proof.
|
||||||
|
intros. unfold rotate_n. simpl. solve_basic ().
|
||||||
|
repeat (eapply Sem_e_comp); apply eval_value.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem e_comp_assoc : forall (e1 e2 e3 : expr) (vs vs' : value_stack),
|
||||||
|
Sem_expr vs (e_comp e1 (e_comp e2 e3)) vs' <-> Sem_expr vs (e_comp (e_comp e1 e2) e3) vs'.
|
||||||
|
Proof.
|
||||||
|
intros e1 e2 e3 vs vs'.
|
||||||
|
split; intros Heval.
|
||||||
|
- inversion Heval; subst. inversion H4; subst.
|
||||||
|
eapply Sem_e_comp. eapply Sem_e_comp. apply H2. apply H3. apply H6.
|
||||||
|
- inversion Heval; subst. inversion H2; subst.
|
||||||
|
eapply Sem_e_comp. apply H3. eapply Sem_e_comp. apply H6. apply H4.
|
||||||
|
Qed.
|
||||||
223
code/dawn/DawnEval.v
Normal file
223
code/dawn/DawnEval.v
Normal file
@@ -0,0 +1,223 @@
|
|||||||
|
Require Import Coq.Lists.List.
|
||||||
|
Require Import DawnV2.
|
||||||
|
Require Import Coq.Program.Equality.
|
||||||
|
From Ltac2 Require Import Ltac2.
|
||||||
|
|
||||||
|
Inductive step_result :=
|
||||||
|
| err
|
||||||
|
| middle (e : expr) (s : value_stack)
|
||||||
|
| final (s : value_stack).
|
||||||
|
|
||||||
|
Fixpoint eval_step (s : value_stack) (e : expr) : step_result :=
|
||||||
|
match e, s with
|
||||||
|
| e_int swap, v' :: v :: vs => final (v :: v' :: vs)
|
||||||
|
| e_int clone, v :: vs => final (v :: v :: vs)
|
||||||
|
| e_int drop, v :: vs => final vs
|
||||||
|
| e_int quote, v :: vs => final (v_quote (value_to_expr v) :: vs)
|
||||||
|
| e_int compose, (v_quote v2) :: (v_quote v1) :: vs => final (v_quote (e_comp v1 v2) :: vs)
|
||||||
|
| e_int apply, (v_quote v1) :: vs => middle v1 vs
|
||||||
|
| e_quote e', vs => final (v_quote e' :: vs)
|
||||||
|
| e_comp e1 e2, vs =>
|
||||||
|
match eval_step vs e1 with
|
||||||
|
| final vs' => middle e2 vs'
|
||||||
|
| middle e1' vs' => middle (e_comp e1' e2) vs'
|
||||||
|
| err => err
|
||||||
|
end
|
||||||
|
| _, _ => err
|
||||||
|
end.
|
||||||
|
|
||||||
|
Theorem eval_step_correct : forall (e : expr) (vs vs' : value_stack), Sem_expr vs e vs' ->
|
||||||
|
(eval_step vs e = final vs') \/
|
||||||
|
(exists (ei : expr) (vsi : value_stack),
|
||||||
|
eval_step vs e = middle ei vsi /\
|
||||||
|
Sem_expr vsi ei vs').
|
||||||
|
Proof.
|
||||||
|
intros e vs vs' Hsem.
|
||||||
|
(* Proceed by induction on the semantics. *)
|
||||||
|
induction Hsem.
|
||||||
|
- inversion H; (* The expression is just an intrnsic. *)
|
||||||
|
(* Dismiss all the straightforward "final" cases,
|
||||||
|
of which most intrinsics are. *)
|
||||||
|
try (left; reflexivity).
|
||||||
|
(* Only apply remains; We are in an intermediate / middle case. *)
|
||||||
|
right.
|
||||||
|
(* The semantics guarantee that the expression in the
|
||||||
|
quote evaluates to the final state. *)
|
||||||
|
exists e, vs0. auto.
|
||||||
|
- (* The expression is a quote. This is yet another final case. *)
|
||||||
|
left; reflexivity.
|
||||||
|
- (* The composition is never a final step, since we have to evaluate both
|
||||||
|
branches to "finish up". *)
|
||||||
|
destruct IHHsem1; right.
|
||||||
|
+ (* If the left branch finihed, only the right branch needs to be evaluted. *)
|
||||||
|
simpl. rewrite H. exists e2, vs2. auto.
|
||||||
|
+ (* Otherwise, the left branch has an intermediate evaluation, guaranteed
|
||||||
|
by induction to be consitent. *)
|
||||||
|
destruct H as [ei [vsi [Heval Hsem']]].
|
||||||
|
(* We compose the remaining part of the left branch with the right branch. *)
|
||||||
|
exists (e_comp ei e2), vsi. simpl.
|
||||||
|
(* The evaluation is trivially to a "middle" state. *)
|
||||||
|
rewrite Heval. split. auto.
|
||||||
|
eapply Sem_e_comp. apply Hsem'. apply Hsem2.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Inductive eval_chain (vs : value_stack) (e : expr) (vs' : value_stack) : Prop :=
|
||||||
|
| chain_final (P : eval_step vs e = final vs')
|
||||||
|
| chain_middle (ei : expr) (vsi : value_stack)
|
||||||
|
(P : eval_step vs e = middle ei vsi) (rest : eval_chain vsi ei vs').
|
||||||
|
|
||||||
|
Lemma eval_chain_merge : forall (e1 e2 : expr) (vs vs' vs'' : value_stack),
|
||||||
|
eval_chain vs e1 vs' -> eval_chain vs' e2 vs'' -> eval_chain vs (e_comp e1 e2) vs''.
|
||||||
|
Proof.
|
||||||
|
intros e1 e2 vs vs' vs'' ch1 ch2.
|
||||||
|
induction ch1;
|
||||||
|
eapply chain_middle; simpl; try (rewrite P); auto.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Lemma eval_chain_split : forall (e1 e2 : expr) (vs vs'' : value_stack),
|
||||||
|
eval_chain vs (e_comp e1 e2) vs'' -> exists vs', (eval_chain vs e1 vs') /\ (eval_chain vs' e2 vs'').
|
||||||
|
Proof.
|
||||||
|
intros e1 e2 vs vss'' ch.
|
||||||
|
ltac1:(dependent induction ch).
|
||||||
|
- simpl in P. destruct (eval_step vs e1); inversion P.
|
||||||
|
- simpl in P. destruct (eval_step vs e1) eqn:Hval; try (inversion P).
|
||||||
|
+ injection P as Hinj; subst. specialize (IHch e e2 H0) as [s'0 [ch1 ch2]].
|
||||||
|
eexists. split.
|
||||||
|
* eapply chain_middle. apply Hval. apply ch1.
|
||||||
|
* apply ch2.
|
||||||
|
+ subst. eexists. split.
|
||||||
|
* eapply chain_final. apply Hval.
|
||||||
|
* apply ch.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem val_step_sem : forall (e : expr) (vs vs' : value_stack),
|
||||||
|
Sem_expr vs e vs' -> eval_chain vs e vs'
|
||||||
|
with eval_step_int : forall (i : intrinsic) (vs vs' : value_stack),
|
||||||
|
Sem_int vs i vs' -> eval_chain vs (e_int i) vs'.
|
||||||
|
Proof.
|
||||||
|
- intros e vs vs' Hsem.
|
||||||
|
induction Hsem.
|
||||||
|
+ (* This is an intrinsic, which is handled by the second
|
||||||
|
theorem, eval_step_int. This lemma is used here. *)
|
||||||
|
auto.
|
||||||
|
+ (* A quote doesn't have a next step, and so is final. *)
|
||||||
|
apply chain_final. auto.
|
||||||
|
+ (* In compoition, by induction, we know that the two sub-expressions produce
|
||||||
|
proper evaluation chains. Chains can be composed (via eval_chain_merge). *)
|
||||||
|
eapply eval_chain_merge with vs2; auto.
|
||||||
|
- intros i vs vs' Hsem.
|
||||||
|
(* The evaluation chain depends on the specific intrinsic in use. *)
|
||||||
|
inversion Hsem; subst;
|
||||||
|
(* Most intrinsics produce a final value, and the evaluation chain is trivial. *)
|
||||||
|
try (apply chain_final; auto; fail).
|
||||||
|
(* Only apply is non-final. The first step is popping the quote from the stack,
|
||||||
|
and the rest of the steps are given by the evaluation of the code in the quote. *)
|
||||||
|
apply chain_middle with e vs0; auto.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Ltac2 Type exn ::= [ | Not_intrinsic ].
|
||||||
|
|
||||||
|
Ltac2 rec destruct_n (n : int) (vs : constr) : unit :=
|
||||||
|
if Int.le n 0 then () else
|
||||||
|
let v := Fresh.in_goal @v in
|
||||||
|
let vs' := Fresh.in_goal @vs in
|
||||||
|
destruct $vs as [|$v $vs']; Control.enter (fun () =>
|
||||||
|
try (destruct_n (Int.sub n 1) (Control.hyp vs'))
|
||||||
|
).
|
||||||
|
|
||||||
|
Ltac2 int_arity (int : constr) : int :=
|
||||||
|
match! int with
|
||||||
|
| swap => 2
|
||||||
|
| clone => 1
|
||||||
|
| drop => 1
|
||||||
|
| quote => 1
|
||||||
|
| compose => 2
|
||||||
|
| apply => 1
|
||||||
|
| _ => Control.throw Not_intrinsic
|
||||||
|
end.
|
||||||
|
|
||||||
|
Ltac2 destruct_int_stack (int : constr) (va: constr) := destruct_n (int_arity int) va.
|
||||||
|
|
||||||
|
Ltac2 ensure_valid_stack () := Control.enter (fun () =>
|
||||||
|
match! goal with
|
||||||
|
| [h : eval_step ?a (e_int ?b) = ?c |- _] =>
|
||||||
|
let h := Control.hyp h in
|
||||||
|
destruct_int_stack b a;
|
||||||
|
try (inversion $h; fail)
|
||||||
|
| [|- _ ] => ()
|
||||||
|
end).
|
||||||
|
|
||||||
|
Theorem test : forall (vs vs': value_stack), eval_step vs (e_int swap) = final vs' ->
|
||||||
|
exists v1 v2 vs'', vs = v1 :: v2 :: vs'' /\ vs' = v2 :: v1 :: vs''.
|
||||||
|
Proof.
|
||||||
|
intros s s' Heq.
|
||||||
|
ensure_valid_stack ().
|
||||||
|
simpl in Heq. injection Heq as Hinj. subst. eauto.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem eval_step_final_sem : forall (e : expr) (vs vs' : value_stack),
|
||||||
|
eval_step vs e = final vs' -> Sem_expr vs e vs'.
|
||||||
|
Proof.
|
||||||
|
intros e vs vs' Hev. destruct e.
|
||||||
|
- destruct i; ensure_valid_stack ();
|
||||||
|
(* Get rid of trivial cases that match one-to-one. *)
|
||||||
|
simpl in Hev; try (injection Hev as Hinj; subst; solve_basic ()).
|
||||||
|
+ (* compose with one quoted value *)
|
||||||
|
destruct v. inversion Hev.
|
||||||
|
+ (* compose with two quoted values. *)
|
||||||
|
destruct v; destruct v0.
|
||||||
|
injection Hev as Hinj; subst; solve_basic ().
|
||||||
|
+ (* Apply is not final. *) destruct v. inversion Hev.
|
||||||
|
- (* Quote is always final, trivially. *)
|
||||||
|
simpl in Hev. injection Hev as Hinj; subst. solve_basic ().
|
||||||
|
- (* Compose is never final. *)
|
||||||
|
simpl in Hev. destruct (eval_step vs e1); inversion Hev.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem eval_step_middle_sem : forall (e ei: expr) (vs vsi vs' : value_stack),
|
||||||
|
eval_step vs e = middle ei vsi ->
|
||||||
|
Sem_expr vsi ei vs' ->
|
||||||
|
Sem_expr vs e vs'.
|
||||||
|
Proof.
|
||||||
|
intros e. induction e; intros ei vs vsi vs' Hev Hsem.
|
||||||
|
- destruct i; ensure_valid_stack ().
|
||||||
|
+ (* compose with one quoted value. *)
|
||||||
|
destruct v. inversion Hev.
|
||||||
|
+ (* compose with two quoted values. *)
|
||||||
|
destruct v; destruct v0. inversion Hev.
|
||||||
|
+ (* Apply *)
|
||||||
|
destruct v. injection Hev as Hinj; subst.
|
||||||
|
solve_basic (). auto.
|
||||||
|
- inversion Hev.
|
||||||
|
- simpl in Hev.
|
||||||
|
destruct (eval_step vs e1) eqn:Hev1.
|
||||||
|
+ inversion Hev.
|
||||||
|
+ injection Hev as Hinj; subst. inversion Hsem; subst.
|
||||||
|
specialize (IHe1 e vs vsi vs2 Hev1 H2).
|
||||||
|
eapply Sem_e_comp. apply IHe1. apply H4.
|
||||||
|
+ injection Hev as Hinj; subst.
|
||||||
|
specialize (eval_step_final_sem e1 vs vsi Hev1) as Hsem1.
|
||||||
|
eapply Sem_e_comp. apply Hsem1. apply Hsem.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem eval_step_sem_back : forall (e : expr) (vs vs' : value_stack),
|
||||||
|
eval_chain vs e vs' -> Sem_expr vs e vs'.
|
||||||
|
Proof.
|
||||||
|
intros e vs vs' ch.
|
||||||
|
ltac1:(dependent induction ch).
|
||||||
|
- apply eval_step_final_sem. auto.
|
||||||
|
- specialize (eval_step_middle_sem e ei vs vsi vs' P IHch). auto.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Corollary eval_step_no_sem : forall (e : expr) (vs vs' : value_stack),
|
||||||
|
~ (Sem_expr vs e vs') -> ~(eval_chain vs e vs').
|
||||||
|
Proof.
|
||||||
|
intros e vs vs' Hnsem Hch.
|
||||||
|
specialize (eval_step_sem_back _ _ _ Hch). auto.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Require Extraction.
|
||||||
|
Require Import ExtrHaskellBasic.
|
||||||
|
Extraction Language Haskell.
|
||||||
|
Set Extraction KeepSingleton.
|
||||||
|
Extraction "UccGen.hs" expr eval_step true false or.
|
||||||
179
code/dawn/DawnV2.v
Normal file
179
code/dawn/DawnV2.v
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
Require Import Coq.Lists.List.
|
||||||
|
From Ltac2 Require Import Ltac2.
|
||||||
|
|
||||||
|
Inductive intrinsic :=
|
||||||
|
| swap
|
||||||
|
| clone
|
||||||
|
| drop
|
||||||
|
| quote
|
||||||
|
| compose
|
||||||
|
| apply.
|
||||||
|
|
||||||
|
Inductive expr :=
|
||||||
|
| e_int (i : intrinsic)
|
||||||
|
| e_quote (e : expr)
|
||||||
|
| e_comp (e1 e2 : expr).
|
||||||
|
|
||||||
|
Definition e_compose (e : expr) (es : list expr) := fold_left e_comp es e.
|
||||||
|
|
||||||
|
Inductive value := v_quote (e : expr).
|
||||||
|
Definition value_stack := list value.
|
||||||
|
|
||||||
|
Definition value_to_expr (v : value) : expr :=
|
||||||
|
match v with
|
||||||
|
| v_quote e => e_quote e
|
||||||
|
end.
|
||||||
|
|
||||||
|
Inductive Sem_int : value_stack -> intrinsic -> value_stack -> Prop :=
|
||||||
|
| Sem_swap : forall (v v' : value) (vs : value_stack), Sem_int (v' :: v :: vs) swap (v :: v' :: vs)
|
||||||
|
| Sem_clone : forall (v : value) (vs : value_stack), Sem_int (v :: vs) clone (v :: v :: vs)
|
||||||
|
| Sem_drop : forall (v : value) (vs : value_stack), Sem_int (v :: vs) drop vs
|
||||||
|
| Sem_quote : forall (v : value) (vs : value_stack), Sem_int (v :: vs) quote ((v_quote (value_to_expr v)) :: vs)
|
||||||
|
| Sem_compose : forall (e e' : expr) (vs : value_stack), Sem_int (v_quote e' :: v_quote e :: vs) compose (v_quote (e_comp e e') :: vs)
|
||||||
|
| Sem_apply : forall (e : expr) (vs vs': value_stack), Sem_expr vs e vs' -> Sem_int (v_quote e :: vs) apply vs'
|
||||||
|
|
||||||
|
with Sem_expr : value_stack -> expr -> value_stack -> Prop :=
|
||||||
|
| Sem_e_int : forall (i : intrinsic) (vs vs' : value_stack), Sem_int vs i vs' -> Sem_expr vs (e_int i) vs'
|
||||||
|
| Sem_e_quote : forall (e : expr) (vs : value_stack), Sem_expr vs (e_quote e) (v_quote e :: vs)
|
||||||
|
| Sem_e_comp : forall (e1 e2 : expr) (vs1 vs2 vs3 : value_stack),
|
||||||
|
Sem_expr vs1 e1 vs2 -> Sem_expr vs2 e2 vs3 -> Sem_expr vs1 (e_comp e1 e2) vs3.
|
||||||
|
|
||||||
|
Definition false : expr := e_quote (e_int drop).
|
||||||
|
Definition false_v : value := v_quote (e_int drop).
|
||||||
|
|
||||||
|
Definition true : expr := e_quote (e_comp (e_int swap) (e_int drop)).
|
||||||
|
Definition true_v : value := v_quote (e_comp (e_int swap) (e_int drop)).
|
||||||
|
|
||||||
|
Theorem false_correct : forall (v v' : value) (vs : value_stack), Sem_expr (v' :: v :: vs) (e_comp false (e_int apply)) (v :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v v' vs.
|
||||||
|
eapply Sem_e_comp.
|
||||||
|
- apply Sem_e_quote.
|
||||||
|
- apply Sem_e_int. apply Sem_apply. apply Sem_e_int. apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem true_correct : forall (v v' : value) (vs : value_stack), Sem_expr (v' :: v :: vs) (e_comp true (e_int apply)) (v' :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v v' vs.
|
||||||
|
eapply Sem_e_comp.
|
||||||
|
- apply Sem_e_quote.
|
||||||
|
- apply Sem_e_int. apply Sem_apply. eapply Sem_e_comp.
|
||||||
|
* apply Sem_e_int. apply Sem_swap.
|
||||||
|
* apply Sem_e_int. apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Definition or : expr := e_comp (e_int clone) (e_int apply).
|
||||||
|
|
||||||
|
Theorem or_false_v : forall (v : value) (vs : value_stack), Sem_expr (false_v :: v :: vs) or (v :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v vs.
|
||||||
|
eapply Sem_e_comp...
|
||||||
|
- apply Sem_clone.
|
||||||
|
- apply Sem_apply... apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem or_true : forall (v : value) (vs : value_stack), Sem_expr (true_v :: v :: vs) or (true_v :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v vs.
|
||||||
|
eapply Sem_e_comp...
|
||||||
|
- apply Sem_clone...
|
||||||
|
- apply Sem_apply. eapply Sem_e_comp...
|
||||||
|
* apply Sem_swap.
|
||||||
|
* apply Sem_drop.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Definition or_false_false := or_false_v false_v.
|
||||||
|
Definition or_false_true := or_false_v true_v.
|
||||||
|
Definition or_true_false := or_true false_v.
|
||||||
|
Definition or_true_true := or_true true_v.
|
||||||
|
|
||||||
|
Fixpoint quote_n (n : nat) :=
|
||||||
|
match n with
|
||||||
|
| O => e_int quote
|
||||||
|
| S n' => e_compose (quote_n n') (e_int swap :: e_int quote :: e_int swap :: e_int compose :: nil)
|
||||||
|
end.
|
||||||
|
|
||||||
|
Theorem quote_2_correct : forall (v1 v2 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v2 :: v1 :: vs) (quote_n 1) (v_quote (e_comp (value_to_expr v1) (value_to_expr v2)) :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v1 v2 vs. simpl.
|
||||||
|
repeat (eapply Sem_e_comp)...
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem quote_3_correct : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (quote_n 2) (v_quote (e_comp (value_to_expr v1) (e_comp (value_to_expr v2) (value_to_expr v3))) :: vs).
|
||||||
|
Proof with apply Sem_e_int.
|
||||||
|
intros v1 v2 v3 vs. simpl.
|
||||||
|
repeat (eapply Sem_e_comp)...
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_quote.
|
||||||
|
- apply Sem_swap.
|
||||||
|
- apply Sem_compose.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Ltac2 rec solve_basic () := Control.enter (fun _ =>
|
||||||
|
match! goal with
|
||||||
|
| [|- Sem_int ?vs1 swap ?vs2] => apply Sem_swap
|
||||||
|
| [|- Sem_int ?vs1 clone ?vs2] => apply Sem_clone
|
||||||
|
| [|- Sem_int ?vs1 drop ?vs2] => apply Sem_drop
|
||||||
|
| [|- Sem_int ?vs1 quote ?vs2] => apply Sem_quote
|
||||||
|
| [|- Sem_int ?vs1 compose ?vs2] => apply Sem_compose
|
||||||
|
| [|- Sem_int ?vs1 apply ?vs2] => apply Sem_apply
|
||||||
|
| [|- Sem_expr ?vs1 (e_comp ?e1 ?e2) ?vs2] => eapply Sem_e_comp; solve_basic ()
|
||||||
|
| [|- Sem_expr ?vs1 (e_int ?e) ?vs2] => apply Sem_e_int; solve_basic ()
|
||||||
|
| [|- Sem_expr ?vs1 (e_quote ?e) ?vs2] => apply Sem_e_quote
|
||||||
|
| [_ : _ |- _] => ()
|
||||||
|
end).
|
||||||
|
|
||||||
|
Theorem quote_2_correct' : forall (v1 v2 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v2 :: v1 :: vs) (quote_n 1) (v_quote (e_comp (value_to_expr v1) (value_to_expr v2)) :: vs).
|
||||||
|
Proof. intros. simpl. solve_basic (). Qed.
|
||||||
|
|
||||||
|
Theorem quote_3_correct' : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (quote_n 2) (v_quote (e_comp (value_to_expr v1) (e_comp (value_to_expr v2) (value_to_expr v3))) :: vs).
|
||||||
|
Proof. intros. simpl. solve_basic (). Qed.
|
||||||
|
|
||||||
|
Definition rotate_n (n : nat) := e_compose (quote_n n) (e_int swap :: e_int quote :: e_int compose :: e_int apply :: nil).
|
||||||
|
|
||||||
|
Lemma eval_value : forall (v : value) (vs : value_stack),
|
||||||
|
Sem_expr vs (value_to_expr v) (v :: vs).
|
||||||
|
Proof.
|
||||||
|
intros v vs.
|
||||||
|
destruct v.
|
||||||
|
simpl. apply Sem_e_quote.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem rotate_3_correct : forall (v1 v2 v3 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v3 :: v2 :: v1 :: vs) (rotate_n 1) (v1 :: v3 :: v2 :: vs).
|
||||||
|
Proof.
|
||||||
|
intros. unfold rotate_n. simpl. solve_basic ().
|
||||||
|
repeat (eapply Sem_e_comp); apply eval_value.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem rotate_4_correct : forall (v1 v2 v3 v4 : value) (vs : value_stack),
|
||||||
|
Sem_expr (v4 :: v3 :: v2 :: v1 :: vs) (rotate_n 2) (v1 :: v4 :: v3 :: v2 :: vs).
|
||||||
|
Proof.
|
||||||
|
intros. unfold rotate_n. simpl. solve_basic ().
|
||||||
|
repeat (eapply Sem_e_comp); apply eval_value.
|
||||||
|
Qed.
|
||||||
|
|
||||||
|
Theorem e_comp_assoc : forall (e1 e2 e3 : expr) (vs vs' : value_stack),
|
||||||
|
Sem_expr vs (e_comp e1 (e_comp e2 e3)) vs' <-> Sem_expr vs (e_comp (e_comp e1 e2) e3) vs'.
|
||||||
|
Proof.
|
||||||
|
intros e1 e2 e3 vs vs'.
|
||||||
|
split; intros Heval.
|
||||||
|
- inversion Heval; subst. inversion H4; subst.
|
||||||
|
eapply Sem_e_comp. eapply Sem_e_comp. apply H2. apply H3. apply H6.
|
||||||
|
- inversion Heval; subst. inversion H2; subst.
|
||||||
|
eapply Sem_e_comp. apply H3. eapply Sem_e_comp. apply H6. apply H4.
|
||||||
|
Qed.
|
||||||
64
code/dawn/Ucc.hs
Normal file
64
code/dawn/Ucc.hs
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
module Ucc where
|
||||||
|
import UccGen
|
||||||
|
import Text.Parsec
|
||||||
|
import Data.Functor.Identity
|
||||||
|
import Control.Applicative hiding ((<|>))
|
||||||
|
import System.IO
|
||||||
|
|
||||||
|
-- | Render an intrinsic as its concrete-syntax keyword (the same
-- spellings the parser accepts).
instance Show Intrinsic where
    show i = case i of
        Swap    -> "swap"
        Clone   -> "clone"
        Drop    -> "drop"
        Quote   -> "quote"
        Compose -> "compose"
        Apply   -> "apply"
|
||||||
|
|
||||||
|
-- | Render an expression in concrete syntax: intrinsics by keyword,
-- quotes in square brackets, compositions separated by a space.
instance Show Expr where
    show (E_int intr)     = show intr
    show (E_quote inner)  = "[" ++ show inner ++ "]"
    show (E_comp lhs rhs) = unwords [show lhs, show rhs]
|
||||||
|
|
||||||
|
-- | A value is always a quote, so render it as the corresponding
-- quote expression.
instance Show Value where
    show (V_quote body) = show (E_quote body)
|
||||||
|
|
||||||
|
type Parser a = ParsecT String () Identity a
|
||||||
|
|
||||||
|
-- | Parse a single intrinsic keyword, consuming trailing whitespace.
-- Each alternative is wrapped in 'try' so a failed prefix match (e.g.
-- "clone" vs "compose") backtracks cleanly.
intrinsic :: Parser Intrinsic
intrinsic = keyword <* spaces
  where
    keyword = choice [try (string name >> return value) | (name, value) <- table]
    table =
        [ ("swap", Swap)
        , ("clone", Clone)
        , ("drop", Drop)
        , ("quote", Quote)
        , ("compose", Compose)
        , ("apply", Apply)
        ]
|
||||||
|
|
||||||
|
-- | Parse one or more single expressions and left-fold them into a
-- composition chain.
expression :: Parser Expr
expression = foldl1 E_comp <$> many1 single
  where
    -- A single expression is either a bare intrinsic or a bracketed quote.
    single        = intrinsicExpr <|> quoteExpr
    intrinsicExpr = E_int <$> intrinsic
    quoteExpr     = E_quote <$> (char '[' *> spaces *> expression <* char ']' <* spaces)
|
||||||
|
-- | Parse a complete program from a string, reporting Parsec errors on
-- failure. The source name "<inline>" appears in error messages.
parseExpression :: String -> Either ParseError Expr
parseExpression source = runParser expression () "<inline>" source
|
||||||
|
|
||||||
|
-- | Evaluate an expression against a value stack by repeatedly taking
-- small steps: Err aborts, Final yields the resulting stack, and Middle
-- continues evaluation with the remaining expression and new stack.
eval :: [Value] -> Expr -> Maybe [Value]
eval stack expr = continue (eval_step stack expr)
  where
    continue Err                   = Nothing
    continue (Final stack')        = Just stack'
    continue (Middle expr' stack') = eval stack' expr'
|
||||||
|
|
||||||
|
-- | Simple REPL: prompt, read one line, parse and evaluate it against an
-- empty stack, report the result, and loop forever.
main :: IO ()
main = do
    putStr "> "
    hFlush stdout  -- ensure the prompt is visible before getLine blocks
    input <- getLine
    case parseExpression input of
        Right expr ->
            case eval [] expr of
                Just finalStack -> putStrLn (show finalStack)
                Nothing         -> putStrLn "Evaluation error"
        Left _ -> putStrLn "Parse error"
    main
|
||||||
23
code/typescript-emitter/js1.js
Normal file
23
code/typescript-emitter/js1.js
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
// A minimal publish/subscribe event emitter. Handlers are stored per
// event name and invoked with no arguments when that event is emitted.
class EventEmitter {
    constructor() {
        // Maps event name -> array of handler functions.
        this.handlers = {}
    }

    // Invoke every handler registered for `event`, in registration order.
    // Events with no handlers are silently ignored (optional chaining).
    emit(event) {
        this.handlers[event]?.forEach(h => h());
    }

    // Register `handler` for `event`, allocating the list on first use.
    addHandler(event, handler) {
        const existing = this.handlers[event];
        if (existing) {
            existing.push(handler);
        } else {
            this.handlers[event] = [handler];
        }
    }
}
|
||||||
|
|
||||||
|
// Demo: register one no-argument handler per event, then emit both events.
const emitter = new EventEmitter();
emitter.addHandler("start", () => console.log("Started!"));
emitter.addHandler("end", () => console.log("Ended!"));
// Emission order is independent of registration order: "Ended!" prints
// before "Started!".
emitter.emit("end");
emitter.emit("start");
|
||||||
23
code/typescript-emitter/js2.js
Normal file
23
code/typescript-emitter/js2.js
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
// Event emitter variant whose emit forwards a single payload value to
// each handler registered for the event.
class EventEmitter {
    constructor() {
        // Maps event name -> array of handler functions.
        this.handlers = {}
    }

    // Invoke every handler for `event` with `value`, in registration
    // order; events with no handlers are a no-op.
    emit(event, value) {
        for (const handler of this.handlers[event] ?? []) {
            handler(value);
        }
    }

    // Register `handler` for `event`, allocating the list on first use.
    addHandler(event, handler) {
        const existing = this.handlers[event];
        if (existing) {
            existing.push(handler);
        } else {
            this.handlers[event] = [handler];
        }
    }
}
|
||||||
|
|
||||||
|
// Demo: handlers now receive the emitted payload value. Nothing here
// constrains the payload's type — that motivates the TypeScript version.
const emitter = new EventEmitter();
emitter.addHandler("numberChange", n => console.log("New number value is: ", n));
emitter.addHandler("stringChange", s => console.log("New string value is: ", s));
emitter.emit("numberChange", 1);
emitter.emit("stringChange", "3");
|
||||||
27
code/typescript-emitter/ts.ts
Normal file
27
code/typescript-emitter/ts.ts
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
class EventEmitter<T> {
|
||||||
|
private handlers: { [eventName in keyof T]?: ((value: T[eventName]) => void)[] }
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.handlers = {}
|
||||||
|
}
|
||||||
|
|
||||||
|
emit<K extends keyof T>(event: K, value: T[K]): void {
|
||||||
|
this.handlers[event]?.forEach(h => h(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
addHandler<K extends keyof T>(event: K, handler: (value: T[K]) => void): void {
|
||||||
|
if(!this.handlers[event]) {
|
||||||
|
this.handlers[event] = [handler];
|
||||||
|
} else {
|
||||||
|
this.handlers[event].push(handler);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Demo: the event map fixes each event's payload type at compile time.
const emitter = new EventEmitter<{ numberChange: number, stringChange: string }>();
emitter.addHandler("numberChange", n => console.log("New number value is: ", n));
emitter.addHandler("stringChange", s => console.log("New string value is: ", s));
emitter.emit("numberChange", 1);
emitter.emit("stringChange", "3");
// The next two calls pass payloads of the wrong type for their events;
// they are rejected by the TypeScript compiler (intentional demonstration).
emitter.emit("numberChange", "1");
emitter.emit("stringChange", 3);
|
||||||
5
config-gen.toml
Normal file
5
config-gen.toml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[params]
|
||||||
|
[params.submoduleLinks]
|
||||||
|
[params.submoduleLinks.aoc2020]
|
||||||
|
url = "https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020/src/commit/7a8503c3fe1aa7e624e4d8672aa9b56d24b4ba82"
|
||||||
|
path = "aoc-2020"
|
||||||
@@ -1,6 +1,8 @@
|
|||||||
---
|
---
|
||||||
title: About
|
title: About
|
||||||
---
|
---
|
||||||
|
{{< donate_css >}}
|
||||||
|
|
||||||
I'm Daniel, a Computer Science student currently working towards my Master's Degree at Oregon State University.
|
I'm Daniel, a Computer Science student currently working towards my Master's Degree at Oregon State University.
|
||||||
Due to my initial interest in calculators and compilers, I got involved in the Programming Language Theory research
|
Due to my initial interest in calculators and compilers, I got involved in the Programming Language Theory research
|
||||||
group, gaining some experience in formal verification, domain-specific languages, and explainable computing.
|
group, gaining some experience in formal verification, domain-specific languages, and explainable computing.
|
||||||
@@ -8,3 +10,34 @@ group, gaining same experience in formal verification, domain specific language,
|
|||||||
For work, school, and hobby projects, I use a variety of programming languages, most commonly C/C++,
|
For work, school, and hobby projects, I use a variety of programming languages, most commonly C/C++,
|
||||||
Haskell, [Crystal](https://crystal-lang.org/), and [Elm](https://elm-lang.org/). I also have experience
|
Haskell, [Crystal](https://crystal-lang.org/), and [Elm](https://elm-lang.org/). I also have experience
|
||||||
with Java, Python, Haxe, and JavaScript.
|
with Java, Python, Haxe, and JavaScript.
|
||||||
|
|
||||||
|
A few notes about me or this site:
|
||||||
|
* __Correctness__: I mostly write technical content. Even though I proofread my articles, there's
|
||||||
|
always a fairly good chance that I'm wrong. You should always use your best judgement when reading
|
||||||
|
anything on this site -- if something seems wrong, it may very well be. I'm far from an expert.
|
||||||
|
* __Schedule__: I do not have a set post schedule. There are many reasons for this:
|
||||||
|
schoolwork, personal life, lack of inspiration. It also takes a _very_ long time for
|
||||||
|
me to write a single article. My article on [polymorphic type checking]({{< relref "/blog/10_compiler_polymorphism.md" >}})
|
||||||
|
is around 8,000 words long; besides writing it, I have to edit it, link up all the code
|
||||||
|
references, and proofread the final result. And of course, I need to write the code and
|
||||||
|
occasionally do some research.
|
||||||
|
* __Design__: I am doing my best to keep this website accessible and easy on the eyes.
|
||||||
|
I'm also doing my best to avoid any and all uses of JavaScript. I used to use a lot of
|
||||||
|
uMatrix, and most of the websites I browsed during this time were broken. Similarly,
|
||||||
|
a lot of websites were unusable on my weaker machines. So, I'm doing my part and
|
||||||
|
making this site usable without any JavaScript, and, as it seems to me, even
|
||||||
|
without any CSS.
|
||||||
|
* __Source code__: This blog is open source, but not on GitHub. Instead,
|
||||||
|
you can find the code on my [Gitea instance](https://dev.danilafe.com/Web-Projects/blog-static).
|
||||||
|
If you use this code for your own site, I would prefer that you don't copy the theme.
|
||||||
|
|
||||||
|
### Donate
|
||||||
|
I don't run ads, nor do I profit from writing anything on here. I have no trouble paying for hosting,
|
||||||
|
and I write my articles voluntarily, for my own enjoyment. However, if you found something particularly
|
||||||
|
helpful on here, and would like to buy me a cup of coffee or help host the site, you can donate using
|
||||||
|
the method(s) below.
|
||||||
|
|
||||||
|
{{< donation_methods >}}
|
||||||
|
{{< donation_method "Bitcoin" "1BbXPZhdzv4xHq5LYhme3xBiUsHw5fmafd" >}}
|
||||||
|
{{< donation_method "Ethereum" "0xd111E49344bEC80570e68EE0A00b87B1EFcb5D56" >}}
|
||||||
|
{{< /donation_methods >}}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
title: "Advent of Code in Coq - Day 1"
|
title: "Advent of Code in Coq - Day 1"
|
||||||
date: 2020-12-02T18:44:56-08:00
|
date: 2020-12-02T18:44:56-08:00
|
||||||
tags: ["Advent of Code", "Coq"]
|
tags: ["Advent of Code", "Coq"]
|
||||||
|
favorite: true
|
||||||
---
|
---
|
||||||
|
|
||||||
The first puzzle of this year's [Advent of Code](https://adventofcode.com) was quite
|
The first puzzle of this year's [Advent of Code](https://adventofcode.com) was quite
|
||||||
@@ -25,7 +26,7 @@ let's write a helper function that, given a number `x`, tries to find another nu
|
|||||||
`y` such that `x + y = 2020`. In fact, rather than hardcoding the desired
|
`y` such that `x + y = 2020`. In fact, rather than hardcoding the desired
|
||||||
sum to `2020`, let's just use another argument called `total`. The code is quite simple:
|
sum to `2020`, let's just use another argument called `total`. The code is quite simple:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 7 14 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 11 18 >}}
|
||||||
|
|
||||||
Here, `is` is the list of numbers that we want to search.
|
Here, `is` is the list of numbers that we want to search.
|
||||||
We proceed by case analysis: if the list is empty, we can't
|
We proceed by case analysis: if the list is empty, we can't
|
||||||
@@ -42,7 +43,7 @@ for our purposes when the argument being case analyzed is given first.
|
|||||||
We can now use `find_matching` to define our `find_sum` function, which solves part 1.
|
We can now use `find_matching` to define our `find_sum` function, which solves part 1.
|
||||||
Here's the code:
|
Here's the code:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 16 24 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 20 28 >}}
|
||||||
|
|
||||||
For every `x` that we encounter in our input list `is`, we want to check if there's
|
For every `x` that we encounter in our input list `is`, we want to check if there's
|
||||||
a matching number in the rest of the list. We only search the remainder of the list
|
a matching number in the rest of the list. We only search the remainder of the list
|
||||||
@@ -71,13 +72,13 @@ formally as follows:
|
|||||||
|
|
||||||
And this is how we write it in Coq:
|
And this is how we write it in Coq:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 26 27 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 30 31 >}}
|
||||||
|
|
||||||
The arrow, `->`, reads "implies". Other than that, I think this
|
The arrow, `->`, reads "implies". Other than that, I think this
|
||||||
property reads pretty well. The proof, unfortunately, is a little bit more involved.
|
property reads pretty well. The proof, unfortunately, is a little bit more involved.
|
||||||
Here are the first few lines:
|
Here are the first few lines:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 28 31 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 32 35 >}}
|
||||||
|
|
||||||
We start with the `intros is` tactic, which is akin to saying
|
We start with the `intros is` tactic, which is akin to saying
|
||||||
"consider a particular list of integers `is`". We do this without losing
|
"consider a particular list of integers `is`". We do this without losing
|
||||||
@@ -156,7 +157,7 @@ is zero. This means we're done with the base case!
|
|||||||
The inductive case is the meat of this proof. Here's the corresponding part
|
The inductive case is the meat of this proof. Here's the corresponding part
|
||||||
of the Coq source file:
|
of the Coq source file:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 32 36 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 36 40 >}}
|
||||||
|
|
||||||
This time, the proof state is more complicated:
|
This time, the proof state is more complicated:
|
||||||
|
|
||||||
@@ -283,14 +284,14 @@ Coq proofs is to actually step through them in the IDE!
|
|||||||
|
|
||||||
First on the list is `find_matching_skip`. Here's the type:
|
First on the list is `find_matching_skip`. Here's the type:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 38 39 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 42 43 >}}
|
||||||
|
|
||||||
It reads: if we correctly find a number in a small list `is`, we can find that same number
|
It reads: if we correctly find a number in a small list `is`, we can find that same number
|
||||||
even if another number is prepended to `is`. That makes sense: _adding_ a number to
|
even if another number is prepended to `is`. That makes sense: _adding_ a number to
|
||||||
a list doesn't remove whatever we found in it! I used this lemma to prove another,
|
a list doesn't remove whatever we found in it! I used this lemma to prove another,
|
||||||
`find_matching_works`:
|
`find_matching_works`:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 49 50 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 53 54 >}}
|
||||||
|
|
||||||
This reads, if there _is_ an element `y` in `is` that adds up to `k` with `x`, then
|
This reads, if there _is_ an element `y` in `is` that adds up to `k` with `x`, then
|
||||||
`find_matching` will find it. This is an important property. After all, if it didn't
|
`find_matching` will find it. This is an important property. After all, if it didn't
|
||||||
@@ -309,7 +310,7 @@ that all lists from this Advent of Code puzzle are guaranteed to have two number
|
|||||||
add up to our goal, and that these numbers are not equal to each other. In Coq,
|
add up to our goal, and that these numbers are not equal to each other. In Coq,
|
||||||
we state this as follows:
|
we state this as follows:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 4 5 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 8 9 >}}
|
||||||
|
|
||||||
This defines a new property, `has_pair t is` (read "`is` has a pair of numbers that add to `t`"),
|
This defines a new property, `has_pair t is` (read "`is` has a pair of numbers that add to `t`"),
|
||||||
which means:
|
which means:
|
||||||
@@ -322,7 +323,7 @@ which means:
|
|||||||
When making claims about the correctness of our algorithm, we will assume that this
|
When making claims about the correctness of our algorithm, we will assume that this
|
||||||
property holds. Finally, here's the theorem we want to prove:
|
property holds. Finally, here's the theorem we want to prove:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 64 66 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 68 70 >}}
|
||||||
|
|
||||||
It reads, "for any total `k` and list `is`, if `is` has a pair of numbers that add to `k`,
|
It reads, "for any total `k` and list `is`, if `is` has a pair of numbers that add to `k`,
|
||||||
then `find_sum` will return a pair of numbers `x` and `y` that add to `k`".
|
then `find_sum` will return a pair of numbers `x` and `y` that add to `k`".
|
||||||
@@ -334,7 +335,7 @@ we want to confirm that `find_sum` will find one of them. Finally, here is the p
|
|||||||
I will not be able to go through it in detail in this post, but I did comment it to
|
I will not be able to go through it in detail in this post, but I did comment it to
|
||||||
make it easier to read:
|
make it easier to read:
|
||||||
|
|
||||||
{{< codelines "Coq" "aoc-coq/day1.v" 67 102 >}}
|
{{< codelines "Coq" "aoc-2020/day1.v" 71 106 >}}
|
||||||
|
|
||||||
Coq seems happy with it, and so am I! The bug I mentioned earlier popped up on line 96.
|
Coq seems happy with it, and so am I! The bug I mentioned earlier popped up on line 96.
|
||||||
I had accidentally made `find_sum` return `None` if it couldn't find a complement
|
I had accidentally made `find_sum` return `None` if it couldn't find a complement
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: A Language for an Assignment - Homework 1
|
title: A Language for an Assignment - Homework 1
|
||||||
date: 2019-12-27T23:27:09-08:00
|
date: 2019-12-27T23:27:09-08:00
|
||||||
tags: ["Haskell", "Python", "Algorithms"]
|
tags: ["Haskell", "Python", "Algorithms", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
On a rainy Oregon day, I was walking between classes with a group of friends.
|
On a rainy Oregon day, I was walking between classes with a group of friends.
|
||||||
|
|||||||
940
content/blog/01_aoc_coq.md
Normal file
940
content/blog/01_aoc_coq.md
Normal file
@@ -0,0 +1,940 @@
|
|||||||
|
---
|
||||||
|
title: "Advent of Code in Coq - Day 8"
|
||||||
|
date: 2021-01-10T22:48:39-08:00
|
||||||
|
tags: ["Advent of Code", "Coq"]
|
||||||
|
---
|
||||||
|
|
||||||
|
Huh? We're on day 8? What happened to days 2 through 7?
|
||||||
|
|
||||||
|
Well, for the most part, I didn't think they were that interesting from the Coq point of view.
|
||||||
|
Day 7 got close, but not close enough to inspire me to create a formalization. Day 8, on the other
|
||||||
|
hand, is
|
||||||
|
{{< sidenote "right" "pl-note" "quite interesting," >}}
|
||||||
|
Especially to someone like me who's interested in programming languages!
|
||||||
|
{{< /sidenote >}} and took quite some time to formalize.
|
||||||
|
|
||||||
|
As before, here's an (abridged) description of the problem:
|
||||||
|
|
||||||
|
> Given a tiny assembly-like language, determine the state of its accumulator
|
||||||
|
> when the same instruction is executed twice.
|
||||||
|
|
||||||
|
Before we start on the Coq formalization, let's talk about an idea from
|
||||||
|
Programming Language Theory (PLT), _big step operational semantics_.
|
||||||
|
|
||||||
|
### Big Step Operational Semantics
|
||||||
|
What we have in Advent of Code's Day 8 is, undeniably, a small programming language.
|
||||||
|
We are tasked with executing this language, or, in PLT lingo, defining its _semantics_.
|
||||||
|
There are many ways of doing this - at university, I've been taught of [denotational](https://en.wikipedia.org/wiki/Denotational_semantics), [axiomatic](https://en.wikipedia.org/wiki/Axiomatic_semantics),
|
||||||
|
and [operational](https://en.wikipedia.org/wiki/Operational_semantics) semantics.
|
||||||
|
I believe that Coq's mechanism of inductive definitions lends itself very well
|
||||||
|
to operational semantics, so we'll take that route. But even "operational semantics"
|
||||||
|
doesn't refer to a concrete technique - we have a choice between small-step (structural) and
|
||||||
|
big-step (natural) operational semantics. The former describe the minimal "steps" a program
|
||||||
|
takes as it's being evaluated, while the latter define the final results of evaluating a program.
|
||||||
|
I decided to go with big-step operational semantics, since they're more intuitive (natural!).
|
||||||
|
|
||||||
|
So, how does one go about "[defining] the final results of evaluating a program?" Most commonly,
|
||||||
|
we go about using _inference rules_. Let's talk about those next.
|
||||||
|
|
||||||
|
#### Inference Rules
|
||||||
|
Inference rules are a very general notion. They describe how we can determine (infer) a conclusion
|
||||||
|
from a set of assumptions. It helps to look at an example. Here's a silly little inference rule:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{\text{I'm allergic to cats} \quad \text{My friend has a cat}}
|
||||||
|
{\text{I will not visit my friend very much}}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
It reads, "if I'm allergic to cats, and if my friend has a cat, then I will not visit my friend very much".
|
||||||
|
Here, "I'm allergic to cats" and "my friend has a cat" are _premises_, and "I will not visit my friend very much" is
|
||||||
|
a _conclusion_. An inference rule states that if all its premises are true, then its conclusion must be true.
|
||||||
|
Here's another inference rule, this time with some mathematical notation instead of words:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{n < m}
|
||||||
|
{n + 1 < m + 1}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
This one reads, "if \\(n\\) is less than \\(m\\), then \\(n+1\\) is less than \\(m+1\\)". We can use inference
|
||||||
|
rules to define various constructs. As an example, let's define what it means for a natural number to be even.
|
||||||
|
It takes two rules:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{}
|
||||||
|
{0 \; \text{is even}}
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{n \; \text{is even}}
|
||||||
|
{n+2 \; \text{is even}}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
First of all, zero is even. We take this as fact - there are no premises for the first rule, so they
|
||||||
|
are all trivially true. Next, if a number is even, then adding 2 to that number results in another
|
||||||
|
even number. Using the two of these rules together, we can correctly determine whether any number
|
||||||
|
is or isn't even. We start knowing that 0 is even. Adding 2 we learn that 2 is even, and adding 2
|
||||||
|
again we see that 4 is even, as well. We can continue this to determine that 6, 8, 10, and so on
|
||||||
|
are even too. Never in this process will we visit the numbers 1 or 3 or 5, and that's good - they're not even!
|
||||||
|
|
||||||
|
Let's now extend this notion to programming languages, starting with a simple arithmetic language.
|
||||||
|
This language is made up of natural numbers and the \\(\square\\) operation, which represents the addition
|
||||||
|
of two numbers. Again, we need two rules:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{n \in \mathbb{N}}
|
||||||
|
{n \; \text{evaluates to} \; n}
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{e_1 \; \text{evaluates to} \; n_1 \quad e_2 \; \text{evaluates to} \; n_2}
|
||||||
|
{e_1 \square e_2 \; \text{evaluates to} \; n_1 + n_2}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
First, let me explain myself. I used \\(\square\\) to demonstrate two important points. First, languages can be made of
|
||||||
|
any kind of characters we want; it's the rules that we define that give these languages meaning.
|
||||||
|
Second, while \\(\square\\) is the addition operation _in our language_, \\(+\\) is the _mathematical addition operator_.
|
||||||
|
They are not the same - we use the latter to define how the former works.
|
||||||
|
|
||||||
|
Finally, writing "evaluates to" gets quite tedious, especially for complex languages. Instead,
|
||||||
|
PLT people use notation to make their semantics more concise. The symbol \\(\Downarrow\\) is commonly
|
||||||
|
used to mean "evaluates to"; thus, \\(e \Downarrow v\\) reads "the expression \\(e\\) evaluates to the value \\(v\\).
|
||||||
|
Using this notation, our rules start to look like the following:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{n \in \mathbb{N}}
|
||||||
|
{n \Downarrow n}
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{e_1 \Downarrow n_1 \quad e_2 \Downarrow n_2}
|
||||||
|
{e_1 \square e_2 \Downarrow n_1 + n_2}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
If nothing else, these are way more compact! Though these may look intimidating at first, it helps to
|
||||||
|
simply read each symbol as its English meaning.
|
||||||
|
|
||||||
|
#### Encoding Inference Rules in Coq
|
||||||
|
Now that we've seen what inference rules are, we can take a look at how they can be represented in Coq.
|
||||||
|
We can use Coq's `Inductive` mechanism to define the rules. Let's start with our "is even" property.
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Inductive is_even : nat -> Prop :=
|
||||||
|
| zero_even : is_even 0
|
||||||
|
| plustwo_even : is_even n -> is_even (n+2).
|
||||||
|
```
|
||||||
|
|
||||||
|
The first line declares the property `is_even`, which, given a natural number, returns a proposition.
|
||||||
|
This means that `is_even` is not a proposition itself, but `is_even 0`, `is_even 1`, and `is_even 2`
|
||||||
|
are all propositions.
|
||||||
|
|
||||||
|
The following two lines each encode one of our aforementioned inference rules. The first rule, `zero_even`,
|
||||||
|
is of type `is_even 0`. The `zero_even` rule doesn't require any arguments, and we can use it to create
|
||||||
|
a proof that 0 is even. On the other hand, the `plustwo_even` rule _does_ require an argument, `is_even n`.
|
||||||
|
To construct a proof that a number `n+2` is even using `plustwo_even`, we need to provide a proof
|
||||||
|
that `n` itself is even. From this definition we can see a general principle: we encode each inference
|
||||||
|
rule as constructor of an inductive Coq type. Each rule encoded in this manner takes as arguments
|
||||||
|
the proofs of its premises, and returns a proof of its conclusion.
|
||||||
|
|
||||||
|
For another example, let's encode our simple addition language. First, we have to define the language
|
||||||
|
itself:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Inductive tinylang : Type :=
|
||||||
|
| number (n : nat) : tinylang
|
||||||
|
| box (e1 e2 : tinylang) : tinylang.
|
||||||
|
```
|
||||||
|
|
||||||
|
This defines the two elements of our example language: `number n` corresponds to \\(n\\), and `box e1 e2` corresponds
|
||||||
|
to \\(e_1 \square e_2\\). Finally, we define the inference rules:
|
||||||
|
|
||||||
|
```Coq {linenos=true}
|
||||||
|
Inductive tinylang_sem : tinylang -> nat -> Prop :=
|
||||||
|
| number_sem : forall (n : nat), tinylang_sem (number n) n
|
||||||
|
| box_sem : forall (e1 e2 : tinylang) (n1 n2 : nat),
|
||||||
|
tinylang_sem e1 n1 -> tinylang_sem e2 n2 ->
|
||||||
|
tinylang_sem (box e1 e2) (n1 + n2).
|
||||||
|
```
|
||||||
|
|
||||||
|
When we wrote our rules earlier, by using arbitrary variables like \\(e_1\\) and \\(n_1\\), we implicitly meant
|
||||||
|
that our rules work for _any_ number or expression. When writing Coq we have to make this assumption explicit
|
||||||
|
by using `forall`. For instance, the rule on line 2 reads, "for any number `n`, the expression `n` evaluates to `n`".
|
||||||
|
|
||||||
|
#### Semantics of Our Language
|
||||||
|
|
||||||
|
We've now written some example big-step operational semantics, both "on paper" and in Coq. Now, it's time to take a look at
|
||||||
|
the specific semantics of the language from Day 8! Our language consists of a few parts.
|
||||||
|
|
||||||
|
First, there are three opcodes: \\(\texttt{jmp}\\), \\(\\texttt{nop}\\), and \\(\\texttt{add}\\). Opcodes, combined
|
||||||
|
with an integer, make up an instruction. For example, the instruction \\(\\texttt{add} \\; 3\\) will increase the
|
||||||
|
content of the accumulator by three. Finally, a program consists of a sequence of instructions; they're separated
|
||||||
|
by newlines in the puzzle input, but we'll instead separate them by semicolons. For example, here's a complete program.
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\texttt{add} \; 0; \; \texttt{nop} \; 2; \; \texttt{jmp} \; -2
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
Now, let's try evaluating this program. Starting at the beginning and with 0 in the accumulator,
|
||||||
|
it will add 0 to the accumulator (keeping it the same),
|
||||||
|
do nothing, and finally jump back to the beginning. At this point, it will try to run the addition instruction again,
|
||||||
|
which is not allowed; thus, the program will terminate.
|
||||||
|
|
||||||
|
Did you catch that? The semantics of this language will require more information than just our program itself (which we'll denote by \\(p\\)).
|
||||||
|
* First, to evaluate the program we will need a program counter, \\(\\textit{c}\\). This program counter
|
||||||
|
will tell us the position of the instruction to be executed next. It can also point past the last instruction,
|
||||||
|
which means our program terminated successfully.
|
||||||
|
* Next, we'll need the accumulator \\(a\\). Addition instructions can change the accumulator, and we will be interested
|
||||||
|
in the number that ends up in the accumulator when our program finishes executing.
|
||||||
|
* Finally, and more subtly, we'll need to keep track of the states we visited. For instance,
|
||||||
|
in the course of evaluating our program above, we encounter the \\((c, a)\\) pair of \\((0, 0)\\) twice: once
|
||||||
|
at the beginning, and once at the end. However, whereas at the beginning we have not yet encountered the addition
|
||||||
|
instruction, at the end we have, so the evaluation behaves differently. To make the proofs work better in Coq,
|
||||||
|
we'll use a set \\(v\\) of
|
||||||
|
{{< sidenote "right" "allowed-note" "allowed (valid) program counters (as opposed to visited program counters)." >}}
|
||||||
|
Whereas the set of "visited" program counters keeps growing as our evaluation continues,
|
||||||
|
the set of "allowed" program counters keeps shrinking. Because the "allowed" set never stops shrinking,
|
||||||
|
assuming we're starting with a finite set, our execution will eventually terminate.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
|
||||||
|
Now we have all the elements of our evaluation. Let's define some notation. A program starts at some state,
|
||||||
|
and terminates in another, possibly different state. In the course of a regular evaluation, the program
|
||||||
|
never changes; only the state does. So I propose this (rather unorthodox) notation:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
(c, a, v) \Rightarrow_p (c', a', v')
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
This reads, "after starting at program counter \\(c\\), accumulator \\(a\\), and set of valid addresses \\(v\\),
|
||||||
|
the program \\(p\\) terminates with program counter \\(c'\\), accumulator \\(a'\\), and set of valid addresses \\(v'\\)".
|
||||||
|
Before creating the inference rules for this evaluation relation, let's define the effect of evaluating a single
|
||||||
|
instruction, using notation \\((c, a) \rightarrow_i (c', a')\\). An addition instruction changes the accumulator,
|
||||||
|
and increases the program counter by 1.
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{}
|
||||||
|
{(c, a) \rightarrow_{\texttt{add} \; n} (c+1, a+n)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
A no-op instruction does even less. All it does is increment the program counter.
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{}
|
||||||
|
{(c, a) \rightarrow_{\texttt{nop} \; n} (c+1, a)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
Finally, a jump instruction leaves the accumulator intact, but adds a number to the program counter itself!
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{}
|
||||||
|
{(c, a) \rightarrow_{\texttt{jmp} \; n} (c+n, a)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
None of these rules have any premises, and they really are quite simple. Now, let's define the rules
|
||||||
|
for evaluating a program. First of all, a program starting in a state that is not considered "valid"
|
||||||
|
is done evaluating, and is in a "failed" state.
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{c \not \in v \quad c \not= \text{length}(p)}
|
||||||
|
{(c, a, v) \Rightarrow_{p} (c, a, v)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
We use \\(\\text{length}(p)\\) to represent the number of instructions in \\(p\\). Note the second premise:
|
||||||
|
even if our program counter \\(c\\) is not included in the valid set, if it's "past the end of the program",
|
||||||
|
the program terminates in an "ok" state.
|
||||||
|
{{< sidenote "left" "avoid-c-note" "Here's a rule for terminating in the \"ok\" state:" >}}
|
||||||
|
In the presented rule, we don't use the variable <code>c</code> all that much, and we know its concrete
|
||||||
|
value (from the equality premise). We could thus avoid introducing the name \(c\) by
|
||||||
|
replacing it with said known value:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{}
|
||||||
|
{(\text{length}(p), a, v) \Rightarrow_{p} (\text{length}(p), a, v)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
This introduces some duplication, but that is really because all "base case" evaluation rules
|
||||||
|
start and stop in the same state. To work around this, we could define a separate proposition
|
||||||
|
to mean "program \(p\) is done in state \(s\)", then \(s\) will really only need to occur once,
|
||||||
|
and so will \(\text{length}(p)\). This is, in fact, what we will do later on,
|
||||||
|
since being able to talk about "programs being done" will help us with
|
||||||
|
components of our proof.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{c = \text{length}(p)}
|
||||||
|
{(c, a, v) \Rightarrow_{p} (c, a, v)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
When our program counter reaches the end of the program, we are also done evaluating it. Even though
|
||||||
|
both rules {{< sidenote "right" "redundant-note" "lead to the same conclusion," >}}
|
||||||
|
In fact, if the end of the program is never included in the valid set, the second rule is completely redundant.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
it helps to distinguish the two possible outcomes. Finally, if neither of the termination conditions are met,
|
||||||
|
our program can take a step, and continue evaluating from there.
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac{c \in v \quad p[c] = i \quad (c, a) \rightarrow_i (c', a') \quad (c', a', v - \{c\}) \Rightarrow_p (c'', a'', v'')}
|
||||||
|
{(c, a, v) \Rightarrow_{p} (c'', a'', v'')}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
This is quite a rule. A lot of things need to work out for a program to evaluate from a state that isn't
|
||||||
|
currently the final state:
|
||||||
|
|
||||||
|
* The current program counter \\(c\\) must be valid. That is, it must be an element of \\(v\\).
|
||||||
|
* This program counter must correspond to an instruction \\(i\\) in \\(p\\), which we write as \\(p[c] = i\\).
|
||||||
|
* This instruction must be executed, changing our program counter from \\(c\\) to \\(c'\\) and our
|
||||||
|
accumulator from \\(a\\) to \\(a'\\). The set of valid instructions will no longer include \\(c\\),
|
||||||
|
and will become \\(v - \\{c\\}\\).
|
||||||
|
* Our program must then finish executing, starting at state
|
||||||
|
\\((c', a', v - \\{c\\})\\), and ending in some (unknown) state \\((c'', a'', v'')\\).
|
||||||
|
|
||||||
|
If all of these conditions are met, our program, starting at \\((c, a, v)\\), will terminate in the state \\((c'', a'', v'')\\). This third rule completes our semantics; a program being executed will keep running instructions using the third rule, until it finally
|
||||||
|
hits an invalid program counter (terminating with the first rule) or gets to the end of the program (terminating with the second rule).
|
||||||
|
|
||||||
|
#### Aside: Vectors and Finite \\(\mathbb{N}\\)
|
||||||
|
We'll be getting to the Coq implementation of our semantics soon, but before we do:
|
||||||
|
what type should \\(c\\) be? It's entirely possible for an instruction like \\(\\texttt{jmp} \\; -10000\\)
|
||||||
|
to throw our program counter way before the first instruction of our program, so at first, it seems
|
||||||
|
as though we should use an integer. But the prompt doesn't even specify what should happen in this
|
||||||
|
case - it only says an instruction shouldn't be run twice. The "valid set", although it may help resolve
|
||||||
|
this debate, is our invention, and isn't part of the original specification.
|
||||||
|
|
||||||
|
There is, however, something we can infer from this problem. Since the problem of jumping "too far behind" or
|
||||||
|
"too far ahead" is never mentioned, we can assume that _all jumps will lead either to an instruction,
|
||||||
|
or right to the end of a program_. This means that \\(c\\) is a natural number, with
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
0 \leq c \leq \text{length}(p)
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
In a language like Coq, it's possible to represent such a number. Since we've gotten familiar with
|
||||||
|
inference rules, let's present two rules that define such a number:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{n \in \mathbb{N}^+}
|
||||||
|
{Z : \text{Fin} \; n}
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{f : \text{Fin} \; n}
|
||||||
|
{S f : \text{Fin} \; (n+1)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
This is a variation of the [Peano encoding](https://wiki.haskell.org/Peano_numbers) of natural numbers.
|
||||||
|
It reads as follows: zero (\\(Z\\)) is a finite natural number less than any positive natural number \\(n\\). Then, if a finite natural number
|
||||||
|
\\(f\\) is less than \\(n\\), then adding one to that number (using the successor function \\(S\\))
|
||||||
|
will create a natural number less than \\(n+1\\). We encode this in Coq as follows
|
||||||
|
([originally from here](https://coq.inria.fr/library/Coq.Vectors.Fin.html#t)):
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Inductive t : nat -> Set :=
|
||||||
|
| F1 : forall {n}, t (S n)
|
||||||
|
| FS : forall {n}, t n -> t (S n).
|
||||||
|
```
|
||||||
|
|
||||||
|
The `F1` constructor here is equivalent to our \\(Z\\), and `FS` is equivalent to our \\(S\\).
|
||||||
|
To represent positive natural numbers \\(\\mathbb{N}^+\\), we simply take a regular natural
|
||||||
|
number from \\(\mathbb{N}\\) and find its successor using `S` (simply adding 1). Again, we have
|
||||||
|
to explicitly use `forall` in our type signatures.
|
||||||
|
|
||||||
|
We can use a similar technique to represent a list with a known number of elements, known
|
||||||
|
in the Idris and Coq world as a vector. Again, we only need two inference rules to define such
|
||||||
|
a vector:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{t : \text{Type}}
|
||||||
|
{[] : \text{Vec} \; t \; 0}
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{x : \text{t} \quad \textit{xs} : \text{Vec} \; t \; n}
|
||||||
|
{(x::\textit{xs}) : \text{Vec} \; t \; (n+1)}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
These rules read: the empty list \\([]\\) is zero-length vector of any type \\(t\\). Then,
|
||||||
|
if we take an element \\(x\\) of type \\(t\\), and an \\(n\\)-long vector \\(\textit{xs}\\) of \\(t\\),
|
||||||
|
then we can prepend \\(x\\) to \\(\textit{xs}\\) and get an \\((n+1)\\)-long vector of \\(t\\).
|
||||||
|
In Coq, we write this as follows ([originally from here](https://coq.inria.fr/library/Coq.Vectors.VectorDef.html#t)):
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Inductive t A : nat -> Type :=
|
||||||
|
| nil : t A 0
|
||||||
|
| cons : forall (h:A) (n:nat), t A n -> t A (S n).
|
||||||
|
```
|
||||||
|
|
||||||
|
The `nil` constructor represents the empty list \\([]\\), and `cons` represents
|
||||||
|
the operation of prepending an element (called `h` in the code and \\(x\\) in our inference rules)
|
||||||
|
to another vector of length \\(n\\), which remains unnamed in the code but is called \\(\\textit{xs}\\) in our rules.
|
||||||
|
|
||||||
|
These two definitions work together quite well. For instance, suppose we have a vector of length \\(n\\).
|
||||||
|
If we were to access its elements by indices starting at 0, we'd be allowed to access indices 0 through \\(n-1\\).
|
||||||
|
These are precisely the values of the finite natural numbers less than \\(n\\), \\(\\text{Fin} \\; n \\).
|
||||||
|
Thus, given such an index \\(\\text{Fin} \\; n\\) and a vector \\(\\text{Vec} \\; t \\; n\\), we are guaranteed
|
||||||
|
to be able to retrieve the element at the given index! In our code, we will not have to worry about bounds checking.
|
||||||
|
|
||||||
|
Of course, if our program has \\(n\\) elements, our program counter will be a finite number less than \\(n+1\\),
|
||||||
|
since there's always the possibility of it pointing past the instructions, indicating that we've finished
|
||||||
|
running the program. This leads to some minor complications: we can't safely access the program instruction
|
||||||
|
at index \\(\\text{Fin} \\; (n+1)\\). We can solve this problem by considering two cases:
|
||||||
|
either our index points one past the end of the program (in which case its value is exactly the finite
|
||||||
|
representation of \\(n\\)), or it's less than \\(n\\), in which case we can "tighten" the upper bound,
|
||||||
|
and convert that index into a \\(\\text{Fin} \\; n\\). We formalize it in a lemma:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 80 82 >}}
|
||||||
|
|
||||||
|
There's a little bit of a gotcha here. Instead of translating our above statement literally,
|
||||||
|
and returning a value that's the result of "tightening" our input `f`, we return a value
|
||||||
|
`f'` that can be "weakened" to `f`. This is because "tightening" is not a total function -
|
||||||
|
it's not always possible to convert a \\(\\text{Fin} \\; (n+1)\\) into a \\(\\text{Fin} \\; n\\).
|
||||||
|
However, "weakening" \\(\\text{Fin} \\; n\\) _is_ a total function, since a number less than \\(n\\)
|
||||||
|
is, by the transitive property of a total order, also less than \\(n+1\\).
|
||||||
|
|
||||||
|
The Coq proof for this claim is as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 88 97 >}}
|
||||||
|
|
||||||
|
The `Fin.rectS` function is a convenient way to perform inductive proofs over
|
||||||
|
our finite natural numbers. Informally, our proof proceeds as follows:
|
||||||
|
|
||||||
|
* If the current finite natural number is zero, take a look at the "bound" (which
|
||||||
|
we assume is nonzero, since there isn't a natural number less than zero).
|
||||||
|
* If this "bounding number" is one, our `f` can't be tightened any further,
|
||||||
|
since doing so would create a number less than zero. Fortunately, in this case,
|
||||||
|
`n` must be `0`, so `f` is the finite representation of `n`.
|
||||||
|
* Otherwise, `f` is most definitely a weakened version of another `f'`,
|
||||||
|
since the tightest possible type for zero has a "bounding number" of one, and
|
||||||
|
our "bounding number" is greater than that. We return a tighter version of our finite zero.
|
||||||
|
* If our number is a successor of another finite number, we check if that other number
|
||||||
|
can itself be tightened.
|
||||||
|
* If it can't be tightened, then our smaller number is a finite representation of
|
||||||
|
`n-1`. This, in turn, means that adding one to it will be the finite representation
|
||||||
|
of `n` (if \\(x\\) is equal to \\(n-1\\), then \\(x+1\\) is equal to \\(n\\)).
|
||||||
|
* If it _can_ be tightened, then so can the successor (if \\(x\\) is less
|
||||||
|
than \\(n-1\\), then \\(x+1\\) is less than \\(n\\)).
|
||||||
|
|
||||||
|
Next, let's talk about addition, specifically the kind of addition done by the \\(\\texttt{jmp}\\) instruction.
|
||||||
|
We can always add an integer to a natural number, but we can at best guarantee that the result
|
||||||
|
will be an integer. For instance, we can add `-1000` to `1`, and get `-999`, which is _not_ a natural
|
||||||
|
number. We implement this kind of addition in a function called `jump_t`:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 56 56 >}}
|
||||||
|
|
||||||
|
At the moment, its definition is not particularly important. What is important, though,
|
||||||
|
is that it takes a bounded natural number `pc` (our program counter), an integer `off`
|
||||||
|
(the offset provided by the jump instruction) and returns another integer representing
|
||||||
|
the final offset. Why are integers of type `t`? Well, it so happens
|
||||||
|
that Coq provides facilities for working with arbitrary implementations of integers,
|
||||||
|
without relying on how they are implemented under the hood. This can be seen in its
|
||||||
|
[`Coq.ZArith.Int`](https://coq.inria.fr/library/Coq.ZArith.Int.html) module,
|
||||||
|
which describes what functions and types an implementation of integers should provide.
|
||||||
|
Among those is `t`, the type of an integer in such an arbitrary implementation. We too
|
||||||
|
will not make an assumption about how the integers are implemented, and simply
|
||||||
|
use this generic `t` from now on.
|
||||||
|
|
||||||
|
Now, suppose we wanted to write a function that _does_ return a valid program
|
||||||
|
counter after adding the offset to it. Since it's possible for this function to fail
|
||||||
|
(for instance, if the offset is very negative), it has to return `option (fin (S n))`.
|
||||||
|
That is, this function may either fail (returning `None`) or succeed, returning
|
||||||
|
`Some f`, where `f` is of type `fin (S n)`, aka \\(\\text{Fin} \\; (n + 1)\\). Here's
|
||||||
|
the function in Coq (again, don't worry too much about the definition):
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 61 61 >}}
|
||||||
|
|
||||||
|
We will make use of this function when we define and verify our semantics.
|
||||||
|
Let's take a look at that next.
|
||||||
|
|
||||||
|
#### Semantics in Coq
|
||||||
|
|
||||||
|
Now that we've seen finite sets and vectors, it's time to use them to
|
||||||
|
encode our semantics in Coq. Before we do anything else, we need
|
||||||
|
to provide Coq definitions for the various components of our
|
||||||
|
language, much like what we did with `tinylang`. We can start with opcodes:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 20 23 >}}
|
||||||
|
|
||||||
|
Now we can define a few other parts of our language and semantics, namely
|
||||||
|
states, instructions and programs (which I called "inputs" since, well, they're
|
||||||
|
our puzzle input). A state is simply the 3-tuple of the program counter, the set
|
||||||
|
of valid program counters, and the accumulator. We write it as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 33 33 >}}
|
||||||
|
|
||||||
|
The star `*` is used here to represent a [product type](https://en.wikipedia.org/wiki/Product_type)
|
||||||
|
rather than arithmetic multiplication. Our state type accepts an argument,
|
||||||
|
`n`, much like a finite natural number or a vector. In fact, this `n` is passed on
|
||||||
|
to the state's program counter and set types. Rightly, a state for a program
|
||||||
|
of length \\(n\\) will not be of the same type as a state for a program of length \\(n+1\\).
|
||||||
|
|
||||||
|
An instruction is also a tuple, but this time containing only two elements: the opcode and
|
||||||
|
the number. We write this as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 36 36 >}}
|
||||||
|
|
||||||
|
Finally, we have to define the type of a program. This type will also be
|
||||||
|
indexed by `n`, the program's length. A program of length `n` is simply a
|
||||||
|
vector of instructions `inst` of length `n`. This leads to the following
|
||||||
|
definition:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 38 38 >}}
|
||||||
|
|
||||||
|
So far, so good! Finally, it's time to get started on the semantics themselves.
|
||||||
|
We begin with the inductive definition of \\((\\rightarrow_i)\\).
|
||||||
|
I think this is fairly straightforward. However, we do use
|
||||||
|
`t` instead of \\(n\\) from the rules, and we use `FS`
|
||||||
|
instead of \\(+1\\). Also, we make the formerly implicit
|
||||||
|
assumption that \\(c+n\\) is valid explicit, by
|
||||||
|
providing a proof that `valid_jump_t pc t = Some pc'`.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 103 110 >}}
|
||||||
|
|
||||||
|
Next, it will help us to combine the premises for
|
||||||
|
"failed" and "ok" terminations into Coq data types.
|
||||||
|
This will make it easier for us to formulate a lemma later on.
|
||||||
|
Here are the definitions:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 112 117 >}}
|
||||||
|
|
||||||
|
Since all of our "termination" rules start and
|
||||||
|
end in the same state, there's no reason to
|
||||||
|
write that state twice. Thus, both `done`
|
||||||
|
and `stuck` only take the input `inp`,
|
||||||
|
and the state, which includes the accumulator
|
||||||
|
`acc`, the set of allowed program counters `v`, and
|
||||||
|
the program counter at which the program came to an end.
|
||||||
|
When the program terminates successfully, this program
|
||||||
|
counter will be equal to the length of the program `n`,
|
||||||
|
so we use `nat_to_fin n`. On the other hand, if the program
|
||||||
|
terminates in a stuck state, it must be that it terminated
|
||||||
|
at a program counter that points to an instruction. Thus, this
|
||||||
|
program counter is actually a \\(\\text{Fin} \\; n\\), and not
|
||||||
|
a \\(\\text{Fin} \\; (n+1)\\), and is not in the set of allowed program counters.
|
||||||
|
We use the same "weakening" trick we saw earlier to represent
|
||||||
|
this.
|
||||||
|
|
||||||
|
Finally, we encode the three inference rules we came up with:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 119 126 >}}
|
||||||
|
|
||||||
|
Notice that we fused two of the premises in the last rule.
|
||||||
|
Instead of naming the instruction at the current program
|
||||||
|
counter (by writing \\(p[c] = i\\)) and using it in another premise, we simply use
|
||||||
|
`nth inp pc`, which corresponds to \\(p[c]\\) in our
|
||||||
|
"paper" semantics.
|
||||||
|
|
||||||
|
Before we go on writing some actual proofs, we have
|
||||||
|
one more thing we have to address. Earlier, we said:
|
||||||
|
|
||||||
|
> All jumps will lead either to an instruction, or right to the end of a program.
|
||||||
|
|
||||||
|
To make Coq aware of this constraint, we'll have to formalize it. To
|
||||||
|
start off, we'll define the notion of a "valid instruction", which is guaranteed
|
||||||
|
to keep the program counter in the correct range.
|
||||||
|
There are a couple of ways to do this, but we'll use yet another definition based
|
||||||
|
on inference rules. First, though, observe that the same instruction may be valid
|
||||||
|
for one program, and invalid for another. For instance, \\(\\texttt{jmp} \\; 100\\)
|
||||||
|
is perfectly valid for a program with thousands of instructions, but if it occurs
|
||||||
|
in a program with only 3 instructions, it will certainly lead to disaster. Specifically,
|
||||||
|
the validity of an instruction depends on the length of the program in which it resides,
|
||||||
|
and the program counter at which it's encountered.
|
||||||
|
Thus, we refine our idea of validity to "being valid for a program of length \\(n\\) at program counter \\(f\\)".
|
||||||
|
For this, we can use the following two inference rules:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\frac
|
||||||
|
{c : \text{Fin} \; n}
|
||||||
|
{\texttt{add} \; t \; \text{valid for} \; n, c }
|
||||||
|
\quad
|
||||||
|
\frac
|
||||||
|
{c : \text{Fin} \; n \quad o \in \{\texttt{nop}, \texttt{jmp}\} \quad J_v(c, t) = \text{Some} \; c' }
|
||||||
|
{o \; t \; \text{valid for} \; n, c }
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
The first rule states that if a program has length \\(n\\), then \\(\\texttt{add}\\) is valid
|
||||||
|
at any program counter whose value is less than \\(n\\). This is because running
|
||||||
|
\\(\\texttt{add}\\) will increment the program counter \\(c\\) by 1,
|
||||||
|
and thus, create a new program counter that's less than \\(n+1\\),
|
||||||
|
which, as we discussed above, is perfectly valid.
|
||||||
|
|
||||||
|
The second rule works for the other two instructions. It has an extra premise:
|
||||||
|
the result of `jump_valid_t` (written as \\(J_v\\)) has to be \\(\\text{Some} \\; c'\\),
|
||||||
|
that is, `jump_valid_t` must succeed. Note that we require this even for no-ops,
|
||||||
|
since it later turns out that one of them may be a jump after all.
|
||||||
|
|
||||||
|
We now have our validity rules. If an instruction satisfies them for a given program
|
||||||
|
and at a given program counter, evaluating it will always result in a program counter that has a proper value.
|
||||||
|
We encode the rules in Coq as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 152 157 >}}
|
||||||
|
|
||||||
|
Note that we have three rules instead of two. This is because we "unfolded"
|
||||||
|
\\(o\\) from our second rule: rather than using set notation (or "or"), we
|
||||||
|
just generated two rules that vary in nothing but the operation involved.
|
||||||
|
|
||||||
|
Of course, we must have that every instruction in a program is valid.
|
||||||
|
We don't really need inference rules for this, as much as a "forall" quantifier.
|
||||||
|
A program \\(p\\) of length \\(n\\) is valid if the following holds:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\forall (c : \text{Fin} \; n). p[c] \; \text{valid for} \; n, c
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
That is, for every possible in-bounds program counter \\(c\\),
|
||||||
|
the instruction at the program counter is valid. We can now
|
||||||
|
encode this in Coq, too:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 160 161 >}}
|
||||||
|
|
||||||
|
In the above, `n` is made implicit where possible.
|
||||||
|
Since \\(c\\) (called `pc` in the code) is of type \\(\\text{Fin} \\; n\\), there's no
|
||||||
|
need to write \\(n\\) _again_. The curly braces tell Coq to infer that
|
||||||
|
argument where possible.
|
||||||
|
|
||||||
|
### Proving Termination
|
||||||
|
Here we go! It's finally time to make some claims about our
|
||||||
|
definitions. Who knows - maybe we wrote down total garbage!
|
||||||
|
We will be creating several related lemmas and theorems.
|
||||||
|
All of them share two common assumptions:
|
||||||
|
|
||||||
|
* We have some valid program `inp` of length `n`.
|
||||||
|
* This program is a valid input, that is, `valid_input` holds for it.
|
||||||
|
There's no sense in arguing based on an invalid input program.
|
||||||
|
|
||||||
|
We represent these grouped assumptions by opening a Coq
|
||||||
|
`Section`, which we call `ValidInput`, and listing our assumptions:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 163 166 >}}
|
||||||
|
|
||||||
|
We had to also explicitly mention the length `n` of our program.
|
||||||
|
From now on, the variables `n`, `inp`, and `Hv` will be
|
||||||
|
available to all of the proofs we write in this section.
|
||||||
|
The first proof is rather simple. The claim is:
|
||||||
|
|
||||||
|
> For our valid program, at any program counter `pc`
|
||||||
|
and accumulator `acc`, there must exist another program
|
||||||
|
counter `pc'` and accumulator `acc'` such that the
|
||||||
|
instruction evaluation relation \\((\rightarrow_i)\\)
|
||||||
|
connects the two. That is, valid addresses aside,
|
||||||
|
we can always make a step.
|
||||||
|
|
||||||
|
Here is this claim encoded in Coq:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 168 169 >}}
|
||||||
|
|
||||||
|
We start our proof by introducing all the relevant variables into
|
||||||
|
the global context. I've mentioned this when I wrote about
|
||||||
|
day 1, but here's the gist: the `intros` keyword takes
|
||||||
|
variables from a `forall`, and makes them concrete.
|
||||||
|
In short, `intros x` is very much like saying "suppose
|
||||||
|
we have an `x`", and going on with the proof.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 170 171 >}}
|
||||||
|
|
||||||
|
Here, we said "take any program counter `pc` and any
|
||||||
|
accumulator `acc`". Now what? Well, first of all,
|
||||||
|
we want to take a look at the instruction at the current
|
||||||
|
`pc`. We know that this instruction is a combination
|
||||||
|
of an opcode and a number, so we use `destruct` to get
|
||||||
|
access to both of these parts:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 172 172 >}}
|
||||||
|
|
||||||
|
Now, Coq reports the following proof state:
|
||||||
|
|
||||||
|
```
|
||||||
|
1 subgoal
|
||||||
|
|
||||||
|
n : nat
|
||||||
|
inp : input n
|
||||||
|
Hv : valid_input inp
|
||||||
|
pc : Fin.t n
|
||||||
|
acc : t
|
||||||
|
o : opcode
|
||||||
|
t0 : t
|
||||||
|
Hop : nth inp pc = (o, t0)
|
||||||
|
|
||||||
|
========================= (1 / 1)
|
||||||
|
|
||||||
|
exists (pc' : fin (S n)) (acc' : t),
|
||||||
|
step_noswap (o, t0) (pc, acc) (pc', acc')
|
||||||
|
```
|
||||||
|
|
||||||
|
We have some opcode `o`, and some associated number
|
||||||
|
`t0`, and we must show that there exist a `pc'`
|
||||||
|
and `acc'` to which we can move on. To prove
|
||||||
|
that something exists in Coq, we must provide
|
||||||
|
an instance of that "something". If we claim
|
||||||
|
that there exists a dog that's not a good boy,
|
||||||
|
we better have this elusive creature in hand.
|
||||||
|
In other words, proofs in Coq are [constructive](https://en.wikipedia.org/wiki/Constructive_proof).
|
||||||
|
Without knowing the kind of operation we're dealing with, we can't
|
||||||
|
say for sure how the step will proceed. Thus, we proceed by
|
||||||
|
case analysis on `o`.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 173 173 >}}
|
||||||
|
|
||||||
|
There are three possible cases we have to consider,
|
||||||
|
one for each type of instruction.
|
||||||
|
|
||||||
|
* If the instruction is \\(\\texttt{add}\\), we know
|
||||||
|
that `pc' = pc + 1` and `acc' = acc + t0`. That is,
|
||||||
|
the program counter is simply incremented, and the accumulator
|
||||||
|
is modified with the number part of the instruction.
|
||||||
|
* If the instruction is \\(\\texttt{nop}\\), the program
|
||||||
|
counter will again be incremented (`pc' = pc + 1`),
|
||||||
|
but the accumulator will stay the same, so `acc' = acc`.
|
||||||
|
* If the instruction is \\(\\texttt{jmp}\\), things are
|
||||||
|
more complicated. We must rely on the assumption
|
||||||
|
that our input is valid, which tells us that adding
|
||||||
|
`t0` to our `pc` will result in `Some f`, and not `None`.
|
||||||
|
Given this, we have `pc' = f`, and `acc' = acc`.
|
||||||
|
|
||||||
|
This is how these three cases are translated to Coq:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 174 177 >}}
|
||||||
|
|
||||||
|
For the first two cases, we simply provide the
|
||||||
|
values we expect for `pc'` and `acc'`, and
|
||||||
|
apply the corresponding inference rule that
|
||||||
|
is satisfied by these values. For the third case, we have
|
||||||
|
to invoke `Hv`, the hypothesis that our input is valid.
|
||||||
|
In particular, we care about the instruction at `pc`,
|
||||||
|
so we use `specialize` to plug `pc` into the more general
|
||||||
|
hypothesis. We then replace `nth inp pc` with its known
|
||||||
|
value, `(jmp, t0)`. This tells us the following, in Coq's words:
|
||||||
|
|
||||||
|
```
|
||||||
|
Hv : valid_inst (jmp, t0) pc
|
||||||
|
```
|
||||||
|
|
||||||
|
That is, `(jmp, t0)` is a valid instruction at `pc`. Then, using
|
||||||
|
Coq's `inversion` tactic, we ask: how is this possible? There is
|
||||||
|
only one inference rule that gives us such a conclusion, and it is named `valid_inst_jmp`
|
||||||
|
in our Coq code. Since we have a proof that our `jmp` is valid,
|
||||||
|
it must mean that this rule was used. Furthermore, since this
|
||||||
|
rule requires that `valid_jump_t` evaluates to `Some f'`, we know
|
||||||
|
that this must be the case here! Coq now adds the following
|
||||||
|
two lines to our proof state:
|
||||||
|
|
||||||
|
```
|
||||||
|
f' : fin (S n)
|
||||||
|
H0 : valid_jump_t pc t0 = Some f'
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, we specify, as mentioned earlier, that `pc' = f'` and `acc' = acc`.
|
||||||
|
As before, we apply the corresponding step rule for `jmp`. When it asks
|
||||||
|
for a proof that `valid_jump_t` produces a valid program counter,
|
||||||
|
we hand it `H0` using `apply H0`. And with that, Coq is happy!
|
||||||
|
|
||||||
|
Next, we prove a claim that a valid program can always do _something_,
|
||||||
|
and that something is one of three things:
|
||||||
|
|
||||||
|
* It can terminate in the "ok" state if the program counter
|
||||||
|
reaches the program's end.
|
||||||
|
* It can terminate with an error if it's currently at a program
|
||||||
|
counter that is not included in the valid set.
|
||||||
|
* Otherwise, it can run the current instruction and advance
|
||||||
|
to a "next" state.
|
||||||
|
|
||||||
|
Alternatively, we could say that one of the inference rules
|
||||||
|
for \\((\\Rightarrow_p)\\) must apply. This is not the case if the input
|
||||||
|
is not valid, since, as I said
|
||||||
|
before, an arbitrary input program can lead us to jump
|
||||||
|
to a negative address (or to an address _way_ past the end of the program).
|
||||||
|
Here's the claim, translated to Coq:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 181 186 >}}
|
||||||
|
|
||||||
|
Informally, we can prove this as follows:
|
||||||
|
|
||||||
|
* If the current program counter is equal to the length
|
||||||
|
of the program, we've reached the end. Thus, the program
|
||||||
|
can terminate in the "ok" state.
|
||||||
|
* Otherwise, the current program counter must be
|
||||||
|
less than the length of the program.
|
||||||
|
* If we've already encountered this program counter (that is,
|
||||||
|
if it's gone from the set of valid program counters),
|
||||||
|
then the program will terminate in the "error" state.
|
||||||
|
* Otherwise, the program counter is in the set of
|
||||||
|
valid instructions. By our earlier theorem, in a valid
|
||||||
|
program, the instruction at any program counter can be correctly
|
||||||
|
executed, taking us to the next state. Now too
|
||||||
|
our program can move to this next state.
|
||||||
|
|
||||||
|
Below is the Coq translation of the above.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 187 203 >}}
|
||||||
|
|
||||||
|
It doesn't seem like we're that far from being done now.
|
||||||
|
A program can always take a step, and each time it does,
|
||||||
|
the set of valid program counters decreases in size. Eventually,
|
||||||
|
this set will become empty, so if nothing else, our program will
|
||||||
|
eventually terminate in an "error" state. Thus, it will stop
|
||||||
|
running no matter what.
|
||||||
|
|
||||||
|
This seems like a task for induction, in this case on the size
|
||||||
|
of the valid set. In particular, strong mathematical induction
|
||||||
|
{{< sidenote "right" "strong-induction-note" "seem to work best." >}}
|
||||||
|
Why strong induction? If we remove a single element from a set,
|
||||||
|
its size should decrease strictly by 1. Thus, why would we need
|
||||||
|
to care about sets of <em>all</em> sizes less than the current
|
||||||
|
set's size?<br>
|
||||||
|
<br>
|
||||||
|
Unfortunately, we're not working with purely mathematical sets.
|
||||||
|
Coq's default facility for sets is simply a layer on top
|
||||||
|
of good old lists, and makes no effort to be "correct by construction".
|
||||||
|
It is thus perfectly possible to have a "set" which includes an element
|
||||||
|
twice. Depending on the implementation of <code>set_remove</code>,
|
||||||
|
we may end up removing the repeated element multiple times, thereby
|
||||||
|
shrinking the length of our list by more than 1. I'd rather
|
||||||
|
not worry about implementation details like that.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
Someone on StackOverflow [implemented this](https://stackoverflow.com/questions/45872719/how-to-do-induction-on-the-length-of-a-list-in-coq),
|
||||||
|
so I'll just use it. The Coq theorem corresponding to strong induction
|
||||||
|
on the length of a list is as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 205 207 >}}
|
||||||
|
|
||||||
|
It reads,
|
||||||
|
|
||||||
|
> If for some list `l`, the property `P` holding for all lists
|
||||||
|
shorter than `l` means that it also holds for `l` itself, then
|
||||||
|
`P` holds for all lists.
|
||||||
|
|
||||||
|
This is perhaps not particularly elucidating. We can alternatively
|
||||||
|
think of this as trying to prove some property for all lists `l`.
|
||||||
|
We start with all empty lists. Here, we have nothing else to rely
|
||||||
|
on; there are no lists shorter than the empty list, and our property
|
||||||
|
must hold for all empty lists. Then, we move on to proving
|
||||||
|
the property for all lists of length 1, already knowing that it holds
|
||||||
|
for all empty lists. Once we're done there, we move on to proving
|
||||||
|
that `P` holds for all lists of length 2, now knowing that it holds
|
||||||
|
for all empty lists _and_ all lists of length 1. We continue
|
||||||
|
doing this, eventually covering lists of any length.
|
||||||
|
|
||||||
|
Before proving termination, there's one last thing we have to
|
||||||
|
take care of. Coq's standard library does not come with
|
||||||
|
a proof that removing an element from a set makes it smaller;
|
||||||
|
we have to provide it ourselves. Here's the claim encoded
|
||||||
|
in Coq:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 217 219 >}}
|
||||||
|
|
||||||
|
This reads, "if a set `s` contains a finite natural
|
||||||
|
number `f`, removing `f` from `s` reduces the set's size".
|
||||||
|
The details of the proof are not particularly interesting,
|
||||||
|
and I hope that you understand intuitively why this is true.
|
||||||
|
Finally, we make our termination claim.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 230 231 >}}
|
||||||
|
|
||||||
|
It's quite a strong claim - given _any_ program counter,
|
||||||
|
set of valid addresses, and accumulator, a valid input program
|
||||||
|
will terminate. Let's take a look at the proof.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 232 234 >}}
|
||||||
|
|
||||||
|
We use `intros` again. However, it brings in variables
|
||||||
|
in order, and we really only care about the _second_ variable.
|
||||||
|
We thus `intros` the first two, and then "put back" the first
|
||||||
|
one using `generalize dependent`. Then, we proceed by
|
||||||
|
induction on length, as seen above.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 235 236>}}
|
||||||
|
|
||||||
|
Now we're in the "inductive step". Our inductive hypothesis
|
||||||
|
is that any set of valid addresses smaller than the current one will
|
||||||
|
guarantee that the program will terminate. We must show
|
||||||
|
that using our set, too, will guarantee termination. We already
|
||||||
|
know that a valid input, given a state, can have one of three
|
||||||
|
possible outcomes: "ok" termination, "failed" termination,
|
||||||
|
or a "step". We use `destruct` to take a look at each of these
|
||||||
|
in turn. The first two cases ("ok" termination and "failed" termination)
|
||||||
|
are fairly trivial:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 237 240 >}}
|
||||||
|
|
||||||
|
We basically connect the dots between the premises (in a form like `done`)
|
||||||
|
and the corresponding inference rule (`run_noswap_ok`). The more
|
||||||
|
interesting case is when we can take a step.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "aoc-2020/day8.v" 241 253 >}}
|
||||||
|
|
||||||
|
Since we know we can take a step, we know that we'll be removing
|
||||||
|
the current program counter from the set of valid addresses. This
|
||||||
|
set must currently contain the present program counter (since otherwise
|
||||||
|
we'd have "failed"), and thus will shrink when we remove it. This,
|
||||||
|
in turn, lets us use the inductive hypothesis: it tells us that no matter the
|
||||||
|
program counter or accumulator, if we start with this new "shrunk"
|
||||||
|
set, we will terminate in some state. Coq's constructive
|
||||||
|
nature helps us here: it doesn't just tell us that there is some state
|
||||||
|
in which we terminate - it gives us that state! We use `edestruct` to get
|
||||||
|
a handle on this final state, which Coq automatically names `x`. At this
|
||||||
|
time Coq still isn't convinced that our new set is smaller, so we invoke
|
||||||
|
our earlier `set_remove_length` theorem to placate it.
|
||||||
|
|
||||||
|
We now have all the pieces: we know that we can take a step, removing
|
||||||
|
the current program counter from our current set. We also know that
|
||||||
|
with that newly shrunken set, we'll terminate in some final state `x`.
|
||||||
|
Thus, all that's left to say is to apply our "step" rule. It asks
|
||||||
|
us for three things:
|
||||||
|
|
||||||
|
1. That the current program counter is in the set. We've long since
|
||||||
|
established this, and `auto` takes care of that.
|
||||||
|
2. That a step is possible. We've already established this, too,
|
||||||
|
since we're in the "can take a step" case. We apply `Hst`,
|
||||||
|
the hypothesis that confirms that we can, indeed, step.
|
||||||
|
3. That we terminate after this. The `x` we got
|
||||||
|
from our induction hypothesis came with a proof that
|
||||||
|
running with the "next" program counter and accumulator
|
||||||
|
will result in termination. We apply this proof, automatically
|
||||||
|
named `H0` by Coq.
|
||||||
|
|
||||||
|
And that's it! We've proved that a program terminates no matter what.
|
||||||
|
This has also (almost!) given us a solution to part 1. Consider the case
|
||||||
|
in which we start with program counter 0, accumulator 0, and the "full"
|
||||||
|
set of allowed program counters. Since our proof works for _all_ configurations,
|
||||||
|
it will also work for this one. Furthermore, since Coq proofs are constructive,
|
||||||
|
this proof will __return to us the final program counter and accumulator!__
|
||||||
|
This is precisely what we'd need to solve part 1.
|
||||||
|
|
||||||
|
But wait, almost? What's missing? We're missing a few implementation details:
|
||||||
|
* We've not provided a concrete implementation of integers. The simplest
|
||||||
|
thing to do here would be to use [`Coq.ZArith.BinInt`](https://coq.inria.fr/library/Coq.ZArith.BinInt.html),
|
||||||
|
for which there is a module [`Z_as_Int`](https://coq.inria.fr/library/Coq.ZArith.Int.html#Z_as_Int)
|
||||||
|
that provides `t` and friends.
|
||||||
|
* We assumed (reasonably, I would say) that it's possible to convert a natural
|
||||||
|
number to an integer. If we're using the aforementioned `BinInt` module,
|
||||||
|
we can use [`Z.of_nat`](https://coq.inria.fr/library/Coq.ZArith.BinIntDef.html#Z.of_nat).
|
||||||
|
* We also assumed (still reasonably) that we can try to convert an integer
|
||||||
|
back to a finite natural number, failing if it's too small or too large.
|
||||||
|
There's no built-in function for this, but `Z`, for one, distinguishes
|
||||||
|
between the "positive", "zero", and "negative" cases, and we have
|
||||||
|
`Pos.to_nat` for the positive case.
|
||||||
|
|
||||||
|
Well, I seem to have covered all the implementation details. Why not just
|
||||||
|
go ahead and solve the problem? I tried, and ran into two issues:
|
||||||
|
|
||||||
|
* Although this is "given", we assumed that our input program will be
|
||||||
|
valid. For us to use the result of our Coq proof, we need to provide it
|
||||||
|
a constructive proof that our program is valid. Creating this proof is tedious
|
||||||
|
in theory, and quite difficult in practice: I've run into a
|
||||||
|
strange issue trying to pattern match on finite naturals.
|
||||||
|
* Even supposing we _do_ have a proof of validity, I'm not certain
|
||||||
|
if it's possible to actually extract an answer from it. It seems
|
||||||
|
that Coq distinguishes between proofs (things of type `Prop`) and
|
||||||
|
values (things of type `Set`). Things of type `Prop` are supposed
|
||||||
|
to be _erased_. This means that when you convert Coq code,
|
||||||
|
to, say, Haskell, you will see no trace of any `Prop`s in that generated
|
||||||
|
code. Unfortunately, this also means we
|
||||||
|
[can't use our proofs to construct values](https://stackoverflow.com/questions/27322979/why-coq-doesnt-allow-inversion-destruct-etc-when-the-goal-is-a-type),
|
||||||
|
even though our proof objects do indeed contain them.
|
||||||
|
|
||||||
|
So, we "theoretically" have a solution to part 1, down to the algorithm
|
||||||
|
used to compute it and a proof that our algorithm works. In "reality", though, we
|
||||||
|
can't actually use this solution to procure an answer. Like we did with day 1, we'll have
|
||||||
|
to settle for only a proof.
|
||||||
|
|
||||||
|
Let's wrap up for this post. It would be more interesting to devise and
|
||||||
|
formally verify an algorithm for part 2, but this post has already gotten
|
||||||
|
quite long and contains a lot of information. Perhaps I will revisit this
|
||||||
|
at a later time. Thanks for reading!
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: A Language for an Assignment - Homework 2
|
title: A Language for an Assignment - Homework 2
|
||||||
date: 2019-12-30T20:05:10-08:00
|
date: 2019-12-30T20:05:10-08:00
|
||||||
tags: ["Haskell", "Python", "Algorithms"]
|
tags: ["Haskell", "Python", "Algorithms", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
After the madness of the
|
After the madness of the
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: A Language for an Assignment - Homework 3
|
title: A Language for an Assignment - Homework 3
|
||||||
date: 2020-01-02T22:17:43-08:00
|
date: 2020-01-02T22:17:43-08:00
|
||||||
tags: ["Haskell", "Python", "Algorithms"]
|
tags: ["Haskell", "Python", "Algorithms", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
It rained in Sunriver on New Year's Eve, and it continued to rain
|
It rained in Sunriver on New Year's Eve, and it continued to rain
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ title: Compiling a Functional Language Using C++, Part 10 - Polymorphism
|
|||||||
date: 2020-03-25T17:14:20-07:00
|
date: 2020-03-25T17:14:20-07:00
|
||||||
tags: ["C and C++", "Functional Languages", "Compilers"]
|
tags: ["C and C++", "Functional Languages", "Compilers"]
|
||||||
description: "In this post, we extend our compiler's typechecking algorithm to implement the Hindley-Milner type system, allowing for polymorphic functions."
|
description: "In this post, we extend our compiler's typechecking algorithm to implement the Hindley-Milner type system, allowing for polymorphic functions."
|
||||||
|
favorite: true
|
||||||
---
|
---
|
||||||
|
|
||||||
[In part 8]({{< relref "08_compiler_llvm.md" >}}), we wrote some pretty interesting programs in our little language.
|
[In part 8]({{< relref "08_compiler_llvm.md" >}}), we wrote some pretty interesting programs in our little language.
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
title: "How Many Values Does a Boolean Have?"
|
title: "How Many Values Does a Boolean Have?"
|
||||||
date: 2020-08-21T23:05:55-07:00
|
date: 2020-08-21T23:05:55-07:00
|
||||||
tags: ["Java", "Haskell", "C and C++"]
|
tags: ["Java", "Haskell", "C and C++"]
|
||||||
|
favorite: true
|
||||||
---
|
---
|
||||||
|
|
||||||
A friend of mine recently had an interview for a software
|
A friend of mine recently had an interview for a software
|
||||||
|
|||||||
BIN
content/blog/codelines/example.png
Normal file
BIN
content/blog/codelines/example.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 42 KiB |
268
content/blog/codelines/index.md
Normal file
268
content/blog/codelines/index.md
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
---
|
||||||
|
title: "Pleasant Code Includes with Hugo"
|
||||||
|
date: 2021-01-13T21:31:29-08:00
|
||||||
|
tags: ["Hugo"]
|
||||||
|
---
|
||||||
|
|
||||||
|
Ever since I started [the compiler series]({{< relref "00_compiler_intro.md" >}}),
|
||||||
|
I began to include more and more fragments of code into my blog.
|
||||||
|
I didn't want to be copy-pasting my code between my project
|
||||||
|
and my Markdown files, so I quickly wrote up a Hugo [shortcode](https://gohugo.io/content-management/shortcodes/)
|
||||||
|
to pull in other files in the local directory. I've since improved on this
|
||||||
|
some more, so I thought I'd share what I created with others.
|
||||||
|
|
||||||
|
### Including Entire Files and Lines
|
||||||
|
My needs for snippets were modest at first. For the most part,
|
||||||
|
I had a single code file that I wanted to present, so it was
|
||||||
|
acceptable to plop it in the middle of my post in one piece.
|
||||||
|
The shortcode for that was quite simple:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ highlight (readFile (printf "code/%s" (.Get 1))) (.Get 0) "" }}
|
||||||
|
```
|
||||||
|
|
||||||
|
This leverages Hugo's built-in [`highlight`](https://gohugo.io/functions/highlight/)
|
||||||
|
function to provide syntax highlighting to the included snippet. Hugo
|
||||||
|
doesn't guess at the language of the code, so you have to manually provide
|
||||||
|
it. Calling this shortcode looks as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{</* codeblock "C++" "compiler/03/type.hpp" */>}}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that this implicitly adds the `code/` prefix to all
|
||||||
|
the files I include. This is a personal convention: I want
|
||||||
|
all my code to be inside a dedicated directory.
|
||||||
|
|
||||||
|
Of course, including entire files only takes you so far.
|
||||||
|
What if you only need to discuss a small part of your code?
|
||||||
|
Alternatively, what if you want to present code piece-by-piece,
|
||||||
|
in the style of literate programming? I quickly ran into the
|
||||||
|
need to do this, for which I wrote another shortcode:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ $s := (readFile (printf "code/%s" (.Get 1))) }}
|
||||||
|
{{ $t := split $s "\n" }}
|
||||||
|
{{ if not (eq (int (.Get 2)) 1) }}
|
||||||
|
{{ .Scratch.Set "u" (after (sub (int (.Get 2)) 1) $t) }}
|
||||||
|
{{ else }}
|
||||||
|
{{ .Scratch.Set "u" $t }}
|
||||||
|
{{ end }}
|
||||||
|
{{ $v := first (add (sub (int (.Get 3)) (int (.Get 2))) 1) (.Scratch.Get "u") }}
|
||||||
|
{{ if (.Get 4) }}
|
||||||
|
{{ .Scratch.Set "opts" (printf ",%s" (.Get 4)) }}
|
||||||
|
{{ else }}
|
||||||
|
{{ .Scratch.Set "opts" "" }}
|
||||||
|
{{ end }}
|
||||||
|
{{ highlight (delimit $v "\n") (.Get 0) (printf "linenos=table,linenostart=%d%s" (.Get 2) (.Scratch.Get "opts")) }}
|
||||||
|
```
|
||||||
|
|
||||||
|
This shortcode takes a language and a filename as before, but it also takes
|
||||||
|
the numbers of the first and last lines indicating the part of the code that should be included. After
|
||||||
|
splitting the contents of the file into lines, it throws away all lines before and
|
||||||
|
after the window of code that you want to include. It seems to me (from my commit history)
|
||||||
|
that Hugo's [`after`](https://gohugo.io/functions/after/) function (which should behave
|
||||||
|
similarly to Haskell's `drop`) doesn't like to be given an argument of `0`.
|
||||||
|
I had to add a special case for when this would occur, where I simply do not invoke `after` at all.
|
||||||
|
The shortcode can be used as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{</* codelines "C++" "compiler/04/ast.cpp" 19 22 */>}}
|
||||||
|
```
|
||||||
|
|
||||||
|
To support a fuller range of Hugo's functionality, I also added an optional argument that
|
||||||
|
accepts Hugo's Chroma settings. This way, I can do things like highlight certain
|
||||||
|
lines in my code snippet, which is done as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{</* codelines "Idris" "typesafe-interpreter/TypesafeIntrV3.idr" 31 39 "hl_lines=7 8 9" */>}}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that the `hl_lines` field doesn't seem to work properly with `linenostart`, which means
|
||||||
|
that the highlighted lines are counted from 1 no matter what. This is why in the above snippet,
|
||||||
|
although I include lines 31 through 39, I feed lines 7, 8, and 9 to `hl_lines`. It's unusual,
|
||||||
|
but hey, it works!
|
||||||
|
|
||||||
|
### Linking to Referenced Code
|
||||||
|
Some time after implementing my initial system for including lines of code,
|
||||||
|
I got an email from a reader who pointed out that it was hard for them to find
|
||||||
|
the exact file I was referencing, and to view the surrounding context of the
|
||||||
|
presented lines. To address this, I decided that I'd include the link
|
||||||
|
to the file in question. After all, my website and all the associated
|
||||||
|
code is on a [Git server I host](https://dev.danilafe.com/Web-Projects/blog-static),
|
||||||
|
so any local file I'm referencing should -- assuming it was properly committed --
|
||||||
|
show up there, too. I hardcoded the URL of the `code` directory on the web interface,
|
||||||
|
and appended the relative path of each included file to it. The shortcode came out as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ $s := (readFile (printf "code/%s" (.Get 1))) }}
|
||||||
|
{{ $t := split $s "\n" }}
|
||||||
|
{{ if not (eq (int (.Get 2)) 1) }}
|
||||||
|
{{ .Scratch.Set "u" (after (sub (int (.Get 2)) 1) $t) }}
|
||||||
|
{{ else }}
|
||||||
|
{{ .Scratch.Set "u" $t }}
|
||||||
|
{{ end }}
|
||||||
|
{{ $v := first (add (sub (int (.Get 3)) (int (.Get 2))) 1) (.Scratch.Get "u") }}
|
||||||
|
{{ if (.Get 4) }}
|
||||||
|
{{ .Scratch.Set "opts" (printf ",%s" (.Get 4)) }}
|
||||||
|
{{ else }}
|
||||||
|
{{ .Scratch.Set "opts" "" }}
|
||||||
|
{{ end }}
|
||||||
|
<div class="highlight-group">
|
||||||
|
<div class="highlight-label">From <a href="https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/{{ .Get 1 }}">{{ path.Base (.Get 1) }}</a>,
|
||||||
|
{{ if eq (.Get 2) (.Get 3) }}line {{ .Get 2 }}{{ else }} lines {{ .Get 2 }} through {{ .Get 3 }}{{ end }}</div>
|
||||||
|
{{ highlight (delimit $v "\n") (.Get 0) (printf "linenos=table,linenostart=%d%s" (.Get 2) (.Scratch.Get "opts")) }}
|
||||||
|
</div>
|
||||||
|
```
|
||||||
|
|
||||||
|
This results in code blocks like the one in the image below. The image
|
||||||
|
is the result of the `codelines` call for the Idris language, presented above.
|
||||||
|
|
||||||
|
{{< figure src="example.png" caption="An example of how the code looks." class="medium" >}}
|
||||||
|
|
||||||
|
I got a lot of mileage out of this setup . . . until I wanted to include code from _other_ git repositories.
|
||||||
|
For instance, I wanted to talk about my [Advent of Code](https://adventofcode.com/) submissions,
|
||||||
|
without having to copy-paste the code into my blog repository!
|
||||||
|
|
||||||
|
### Code from Submodules
|
||||||
|
My first thought when including code from other repositories was to use submodules.
|
||||||
|
This has the added advantage of "pinning" the version of the code I'm talking about,
|
||||||
|
which means that even if I push significant changes to the other repository, the code
|
||||||
|
in my blog will remain the same. This, in turn, means that all of my `codelines`
|
||||||
|
shortcodes will work as intended.
|
||||||
|
|
||||||
|
The problem is, most Git web interfaces (my own included) don't display paths corresponding
|
||||||
|
to submodules. Thus, even if all my code is checked out and Hugo correctly
|
||||||
|
pulls the selected lines into its HTML output, the _links to the file_ remain
|
||||||
|
broken!
|
||||||
|
|
||||||
|
There's no easy way to address this, particularly because _different submodules
|
||||||
|
can be located on different hosts_! The Git URL used for a submodule is
|
||||||
|
not known to Hugo (since, to the best of my knowledge, it can't run
|
||||||
|
shell commands), and it could reside on `dev.danilafe.com`, or `github.com`,
|
||||||
|
or elsewhere. Fortunately, it's fairly easy to tell when a file is part
|
||||||
|
of a submodule, and which submodule that is. It's sufficient to find
|
||||||
|
the longest submodule path that matches the selected file. If no
|
||||||
|
submodule path matches, then the file is part of the blog repository,
|
||||||
|
and no special action is needed.
|
||||||
|
|
||||||
|
Of course, this means that Hugo needs to be made aware of the various
|
||||||
|
submodules in my repository. It also needs to be aware of the submodules
|
||||||
|
_inside_ those submodules, and so on: it needs to be recursive. Git
|
||||||
|
has a command to list all submodules recursively:
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
git submodule status --recursive
|
||||||
|
```
|
||||||
|
|
||||||
|
However, this only prints the commit, submodule path, and the upstream branch.
|
||||||
|
I don't think there's a way to list the remotes' URLs with this command; however,
|
||||||
|
we do _need_ the URLs, since that's how we create links to the Git web interfaces.
|
||||||
|
|
||||||
|
There's another issue: how do we let Hugo know about the various submodules,
|
||||||
|
even if we can find them? Hugo can read files, but doing any serious
|
||||||
|
text processing is downright impractical. However, Hugo
|
||||||
|
itself is not able to run commands, so it needs to be able to read in
|
||||||
|
the output of another command that _can_ find submodules.
|
||||||
|
|
||||||
|
I settled on using Hugo's `params` configuration option. This
|
||||||
|
allows users to communicate arbitrary properties to Hugo themes
|
||||||
|
and templates. In my case, I want to communicate a collection
|
||||||
|
of submodules. I didn't know about TOML's inline tables, so
|
||||||
|
I decided to represent this collection as a map of (meaningless)
|
||||||
|
submodule names to tables:
|
||||||
|
|
||||||
|
```TOML
|
||||||
|
[params]
|
||||||
|
[params.submoduleLinks]
|
||||||
|
[params.submoduleLinks.aoc2020]
|
||||||
|
url = "https://dev.danilafe.com/Advent-of-Code/AdventOfCode-2020/src/commit/7a8503c3fe1aa7e624e4d8672aa9b56d24b4ba82"
|
||||||
|
path = "aoc-2020"
|
||||||
|
```
|
||||||
|
|
||||||
|
Since it was seemingly impossible to wrangle Git into outputting
|
||||||
|
all of this information using one command, I decided
|
||||||
|
to write a quick Ruby script to generate a list of submodules
|
||||||
|
as follows. I had to use `cd` in one of my calls to Git
|
||||||
|
because Git's `--git-dir` option doesn't seem to work
|
||||||
|
with submodules, treating them like a "bare" checkout.
|
||||||
|
I also chose to use an allowlist of remote URLs,
|
||||||
|
since the URL format for linking to files in a
|
||||||
|
particular repository differs from service to service.
|
||||||
|
For now, I only use my own Git server, so only `dev.danilafe.com`
|
||||||
|
is allowed; however, just by adding `elsif`s to my code,
|
||||||
|
I can add other services in the future.
|
||||||
|
|
||||||
|
```Ruby
|
||||||
|
puts "[params]"
|
||||||
|
puts " [params.submoduleLinks]"
|
||||||
|
|
||||||
|
def each_submodule(base_path)
|
||||||
|
`cd #{base_path} && git submodule status`.lines do |line|
|
||||||
|
hash, path = line[1..].split " "
|
||||||
|
full_path = "#{base_path}/#{path}"
|
||||||
|
url = `git config --file #{base_path}/.gitmodules --get 'submodule.#{path}.url'`.chomp.delete_suffix(".git")
|
||||||
|
safe_name = full_path.gsub(/\/|-|_\./, "")
|
||||||
|
|
||||||
|
if url =~ /dev.danilafe.com/
|
||||||
|
file_url = "#{url}/src/commit/#{hash}"
|
||||||
|
else
|
||||||
|
raise "Submodule URL #{url.dump} not in a known format!"
|
||||||
|
end
|
||||||
|
|
||||||
|
yield ({ :path => full_path, :url => file_url, :name => safe_name })
|
||||||
|
each_submodule(full_path) { |m| yield m }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
each_submodule(".") do |m|
|
||||||
|
next unless m[:path].start_with? "./code/"
|
||||||
|
puts " [params.submoduleLinks.#{m[:name].delete_prefix(".code")}]"
|
||||||
|
puts " url = #{m[:url].dump}"
|
||||||
|
puts " path = #{m[:path].delete_prefix("./code/").dump}"
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
I pipe the output of this script into a separate configuration file
|
||||||
|
called `config-gen.toml`, and then run Hugo as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
hugo --config config.toml,config-gen.toml
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, I had to modify my shortcode to find and handle the longest submodule prefix.
|
||||||
|
Here's the relevant portion, and you can
|
||||||
|
[view the entire file here](https://dev.danilafe.com/Web-Projects/blog-static/src/commit/bfeae89ab52d1696c4a56768b7f0c6682efaff82/themes/vanilla/layouts/shortcodes/codelines.html).
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ .Scratch.Set "bestLength" -1 }}
|
||||||
|
{{ .Scratch.Set "bestUrl" (printf "https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/%s" (.Get 1)) }}
|
||||||
|
{{ $filePath := (.Get 1) }}
|
||||||
|
{{ $scratch := .Scratch }}
|
||||||
|
{{ range $module, $props := .Site.Params.submoduleLinks }}
|
||||||
|
{{ $path := index $props "path" }}
|
||||||
|
{{ $bestLength := $scratch.Get "bestLength" }}
|
||||||
|
{{ if and (le $bestLength (len $path)) (hasPrefix $filePath $path) }}
|
||||||
|
{{ $scratch.Set "bestLength" (len $path) }}
|
||||||
|
{{ $scratch.Set "bestUrl" (printf "%s%s" (index $props "url") (strings.TrimPrefix $path $filePath)) }}
|
||||||
|
{{ end }}
|
||||||
|
{{ end }}
|
||||||
|
```
|
||||||
|
|
||||||
|
And that's what I'm using at the time of writing!
|
||||||
|
|
||||||
|
### Conclusion
|
||||||
|
My current system for code includes allows me to do the following
|
||||||
|
things:
|
||||||
|
|
||||||
|
* Include entire files or sections of files into the page. This
|
||||||
|
saves me from having to copy and paste code manually, which
|
||||||
|
is error prone and can cause inconsistencies.
|
||||||
|
* Provide links to the files I reference on my Git interface.
|
||||||
|
This allows users to easily view the entire file that I'm talking about.
|
||||||
|
* Correctly link to files in repositories other than my blog
|
||||||
|
repository, when they are included using submodules. This means
|
||||||
|
I don't need to manually copy and update code from other projects.
|
||||||
|
|
||||||
|
I hope some of these shortcodes and script come in handy for someone else.
|
||||||
|
Thank you for reading!
|
||||||
376
content/blog/coq_dawn.md
Normal file
376
content/blog/coq_dawn.md
Normal file
@@ -0,0 +1,376 @@
|
|||||||
|
---
|
||||||
|
title: "Formalizing Dawn in Coq"
|
||||||
|
date: 2021-11-20T19:04:57-08:00
|
||||||
|
tags: ["Coq", "Dawn", "Programming Languages"]
|
||||||
|
---
|
||||||
|
|
||||||
|
The [_Foundations of Dawn_](https://www.dawn-lang.org/posts/foundations-ucc/) article came up
|
||||||
|
on [Lobsters](https://lobste.rs/s/clatuv/foundations_dawn_untyped_concatenative) recently.
|
||||||
|
In this article, the author of Dawn defines a core calculus for the language, and provides its
|
||||||
|
semantics. The core calculus is called the _untyped concatenative calculus_, or UCC.
|
||||||
|
The definitions in the semantics seemed so clean and straightforward that I wanted to try my hand at
|
||||||
|
translating them into machine-checked code. I am most familiar with [Coq](https://coq.inria.fr/),
|
||||||
|
and that's what I reached for when making this attempt.
|
||||||
|
|
||||||
|
### Defining the Syntax
|
||||||
|
#### Expressions and Intrinsics
|
||||||
|
This is mostly the easy part. A UCC expression is one of three things:
|
||||||
|
|
||||||
|
* An "intrinsic", written \\(i\\), which is akin to a built-in function or command.
|
||||||
|
* A "quote", written \\([e]\\), which takes a UCC expression \\(e\\) and moves it onto the stack (UCC is stack-based).
|
||||||
|
* A composition of several expressions, written \\(e_1\\ e_2\\ \\ldots\\ e_n\\), which effectively evaluates them in order.
|
||||||
|
|
||||||
|
This is straightforward to define in Coq, but I'm going to make a little simplifying change.
|
||||||
|
Instead of making "composition of \\(n\\) expressions" a core language feature, I'll only
|
||||||
|
allow "composition of \\(e_1\\) and \\(e_2\\)", written \\(e_1\\ e_2\\). This change does not
|
||||||
|
in any way reduce the power of the language; we can still
|
||||||
|
{{< sidenote "right" "assoc-note" "write \(e_1\ e_2\ \ldots\ e_n\) as \((e_1\ e_2)\ \ldots\ e_n\)." >}}
|
||||||
|
The same expression can, of course, be written as \(e_1\ \ldots\ (e_{n-1}\ e_n)\).
|
||||||
|
So, which way should we <em>really</em> use when translating the many-expression composition
|
||||||
|
from the Dawn article into the two-expression composition I am using here? Well, the answer is,
|
||||||
|
it doesn't matter; expression composition is <em>associative</em>, so both ways effectively mean
|
||||||
|
the same thing.<br>
|
||||||
|
<br>
|
||||||
|
This is quite similar to what we do in algebra: the regular old addition operator, \(+\) is formally
|
||||||
|
only defined for pairs of numbers, like \(a+b\). However, no one really bats an eye when we
|
||||||
|
write \(1+2+3\), because we can just insert parentheses any way we like, and get the same result:
|
||||||
|
\((1+2)+3\) is the same as \(1+(2+3)\).
|
||||||
|
{{< /sidenote >}}
|
||||||
|
With that in mind, we can translate each of the three types of expressions in UCC into cases
|
||||||
|
of an inductive data type in Coq.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 12 15 >}}
|
||||||
|
|
||||||
|
Why do we need `e_int`? We do because a token like \\(\\text{swap}\\) can be viewed
|
||||||
|
as belonging to the set of intrinsics \\(i\\), or the set of expressions, \\(e\\). While writing
|
||||||
|
down the rules in mathematical notation, what exactly the token means is inferred from context - clearly
|
||||||
|
\\(\\text{swap}\\ \\text{drop}\\) is an expression built from two other expressions. In statically-typed
|
||||||
|
functional languages like Coq or Haskell, however, the same expression can't belong to two different,
|
||||||
|
arbitrary types. Thus, to turn an intrinsic into an expression, we need to wrap it up in a constructor,
|
||||||
|
which we called `e_int` here. Other than that, `e_quote` accepts as argument another expression, `e` (the
|
||||||
|
thing being quoted), and `e_comp` accepts two expressions, `e1` and `e2` (the two sub-expressions being composed).
|
||||||
|
|
||||||
|
The definition for intrinsics themselves is even simpler:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 4 10 >}}
|
||||||
|
|
||||||
|
We simply define a constructor for each of the six intrinsics. Since none of the intrinsic
|
||||||
|
names are reserved in Coq, we can just call our constructors exactly the same as their names
|
||||||
|
in the written formalization.
|
||||||
|
|
||||||
|
#### Values and Value Stacks
|
||||||
|
Values are up next. My initial thought was to define a value much like
|
||||||
|
I defined an intrinsic expression: by wrapping an expression in a constructor for a new data
|
||||||
|
type. Something like:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Inductive value :=
|
||||||
|
| v_quot (e : expr).
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, `v_quot (e_int swap)` would be the Coq translation of the expression \\([\\text{swap}]\\).
|
||||||
|
However, I didn't decide on this approach for two reasons:
|
||||||
|
|
||||||
|
* There are now two ways to write a quoted expression: either `v_quote e` to represent
|
||||||
|
a quoted expression that is a value, or `e_quote e` to represent a quoted expression
|
||||||
|
that is just an expression. In the extreme case, the value \\([[e]]\\) would
|
||||||
|
be represented by `v_quote (e_quote e)` - two different constructors for the same concept,
|
||||||
|
in the same expression!
|
||||||
|
* When formalizing the lambda calculus,
|
||||||
|
[Programming Language Foundations](https://softwarefoundations.cis.upenn.edu/plf-current/Stlc.html)
|
||||||
|
uses an inductively-defined property to indicate values. In the simply typed lambda calculus,
|
||||||
|
much like in UCC, values are a subset of expressions.
|
||||||
|
|
||||||
|
I took instead the approach from Programming Language Foundations: a value is merely an expression
|
||||||
|
for which some predicate, `IsValue`, holds. We will define this such that `IsValue (e_quote e)` is provable,
|
||||||
|
but also such that there is no way to prove `IsValue (e_int swap)`, since _that_ expression is not
|
||||||
|
a value. But what does "provable" mean, here?
|
||||||
|
|
||||||
|
By the [Curry-Howard correspondence](https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence),
|
||||||
|
a predicate is just a function that takes _something_ and returns a type. Thus, if \\(\\text{Even}\\)
|
||||||
|
is a predicate, then \\(\\text{Even}\\ 3\\) is actually a type. Since \\(\\text{Even}\\) takes
|
||||||
|
numbers in, it is a predicate on numbers. Our \\(\\text{IsValue}\\) predicate will be a predicate
|
||||||
|
on expressions, instead. In Coq, we can write this as:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 19 19 >}}
|
||||||
|
|
||||||
|
You might be thinking,
|
||||||
|
|
||||||
|
> Huh, `Prop`? But you just said that predicates return types!
|
||||||
|
|
||||||
|
This is a good observation; In Coq, `Prop` is a special sort of type that corresponds to logical
|
||||||
|
propositions. It's special for a few reasons, but those reasons are beyond the scope of this post;
|
||||||
|
for our purposes, it's sufficient to think of `IsValue e` as a type.
|
||||||
|
|
||||||
|
Alright, so what good is this new `IsValue e` type? Well, we will define `IsValue` such that
|
||||||
|
this type is only _inhabited_ if `e` is a value according to the UCC specification. A type
|
||||||
|
is inhabited if and only if we can find a value of that type. For instance, the type of natural
|
||||||
|
numbers, `nat`, is inhabited, because any number, like `0`, has this type. Uninhabited types
|
||||||
|
are harder to come by, but take as an example the type `3 = 4`, the type of proofs that three is equal
|
||||||
|
to four. Three is _not_ equal to four, so we can never find a proof of equality, and thus, `3 = 4` is
|
||||||
|
uninhabited. As I said, `IsValue e` will only be inhabited if `e` is a value per the formal
|
||||||
|
specification of UCC; specifically, this means that `e` is a quoted expression, like `e_quote e'`.
|
||||||
|
|
||||||
|
To this end, we define `IsValue` as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 19 20 >}}
|
||||||
|
|
||||||
|
Now, `IsValue` is a new data type with only one constructor, `Val_quote`. For any expression `e`,
|
||||||
|
this constructor creates a value of type `IsValue (e_quote e)`. Two things are true here:
|
||||||
|
|
||||||
|
* Since `Val_quote` accepts any expression `e` to be put inside `e_quote`, we can use
|
||||||
|
`Val_quote` to create an `IsValue` instance for any quoted expression.
|
||||||
|
* Because `Val_quote` is the _only_ constructor, and because it always returns `IsValue (e_quote e)`,
|
||||||
|
there's no way to get `IsValue (e_int i)`, or anything else.
|
||||||
|
|
||||||
|
Thus, `IsValue e` is inhabited if and only if `e` is a UCC value, as we intended.
|
||||||
|
|
||||||
|
Just one more thing. A value is just an expression, but Coq only knows about this as long
|
||||||
|
as there's an `IsValue` instance around to vouch for it. To be able to reason about values, then,
|
||||||
|
we will need both the expression and its `IsValue` proof. Thus, we define the type `value` to mean
|
||||||
|
a pair of two things: an expression `v` and a proof that it's a value, `IsValue v`:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 22 22 >}}
|
||||||
|
|
||||||
|
A value stack is just a list of values:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 23 23 >}}
|
||||||
|
|
||||||
|
### Semantics
|
||||||
|
Remember our `IsValue` predicate? Well, it's not just any predicate, it's a _unary_ predicate.
|
||||||
|
_Unary_ means that it's a predicate that only takes one argument, an expression in our case. However,
|
||||||
|
this is far from the only type of predicate. Here are some examples:
|
||||||
|
|
||||||
|
* Equality, `=`, is a binary predicate in Coq. It takes two arguments, say `x` and `y`, and builds
|
||||||
|
a type `x = y` that is only inhabited if `x` and `y` are equal.
|
||||||
|
* The mathematical "less than" relation is also a binary predicate, and it's called `le` in Coq.
|
||||||
|
It takes two numbers `n` and `m` and returns a type `le n m` that is only inhabited if `n` is less
|
||||||
|
than or equal to `m`.
|
||||||
|
* The evaluation relation in UCC is a ternary predicate. It takes two stacks, `vs` and `vs'`,
|
||||||
|
and an expression, `e`, and creates a type that's inhabited if and only if evaluating
|
||||||
|
`e` starting at a stack `vs` results in the stack `vs'`.
|
||||||
|
|
||||||
|
Binary predicates are just functions of two inputs that return types. For instance, here's what Coq has
|
||||||
|
to say about the type of `eq`:
|
||||||
|
|
||||||
|
```
|
||||||
|
eq : ?A -> ?A -> Prop
|
||||||
|
```
|
||||||
|
|
||||||
|
By a similar logic, ternary predicates, much like UCC's evaluation relation, are functions
|
||||||
|
of three inputs. We can thus write the type of our evaluation relation as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 35 35 >}}
|
||||||
|
|
||||||
|
We define the constructors just like we did in our `IsValue` predicate. For each evaluation
|
||||||
|
rule in UCC, such as:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\langle V, v, v'\rangle\ \text{swap}\ \rightarrow\ \langle V, v', v \rangle
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
We introduce a constructor. For the `swap` rule mentioned above, the constructor looks like this:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 28 28 >}}
|
||||||
|
|
||||||
|
Although the stacks are written in reverse order (which is just a consequence of Coq's list notation),
|
||||||
|
I hope that the correspondence is fairly clear. If it's not, try reading this rule out loud:
|
||||||
|
|
||||||
|
> The rule `Sem_swap` says that for every two values `v` and `v'`, and for any stack `vs`,
|
||||||
|
evaluating `swap` in the original stack `v' :: v :: vs`, aka \\(\\langle V, v, v'\\rangle\\),
|
||||||
|
results in a final stack `v :: v' :: vs`, aka \\(\\langle V, v', v\\rangle\\).
|
||||||
|
|
||||||
|
With that in mind, here's a definition of a predicate `Sem_int`, the evaluation predicate
|
||||||
|
for intrinsics:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 27 33 >}}
|
||||||
|
|
||||||
|
Hey, what's all this with `v_quote` and `projT1`? It's just a little bit of bookkeeping.
|
||||||
|
Given a value -- a pair of an expression `e` and a proof `IsValue e` -- the function `projT1`
|
||||||
|
just returns the expression `e`. That is, it's basically a way of converting a value back into
|
||||||
|
an expression. The function `v_quote` takes us in the other direction: given an expression \\(e\\),
|
||||||
|
it constructs a quoted expression \\([e]\\), and combines it with a proof that the newly constructed
|
||||||
|
quote is a value.
|
||||||
|
|
||||||
|
The above two functions in combination help us define the `quote` intrinsic, which
|
||||||
|
wraps a value on the stack in an additional layer of quotes. When we create a new quote, we
|
||||||
|
need to push it onto the value stack, so it needs to be a value; we thus use `v_quote`. However,
|
||||||
|
`v_quote` needs an expression to wrap in a quote, so we use `projT1` to extract the expression from
|
||||||
|
the value on top of the stack.
|
||||||
|
|
||||||
|
In addition to intrinsics, we also define the evaluation relation for actual expressions.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 35 39 >}}
|
||||||
|
|
||||||
|
Here, we may as well go through the three constructors to explain what they mean:
|
||||||
|
|
||||||
|
* `Sem_e_int` says that if the expression being evaluated is an intrinsic, and if the
|
||||||
|
intrinsic has an effect on the stack as described by `Sem_int` above, then the effect
|
||||||
|
of the expression itself is the same.
|
||||||
|
* `Sem_e_quote` says that if the expression is a quote, then a corresponding quoted
|
||||||
|
value is placed on top of the stack.
|
||||||
|
* `Sem_e_comp` says that if one expression `e1` changes the stack from `vs1` to `vs2`,
|
||||||
|
and if another expression `e2` takes this new stack `vs2` and changes it into `vs3`,
|
||||||
|
then running the two expressions one after another (i.e. composing them) means starting
|
||||||
|
at stack `vs1` and ending in stack `vs3`.
|
||||||
|
|
||||||
|
### \\(\\text{true}\\), \\(\\text{false}\\), \\(\\text{or}\\) and Proofs
|
||||||
|
Now it's time for some fun! The UCC language specification starts by defining two values:
|
||||||
|
true and false. Why don't we do the same thing?
|
||||||
|
|
||||||
|
|UCC Spec| Coq encoding |
|
||||||
|
|---|----|
|
||||||
|
|\\(\\text{false}\\)=\\([\\text{drop}]\\)| {{< codelines "Coq" "dawn/Dawn.v" 41 42 >}}
|
||||||
|
|\\(\\text{true}\\)=\\([\\text{swap} \\ \\text{drop}]\\)| {{< codelines "Coq" "dawn/Dawn.v" 44 45 >}}
|
||||||
|
|
||||||
|
Let's try to prove that these two work as intended.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 47 53 >}}
|
||||||
|
|
||||||
|
This is the first real proof in this article. Rather than getting into the technical details,
|
||||||
|
I invite you to take a look at the "shape" of the proof:
|
||||||
|
|
||||||
|
* After the initial use of `intros`, which brings the variables `v`, `v'`, and `vs` into
|
||||||
|
scope, we start by applying `Sem_e_comp`. Intuitively, this makes sense - at the top level,
|
||||||
|
our expression, \\(\\text{false}\\ \\text{apply}\\),
|
||||||
|
is a composition of two other expressions, \\(\\text{false}\\) and \\(\\text{apply}\\).
|
||||||
|
Because of this, we need to use the rule from our semantics that corresponds to composition.
|
||||||
|
* The composition rule requires that we describe the individual effects on the stack of the
|
||||||
|
two constituent expressions (recall that the first expression takes us from the initial stack `v1`
|
||||||
|
to some intermediate stack `v2`, and the second expression takes us from that stack `v2` to the
|
||||||
|
final stack `v3`). Thus, we have two "bullet points":
|
||||||
|
* The first expression, \\(\\text{false}\\), is just a quoted expression. Thus, the rule
|
||||||
|
`Sem_e_quote` applies, and the contents of the quote are pushed onto the stack.
|
||||||
|
* The second expression, \\(\\text{apply}\\), is an intrinsic, so we need to use the rule
|
||||||
|
`Sem_e_int`, which handles the intrinsic case. This, in turn, requires that we show
|
||||||
|
the effect of the intrinsic itself; the `apply` intrinsic evaluates the quoted expression
|
||||||
|
on the stack.
|
||||||
|
The quoted expression contains the body of false, or \\(\\text{drop}\\). This is
|
||||||
|
once again an intrinsic, so we use `Sem_e_int`; the intrinsic in question is \\(\\text{drop}\\),
|
||||||
|
so the `Sem_drop` rule takes care of that.
|
||||||
|
|
||||||
|
Following these steps, we arrive at the fact that evaluating `false` on the stack simply drops the top
|
||||||
|
element, as specified. The proof for \\(\\text{true}\\) is very similar in spirit:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 55 63 >}}
|
||||||
|
|
||||||
|
We can also formalize the \\(\\text{or}\\) operator:
|
||||||
|
|
||||||
|
|UCC Spec| Coq encoding |
|
||||||
|
|---|----|
|
||||||
|
|\\(\\text{or}\\)=\\(\\text{clone}\\ \\text{apply}\\)| {{< codelines "Coq" "dawn/Dawn.v" 65 65 >}}
|
||||||
|
|
||||||
|
We can write two top-level proofs about how this works: the first says that \\(\\text{or}\\),
|
||||||
|
when the first argument is \\(\\text{false}\\), just returns the second argument (this is in agreement
|
||||||
|
with the truth table, since \\(\\text{false}\\) is the identity element of \\(\\text{or}\\)).
|
||||||
|
The proof proceeds much like before:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 67 73 >}}
|
||||||
|
|
||||||
|
To shorten the proof a little bit, I used the `Proof with` construct from Coq, which runs
|
||||||
|
an additional _tactic_ (like `apply`) whenever `...` is used.
|
||||||
|
Because of this, in this proof writing `apply Sem_apply...` is the same
|
||||||
|
as `apply Sem_apply. apply Sem_e_int`. Since the `Sem_e_int` rule is used a lot, this makes for a
|
||||||
|
very convenient shorthand.
|
||||||
|
|
||||||
|
Similarly, we prove that \\(\\text{or}\\) applied to \\(\\text{true}\\) always returns \\(\\text{true}\\).
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 75 83 >}}
|
||||||
|
|
||||||
|
Finally, the specific facts (like \\(\\text{false}\\ \\text{or}\\ \\text{false}\\) evaluating to \\(\\text{false}\\))
|
||||||
|
can be expressed using our two new proofs, `or_false_v` and `or_true`.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 85 88 >}}
|
||||||
|
|
||||||
|
### Derived Expressions
|
||||||
|
#### Quotes
|
||||||
|
The UCC specification defines \\(\\text{quote}_n\\) to make it more convenient to quote
|
||||||
|
multiple terms. For example, \\(\\text{quote}_2\\) composes and quotes the first two values
|
||||||
|
on the stack. This is defined in terms of other UCC expressions as follows:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\text{quote}_n = \text{quote}_{n-1}\ \text{swap}\ \text{quote}\ \text{swap}\ \text{compose}
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
We can write this in Coq as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 90 94 >}}
|
||||||
|
|
||||||
|
This definition diverges slightly from the one given in the UCC specification; particularly,
|
||||||
|
UCC's spec mentions that \\(\\text{quote}_n\\) is only defined for \\(n \\geq 1\\). However,
|
||||||
|
this means that in our code, we'd have to somehow handle the error that would arise if the
|
||||||
|
term \\(\\text{quote}\_0\\) is used. Instead, I defined `quote_n n` to simply mean
|
||||||
|
\\(\\text{quote}\_{n+1}\\); thus, in Coq, no matter what `n` we use, we will have a valid
|
||||||
|
expression, since `quote_n 0` will simply correspond to \\(\\text{quote}_1 = \\text{quote}\\).
|
||||||
|
|
||||||
|
We can now attempt to prove that this definition is correct by ensuring that the examples given
|
||||||
|
in the specification are valid. We may thus write,
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 96 106 >}}
|
||||||
|
|
||||||
|
We used a new tactic here, `repeat`, but overall, the structure of the proof is pretty straightforward:
|
||||||
|
the definition of `quote_n` consists of many intrinsics, and we apply the corresponding rules
|
||||||
|
one-by-one until we arrive at the final stack. Writing this proof was kind of boring, since
|
||||||
|
I just had to see which intrinsic is being used in each step, and then write a line of `apply`
|
||||||
|
code to handle that intrinsic. This gets worse for \\(\\text{quote}_3\\):
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 108 122 >}}
|
||||||
|
|
||||||
|
It's so long! Instead, I decided to try out Coq's `Ltac2` mechanism to teach Coq how
|
||||||
|
to write proofs like this itself. Here's what I came up with:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 124 136 >}}
|
||||||
|
|
||||||
|
You don't have to understand the details, but in brief, this checks what kind of proof
|
||||||
|
we're asking Coq to do (for instance, if we're trying to prove that a \\(\\text{swap}\\)
|
||||||
|
instruction has a particular effect), and tries to apply a corresponding semantic rule.
|
||||||
|
Thus, it will try `Sem_swap` if the expression is \\(\\text{swap}\\),
|
||||||
|
`Sem_clone` if the expression is \\(\\text{clone}\\), and so on. Then, the two proofs become:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 138 144 >}}
|
||||||
|
|
||||||
|
#### Rotations
|
||||||
|
There's a little trick to formalizing rotations. Values have an important property:
|
||||||
|
when a value is run against a stack, all it does is place itself on a stack. We can state
|
||||||
|
this as follows:
|
||||||
|
|
||||||
|
{{< latex >}}
|
||||||
|
\langle V \rangle\ v = \langle V\ v \rangle
|
||||||
|
{{< /latex >}}
|
||||||
|
|
||||||
|
Or, in Coq,
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 148 149 >}}
|
||||||
|
|
||||||
|
This is the trick to how \\(\\text{rotate}_n\\) works: it creates a quote of \\(n\\) reordered and composed
|
||||||
|
values on the stack, and then evaluates that quote. Since evaluating each value
|
||||||
|
just places it on the stack, these values end up back on the stack, in the same order that they
|
||||||
|
were in the quote. When writing the proof, `solve_basic ()` gets us almost all the way to the
|
||||||
|
end (evaluating a list of values against a stack). Then, we simply apply the composition
|
||||||
|
rule over and over, following it up with `eval_value` to prove that each value is just being
|
||||||
|
placed back on the stack.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 156 168 >}}
|
||||||
|
|
||||||
|
### `e_comp` is Associative
|
||||||
|
When composing three expressions, which way of inserting parentheses is correct?
|
||||||
|
Is it \\((e_1\\ e_2)\\ e_3\\)? Or is it \\(e_1\\ (e_2\\ e_3)\\)? Well, both!
|
||||||
|
Expression composition is associative, which means that the order of the parentheses
|
||||||
|
doesn't matter. We state this in the following theorem, which says that the two
|
||||||
|
ways of writing the composition, if they evaluate to anything, evaluate to the same thing.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 170 171 >}}
|
||||||
|
|
||||||
|
### Conclusion
|
||||||
|
That's all I've got in me for today. However, we got pretty far! The UCC specification
|
||||||
|
says:
|
||||||
|
|
||||||
|
> One of my long term goals for UCC is to democratize formal software verification in order to make it much more feasible and realistic to write perfect software.
|
||||||
|
|
||||||
|
I think that UCC is definitely getting there: formally defining the semantics outlined
|
||||||
|
on the page was quite straightforward. We can now have complete confidence in the behavior
|
||||||
|
of \\(\\text{true}\\), \\(\\text{false}\\), \\(\\text{or}\\), \\(\\text{quote}_n\\) and
|
||||||
|
\\(\\text{rotate}_n\\). The proof of associativity is also enough to possibly argue for simplifying
|
||||||
|
the core calculus' syntax even more. All of this we got from an official source, with only
|
||||||
|
a little bit of tweaking to get from the written description of the language to code! I'm
|
||||||
|
looking forward to reading the next post about the _multistack_ concatenative calculus.
|
||||||
325
content/blog/coq_dawn_eval.md
Normal file
325
content/blog/coq_dawn_eval.md
Normal file
@@ -0,0 +1,325 @@
|
|||||||
|
---
|
||||||
|
title: "A Verified Evaluator for the Untyped Concatenative Calculus"
|
||||||
|
date: 2021-11-27T20:24:57-08:00
|
||||||
|
draft: true
|
||||||
|
tags: ["Dawn", "Coq", "Programming Languages"]
|
||||||
|
---
|
||||||
|
|
||||||
|
Earlier, I wrote [an article]({{< relref "./coq_dawn" >}}) in which I used Coq to
|
||||||
|
encode the formal semantics of [Dawn's Untyped Concatenative Calculus](https://www.dawn-lang.org/posts/foundations-ucc/),
|
||||||
|
and to prove a few things about the mini-language. Though I'm quite happy with how that turned out,
|
||||||
|
my article was missing something that's present on the original Dawn page -- an evaluator. In this article, I'll define
|
||||||
|
an evaluator function in Coq, prove that it's equivalent to Dawn's formal semantics,
|
||||||
|
and extract all of this into usable Haskell code.
|
||||||
|
|
||||||
|
### Changes Since Last Time
|
||||||
|
In trying to write and verify this evaluator, I decided to make changes to the way I formalized
|
||||||
|
the UCC. Remember how we used a predicate, `IsValue`, to tag expressions that were values?
|
||||||
|
It turns out that this is a very cumbersome approach. For one thing, this formalization
|
||||||
|
allows for the case in which the exact same expression is a value for two different
|
||||||
|
reasons. Although `IsValue` has only one constructor (`Val_quote`), it's actually
|
||||||
|
{{< sidenote "right" "hott-note" "not provable" >}}
|
||||||
|
Interestingly, it's also not provable that any two proofs of \(a = b\) are equal,
|
||||||
|
even though equality only has one constructor, \(\text{eq_refl}\ :\ a \rightarrow (a = a) \).
|
||||||
|
Under the <a href="https://homotopytypetheory.org/book/">homotopic interpretation</a>
|
||||||
|
of type theory, this corresponds to the fact that two paths from \(a\) to \(b\) need
|
||||||
|
not be homotopic (continuously deformable) to each other.<br>
|
||||||
|
<br>
|
||||||
|
As an intuitive example, imagine wrapping a string around a pole. Holding the ends of
|
||||||
|
the string in place, there's nothing you can do to "unwrap" the string. This string
|
||||||
|
is thus not deformable into a string that starts and stops at the same points,
|
||||||
|
but does not go around the pole.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
that any two proofs of `IsValue e` are equal. I ended up getting into a lot of losing
|
||||||
|
arguments with the Coq runtime about whether or not two stacks are actually the same.
|
||||||
|
Also, some of the semantic rules expected a value on the stack with a particular proof
|
||||||
|
for `IsValue`, and rejected the exact same stack with a generic value.
|
||||||
|
|
||||||
|
Thus, I switched from our old implementation:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Dawn.v" 19 22 >}}
|
||||||
|
|
||||||
|
To the one I originally had in mind:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnV2.v" 19 19 >}}
|
||||||
|
|
||||||
|
I then had the following function to convert a value back into an equivalent expression:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnV2.v" 22 25 >}}
|
||||||
|
|
||||||
|
I replaced instances of `projT1` with instances of `value_to_expr`.
|
||||||
|
|
||||||
|
### Where We Are
|
||||||
|
At the end of my previous article, we ended up with a Coq encoding of the big-step
|
||||||
|
[operational semantics](https://en.wikipedia.org/wiki/Operational_semantics)
|
||||||
|
of UCC, as well as some proofs of correctness about the derived forms from
|
||||||
|
the article (like \\(\\text{quote}_3\\) and \\(\\text{rotate}_3\\)). The trouble
|
||||||
|
is, despite having our operational semantics, we can't make our Coq
|
||||||
|
code run anything. This is for several reasons:
|
||||||
|
|
||||||
|
1. Our definitions only allow us to _confirm_ that given some
|
||||||
|
initial stack, a program ends up in some other final stack. We even have a
|
||||||
|
little `Ltac2` tactic to help us automate this kind of proof. However, in an evaluator,
|
||||||
|
the final stack is not known until the program finishes running. We can't
|
||||||
|
confirm the result of evaluation, we need to _find_ it.
|
||||||
|
2. To address the first point, we could try to write a function that takes a program
|
||||||
|
and an initial stack, and produces a final stack, as well as a proof that
|
||||||
|
the program would evaluate to this stack under our semantics. However,
|
||||||
|
it's quite easy to write a non-terminating UCC program, whereas functions
|
||||||
|
in Coq _have to terminate!_ We can't write a terminating function to
|
||||||
|
run non-terminating code.
|
||||||
|
|
||||||
|
So, is there anything we can do? No, there isn't. Writing an evaluator in Coq
|
||||||
|
is just not possible. The end, thank you for reading.
|
||||||
|
|
||||||
|
Just kidding -- there's definitely a way to get our code evaluating, but it
|
||||||
|
will look a little bit strange.
|
||||||
|
|
||||||
|
### A Step-by-Step Evaluator
|
||||||
|
The trick is to recognize that program evaluation
|
||||||
|
occurs in steps. There may well be an infinite number of steps, if the program
|
||||||
|
is non-terminating, but there's always a step we can take. That is, unless
|
||||||
|
an invalid instruction is run, like trying to clone from an empty stack, or unless
|
||||||
|
the program finished running. You don't need a non-terminating function to just
|
||||||
|
give you a next step, if one exists. We can write such a function in Coq.
|
||||||
|
|
||||||
|
Here's a new data type that encodes the three situations we mentioned in the
|
||||||
|
previous paragraph. Its constructors (one per case) are as follows:
|
||||||
|
|
||||||
|
1. `err` - there are no possible evaluation steps due to an error.
|
||||||
|
3. `middle e s` - the evaluation took a step; the stack changed to `s`, and the rest of the program is `e`.
|
||||||
|
2. `final s` - there are no possible evaluation steps because the evaluation is complete,
|
||||||
|
leaving a final stack `s`.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 6 9 >}}
|
||||||
|
|
||||||
|
We can now write a function that tries to execute a single step given an expression.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 11 27 >}}
|
||||||
|
|
||||||
|
Most intrinsics, by themselves, complete after just one step. For instance, a program
|
||||||
|
consisting solely of \\(\\text{swap}\\) will either fail (if the stack doesn't have enough
|
||||||
|
values), or it will swap the top two values and be done. We list only "correct" cases,
|
||||||
|
and resort to a "catch-all" case on line 26 that returns an error. The one multi-step
|
||||||
|
intrinsic is \\(\\text{apply}\\), which can evaluate an arbitrary expression from the stack.
|
||||||
|
In this case, the "one step" consists of popping the quoted value from the stack; the
|
||||||
|
"remaining program" is precisely the expression that was popped.
|
||||||
|
|
||||||
|
Quoting an expression also always completes in one step (it simply places the quoted
|
||||||
|
expression on the stack). The really interesting case is composition. Expressions
|
||||||
|
are evaluated left-to-right, so we first determine what kind of step the left expression (`e1`)
|
||||||
|
can take. We may need more than one step to finish up with `e1`, so there's a good chance it
|
||||||
|
returns a "rest program" `e1'` and a stack `vs'`. If this happens, to complete evaluation of
|
||||||
|
\\(e_1\\ e_2\\), we need to first finish evaluating \\(e_1'\\), and then evaluate \\(e_2\\).
|
||||||
|
Thus, the "rest of the program" is \\(e_1'\\ e_2\\), or `e_comp e1' e2`. On the other hand,
|
||||||
|
if `e1` finished evaluating, we still need to evaluate `e2`, so our "rest of the program"
|
||||||
|
is \\(e_2\\), or `e2`. If evaluating `e1` led to an error, then so did evaluating `e_comp e1 e2`,
|
||||||
|
and we return `err`.
|
||||||
|
|
||||||
|
### Extracting Code
|
||||||
|
Just knowing a single step is not enough to run the code. We need something that repeatedly
|
||||||
|
tries to take a step, as long as it's possible. However, this part is once again
|
||||||
|
not possible in Coq, as it brings back the possibility of non-termination. So if we can't use
|
||||||
|
Coq, why don't we use another language? Coq's extraction mechanism allows us to do just that.
|
||||||
|
|
||||||
|
I added the following code to the end of my file:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 219 223 >}}
|
||||||
|
|
||||||
|
Coq happily produces a new file, `UccGen.hs` with a lot of code. It's not exactly the most
|
||||||
|
aesthetic; here's a quick peek:
|
||||||
|
|
||||||
|
```Haskell
|
||||||
|
data Intrinsic =
|
||||||
|
Swap
|
||||||
|
| Clone
|
||||||
|
| Drop
|
||||||
|
| Quote
|
||||||
|
| Compose
|
||||||
|
| Apply
|
||||||
|
|
||||||
|
data Expr =
|
||||||
|
E_int Intrinsic
|
||||||
|
| E_quote Expr
|
||||||
|
| E_comp Expr Expr
|
||||||
|
|
||||||
|
data Value =
|
||||||
|
V_quote Expr
|
||||||
|
|
||||||
|
-- ... more
|
||||||
|
```
|
||||||
|
|
||||||
|
All that's left is to make a new file, `Ucc.hs`. I use a different file so that
|
||||||
|
changes I make aren't overridden by Coq each time I run extraction. In this
|
||||||
|
file, we place the "glue code" that tries running one step after another:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/Ucc.hs" 46 51 >}}
|
||||||
|
|
||||||
|
Finally, loading up GHCi using `ghci Ucc.hs`, I can run the following commands:
|
||||||
|
|
||||||
|
```
|
||||||
|
ghci> fromList = foldl1 E_comp
|
||||||
|
ghci> test = eval [] $ fromList [true, false, UccGen.or]
|
||||||
|
ghci> :f test
|
||||||
|
test = Just [V_quote (E_comp (E_int Swap) (E_int Drop))]
|
||||||
|
```
|
||||||
|
|
||||||
|
That is, applying `or` to `true` and `false` results in a stack with only `true` on top.
|
||||||
|
As expected, and proven by our semantics!
|
||||||
|
|
||||||
|
### Proving Equivalence
|
||||||
|
Okay, so `true false or` evaluates to `true`, much like our semantics claims.
|
||||||
|
However, does our evaluator _always_ match the semantics? So far, we have not
|
||||||
|
claimed or verified that it does. Let's try giving it a shot.
|
||||||
|
|
||||||
|
The first thing we can do is show that if we have a proof that `e` takes
|
||||||
|
initial stack `vs` to final stack `vs'`, then each
|
||||||
|
`eval_step` "makes progress" towards `vs'` (it doesn't simply _return_
|
||||||
|
`vs'`, since it only takes a single step and doesn't always complete
|
||||||
|
the evaluation). We also want to show that if the semantics dictates
|
||||||
|
`e` finishes in stack `vs'`, then `eval_step` will never return an error.
|
||||||
|
Thus, we have two possibilities:
|
||||||
|
|
||||||
|
* `eval_step` returns `final`. In this case, for it to match our semantics,
|
||||||
|
the final stack must be the same as `vs'`. Here's the relevant section
|
||||||
|
from the Coq file:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 30 30 >}}
|
||||||
|
* `eval_step` returns `middle`. In this case, the "rest of the program" needs
|
||||||
|
to evaluate to `vs'` according to our semantics (otherwise, taking a step
|
||||||
|
has changed the program's final outcome, which should not happen).
|
||||||
|
We need to quantify the new variables (specifically, the "rest of the
|
||||||
|
program", which we'll call `ei`, and the "stack after one step", `vsi`),
|
||||||
|
for which we use Coq's `exists` clause. The whole relevant statement is as
|
||||||
|
follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 31 33 >}}
|
||||||
|
|
||||||
|
The whole theorem claim is as follows:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 29 33 >}}
|
||||||
|
|
||||||
|
I have the Coq proof script for this (in fact, you can click the link
|
||||||
|
at the top of the code block to view the original source file). However,
|
||||||
|
there's something unsatisfying about this statement. In particular,
|
||||||
|
how do we prove that an entire sequence of steps evaluates
|
||||||
|
to something? We'd have to examine the first step, checking if
|
||||||
|
it's a "final" step or a "middle" step; if it's a "middle" step,
|
||||||
|
we'd have to move on to the "rest of the program" and repeat the process.
|
||||||
|
Each time we'd have to "retrieve" `ei` and `vsi` from `eval_step_correct`,
|
||||||
|
and feed it back to `eval_step`.
|
||||||
|
|
||||||
|
I'll do you one better: how do we even _say_ that an expression "evaluates
|
||||||
|
step-by-step to final stack `vs'`"? For one step, we can say:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
eval_step vs e = final vs'
|
||||||
|
```
|
||||||
|
|
||||||
|
For two steps, we'd have to assert the existence of an intermediate
|
||||||
|
expression (the "rest of the program" after the first step):
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
exists ei vsi,
|
||||||
|
eval_step vs e = middle ei vsi /\ (* First step to intermediate expression. *)
|
||||||
|
eval_step vsi ei = final vs' (* Second step to final state. *)
|
||||||
|
```
|
||||||
|
|
||||||
|
For three steps:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
exists ei1 ei2 vsi1 vsi2,
|
||||||
|
eval_step vs e = middle ei1 vsi1 /\ (* First step to intermediate expression. *)
|
||||||
|
eval_step vsi1 ei1 = middle ei2 vsi2 /\ (* Second intermediate step *)
|
||||||
|
eval_step vsi2 ei2 = final vs' (* Second step to final state. *)
|
||||||
|
```
|
||||||
|
|
||||||
|
This is awful! Not only is this a lot of writing, but it also makes various
|
||||||
|
sequences of steps have a different "shape". This way, we can't make
|
||||||
|
proofs about evaluations of an _arbitrary_ number of steps. Not all is lost, though: if we squint
|
||||||
|
a little at the last example (three steps), a pattern starts to emerge.
|
||||||
|
First, let's re-arrange the `exists` qualifiers:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
exists ei1 vsi1, eval_step vs e = middle ei1 vsi1 /\ (* Cons *)
|
||||||
|
exists ei2 vsi2, eval_step vsi1 ei1 = middle ei2 vsi2 /\ (* Cons *)
|
||||||
|
eval_step vsi2 ei2 = final vs' (* Nil *)
|
||||||
|
```
|
||||||
|
|
||||||
|
If you squint at this, it kind of looks like a list! The "empty"
|
||||||
|
part of a list is the final step, while the "cons" part is a middle step. The
|
||||||
|
analogy holds up for another reason: an "empty" sequence has zero intermediate
|
||||||
|
expressions, while each "cons" introduces a single new intermediate
|
||||||
|
program. Perhaps we can define a new data type that matches this? Indeed
|
||||||
|
we can!
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 64 67 >}}
|
||||||
|
|
||||||
|
The new data type is parameterized by the initial and final stacks, as well
|
||||||
|
as the expression that starts in the former and ends in the latter.
|
||||||
|
Then, consider the following _type_:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
eval_chain nil (e_comp (e_comp true false) or) (true :: nil)
|
||||||
|
```
|
||||||
|
|
||||||
|
This is the type of sequences (or chains) of steps corresponding to the
|
||||||
|
evaluation of the program \\(\\text{true}\\ \\text{false}\\ \\text{or}\\),
|
||||||
|
starting in an empty stack and evaluating to a stack with only
|
||||||
|
\\(\\text{true}\\) on top. Thus to say that an expression evaluates to some
|
||||||
|
final stack `vs'`, in _some unknown number of steps_, it's sufficient to write:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
eval_chain vs e vs'
|
||||||
|
```
|
||||||
|
|
||||||
|
Evaluation chains have a couple of interesting properties. First and foremost,
|
||||||
|
they can be "concatenated". This is analogous to the `Sem_e_comp` rule
|
||||||
|
in our original semantics: if an expression `e1` starts in stack `vs`
|
||||||
|
and finishes in stack `vs'`, and another expression starts in stack `vs'`
|
||||||
|
and finishes in stack `vs''`, then we can compose these two expressions,
|
||||||
|
and the result will start in `vs` and finish in `vs''`.
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 69 75 >}}
|
||||||
|
|
||||||
|
The proof is very short. We go
|
||||||
|
{{< sidenote "right" "concat-note" "by induction on the left evaluation" >}}
|
||||||
|
It's not a coincidence that defining something like <code>(++)</code>
|
||||||
|
(list concatenation) in Haskell typically starts by pattern matching
|
||||||
|
on the <em>left</em> list. In fact, proofs by <code>induction</code>
|
||||||
|
actually correspond to recursive functions! It's tough putting code blocks
|
||||||
|
in sidenotes, but if you're playing with the
|
||||||
|
<a href="https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/dawn/DawnEval.v"><code>DawnEval.v</code></a> file, try running
|
||||||
|
<code>Print eval_chain_ind</code>, and note the presence of <code>fix</code>,
|
||||||
|
the <a href="https://en.wikipedia.org/wiki/Fixed-point_combinator">fixed point
|
||||||
|
combinator</a> used to implement recursion.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
chain (the one for `e1`). Coq takes care of most of the rest with `auto`.
|
||||||
|
The key to this proof is that whatever `P` is contained within a "node"
|
||||||
|
in the left chain determines how `eval_step (e_comp e1 e2)` behaves. Whether
|
||||||
|
`e1` evaluates to `final` or `middle`, the composition evaluates to `middle`
|
||||||
|
(a composition of two expressions cannot be done in one step), so we always
|
||||||
|
{{< sidenote "left" "cons-note" "create a new \"cons\" node." >}}
|
||||||
|
This is <em>unlike</em> list concatenation, since we typically don't
|
||||||
|
create new nodes when appending to an empty list.
|
||||||
|
{{< /sidenote >}} via `chain_middle`. Then, the two cases are as follows:
|
||||||
|
|
||||||
|
* If the step was `final`, then
|
||||||
|
the rest of the steps use only `e2`, and good news, we already have a chain
|
||||||
|
for that!
|
||||||
|
* Otherwise, the step was `middle`, and we still have a chain for some
|
||||||
|
intermediate program `ei` that starts in some stack `vsi` and ends in `vs'`.
|
||||||
|
By induction, we know that _this_ chain can be concatenated with the one
|
||||||
|
for `e2`, resulting in a chain for `e_comp ei e2`. All that remains is to
|
||||||
|
attach to this sub-chain the one step from `(vs, e1)` to `(vsi, ei)`, which
|
||||||
|
is handled by `chain_middle`.
|
||||||
|
|
||||||
|
{{< todo >}}A drawing would be nice here!{{< /todo >}}
|
||||||
|
|
||||||
|
The `merge` operation is reversible; chains can be split into two pieces,
|
||||||
|
one for each composed expression:
|
||||||
|
|
||||||
|
{{< codelines "Coq" "dawn/DawnEval.v" 77 78 >}}
|
||||||
|
|
||||||
|
While interesting, this particular fact isn't used anywhere else in this
|
||||||
|
post, and I will omit the proof here.
|
||||||
83
content/blog/coq_docs/index.md
Normal file
83
content/blog/coq_docs/index.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
---
|
||||||
|
title: "I Don't Like Coq's Documentation"
|
||||||
|
date: 2021-11-24T21:48:59-08:00
|
||||||
|
draft: true
|
||||||
|
tags: ["Coq"]
|
||||||
|
---
|
||||||
|
|
||||||
|
Recently, I wrote an article on [Formalizing Dawn's Core Calculus in Coq]({{< relref "./coq_dawn.md" >}}).
|
||||||
|
One of the proofs (specifically, correctness of \\(\\text{quote}_3\\)) was the best candidate I've ever
|
||||||
|
encountered for proof automation. I knew that proof automation was possible from the second book of Software
|
||||||
|
Foundations, [Programming Language Foundations](https://softwarefoundations.cis.upenn.edu/plf-current/index.html).
|
||||||
|
I went there to learn more, and started my little journey in picking up Coq's `Ltac2`.
|
||||||
|
|
||||||
|
Before I go any further, let me say that I'd self-describe as an "advanced beginner" in Coq, maybe "intermediate"
|
||||||
|
on a good day. I am certainly far from a master. I will also say that I am quite young, and thus possibly spoiled
|
||||||
|
by the explosion of well-documented languages, tools, and libraries. I don't frequently check `man` pages, and I don't
|
||||||
|
often read straight up technical manuals. Maybe the fault lies with me.
|
||||||
|
Nevertheless, I feel like I am where I am in the process of learning Coq
|
||||||
|
in part because of the state of its learning resources.
|
||||||
|
As a case study, let's take my attempt to automate away a pretty simple proof.
|
||||||
|
|
||||||
|
### Grammars instead of Examples
|
||||||
|
I did not specifically remember the chapter of Software Foundation in which tactics were introduced.
|
||||||
|
Instead of skimming through chapters until I found it, I tried to look up "coq custom tactic". The
|
||||||
|
first thing that comes up is the page for `Ltac`.
|
||||||
|
|
||||||
|
After a brief explanation of what `Ltac` is, the documentation jumps straight into the grammar of the entire
|
||||||
|
tactic language. Here's a screenshot of what that looks like:
|
||||||
|
|
||||||
|
{{< figure src="ltac_grammar.png" caption="The grammar of Ltac from the Coq page." class="large" alt="A grammar for the `Ltac` language within Coq. The grammar is in Backus–Naur form, and has 9 nonterminals." >}}
|
||||||
|
|
||||||
|
Good old Backus-Naur form. Now, my two main areas of interest are programming language theory and compilers, and so I'm no stranger to BNF. In fact, the first
|
||||||
|
time I saw such a documentation page (most pages describing Coq language feature have some production rules), I didn't even consciously process that I was looking
|
||||||
|
at grammar rules. However, and despite CompCert (a compiler) being probably the most well known example of a project in Coq, I don't think that Coq is made _just_
|
||||||
|
for people familiar with PLT or compilers. I don't think it should be expected for the average newcomer to Coq (or the average beginner-ish person like me) to know how to read production rules, or to know
|
||||||
|
what terminals and nonterminals are. Logical Foundations sure managed to explain Gallina without resorting to BNFs.
|
||||||
|
|
||||||
|
And even if I, as a newcomer, know what BNF is, and how to read the rules, there's another layer to this specification:
|
||||||
|
the precedence of various operators is encoded in the BNF rules. This is a pretty common pattern
|
||||||
|
for writing down programming language grammars; for each level of operator precedence, there's another
|
||||||
|
nonterminal. We have `ltac_expr4` for sequencing (the least binding operator), and `ltac_expr3` for "level 3 tactics",
|
||||||
|
`ltac_expr2` for addition, logical "or", and "level 2 tactics". The creators of this documentation page clearly knew
|
||||||
|
what they were getting at here, and I've seen this pattern enough times to recognize it right away. But if you
|
||||||
|
_haven't_ seen this pattern before (and why should you have?), you'll need to take some time to decipher the rules.
|
||||||
|
That's time that you'd rather spend trying to write your tactic.
|
||||||
|
|
||||||
|
The page could just as well have mentioned the types of constructs in `Ltac`, and given a table of their relative precedence.
|
||||||
|
This could be an opportunity to give an example of what a program in `Ltac` looks like. Instead, despite having seen all of these nonterminals,
|
||||||
|
I still don't have an image in my mind's eye of what the language looks like. And better yet, I think that the grammar's incorrect:
|
||||||
|
|
||||||
|
```
|
||||||
|
ltac_expr4 ::= ltac_expr3 ; ltac_expr3|binder_tactic
|
||||||
|
```
|
||||||
|
|
||||||
|
The way this is written, there's no way to sequence (using a semicolon) more than two things. The semicolon
|
||||||
|
only occurs on level four, and both nonterminals in this rule are level three. However, Coq is perfectly happy
|
||||||
|
to accept the following:
|
||||||
|
|
||||||
|
```Coq
|
||||||
|
Ltac test := auto; auto; auto.
|
||||||
|
```
|
||||||
|
|
||||||
|
In the `Ltac2` grammar, this is written the way I'd expect:
|
||||||
|
|
||||||
|
```
|
||||||
|
ltac2_expr ::= ltac2_expr5 ; ltac2_expr
|
||||||
|
```
|
||||||
|
|
||||||
|
Let's do a quick recap. We have an encoding that requires a degree of experience with grammars and
|
||||||
|
programming languages to be useful to the reader, _and_ this encoding
|
||||||
|
{{< sidenote "right" "errors-note" "leaves room for errors," >}}
|
||||||
|
Here's a question to ponder: how come this error has gone unnoticed? Surely
|
||||||
|
people used this page to learn <code>Ltac</code>. I believe that part of the reason is
|
||||||
|
pattern matching: an experienced reader will recognize the "precedence trick", and quickly
|
||||||
|
scan the grammar levels to establish precedence. The people writing and proofreading this
|
||||||
|
documentation likely read it this way, too.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
errors that _actually appear in practice_.
|
||||||
|
{{< sidenote "left" "achilles-note" "We have a very precise, yet incorrect definition." >}}
|
||||||
|
Amusingly, I think this is very close to what is considered the achilles heel of formal verification:
|
||||||
|
software that precisely adheres to an incorrect or incomplete specification.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
Somehow, "a sequence of statements separated by a semicolon" seems like a simpler explanation.
|
||||||
BIN
content/blog/coq_docs/ltac_grammar.png
Normal file
BIN
content/blog/coq_docs/ltac_grammar.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 188 KiB |
BIN
content/blog/haskell_lazy_evaluation/lazy-fix.zip
Normal file
BIN
content/blog/haskell_lazy_evaluation/lazy-fix.zip
Normal file
Binary file not shown.
BIN
content/blog/haskell_lazy_evaluation/lazy.zip
Normal file
BIN
content/blog/haskell_lazy_evaluation/lazy.zip
Normal file
Binary file not shown.
79
content/blog/hugo_functions.md
Normal file
79
content/blog/hugo_functions.md
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
---
|
||||||
|
title: "Approximating Custom Functions in Hugo"
|
||||||
|
date: 2021-01-17T18:44:53-08:00
|
||||||
|
tags: ["Hugo"]
|
||||||
|
---
|
||||||
|
|
||||||
|
This will be an uncharacteristically short post. Recently,
|
||||||
|
I wrote about my experience with [including code from local files]({{< relref "codelines" >}}).
|
||||||
|
After I wrote that post, I decided to expand upon my setup. In particular,
|
||||||
|
I wanted to display links to the files I'm referring to, in three
|
||||||
|
different cases: when I'm referring to an entire code file, to an entire raw (non-highlighted)
|
||||||
|
file, or to a portion of a code file.
|
||||||
|
|
||||||
|
The problem was that in all three cases, I needed to determine the
|
||||||
|
correct file URL to link to. The process for doing so is identical: it
|
||||||
|
really only depends on the path to the file I'm including. However,
|
||||||
|
many other aspects of each case are different. In the "entire code file"
|
||||||
|
case, I simply need to read in a file. In the "portion of a code file"
|
||||||
|
case, I have to perform some processing to extract the specific lines I want to include.
|
||||||
|
Whenever I include a code file -- entirely or partially -- I need to invoke the `highlight`
|
||||||
|
function to perform syntax highlighting; however, I don't want to do that when including a raw file.
|
||||||
|
It would be difficult to write a single shortcode or partial to handle all of these different cases.
|
||||||
|
|
||||||
|
However, computing the target URL is a simple transformation
|
||||||
|
of a path and a list of submodules into a link. More plainly,
|
||||||
|
it is a function. Hugo doesn't really have support for
|
||||||
|
custom functions, at least according to this [Discourse post](https://discourse.gohugo.io/t/adding-custom-functions/14164). The only approach to add a _real_ function to Hugo is to edit the Go-based
|
||||||
|
source code, and recompile the thing. However, your own custom functions
|
||||||
|
would then not be included in normal Hugo distributions, and any websites
|
||||||
|
using these functions would not be portable. _Really_ adding your own functions
|
||||||
|
is not viable.
|
||||||
|
|
||||||
|
However, we can approximate functions using Hugo's
|
||||||
|
[scratchpad feature](https://gohugo.io/functions/scratch/).
|
||||||
|
By feeding a
|
||||||
|
{{< sidenote "right" "mutable-note" "scratchpad" >}}
|
||||||
|
In reality, any mutable container will work. The scratchpad
|
||||||
|
just seems like the perfect tool for this purpose.
|
||||||
|
{{< /sidenote >}}
|
||||||
|
to a partial, and expecting the partial to modify the
|
||||||
|
scratchpad in some way, we can effectively recover data.
|
||||||
|
For instance, in my `geturl` partial, I have something like
|
||||||
|
the following:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ .scratch.Set "bestUrl" theUrl }}
|
||||||
|
```
|
||||||
|
|
||||||
|
Once this partial executes, and the rendering engine is back to its call site,
|
||||||
|
the scratchpad will contain `bestUrl`. To call this partial while providing inputs
|
||||||
|
(like the file path, for example), we can use Hugo's `dict` function. An (abridged)
|
||||||
|
example:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ partial "geturl.html" (dict "scratch" .Scratch "path" filePath) }}
|
||||||
|
```
|
||||||
|
|
||||||
|
Now, from inside the partial, we'll be able to access the two variables using `.scratch` and `.path`.
|
||||||
|
Once we've called our partial, we simply extract the returned data from the scratchpad and use it:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ partial "geturl.html" (dict "scratch" .Scratch "path" filePath) }}
|
||||||
|
{{ $bestUrl := .Scratch.Get "bestUrl" }}
|
||||||
|
{{ ... do stuff with $bestUrl ... }}
|
||||||
|
```
|
||||||
|
|
||||||
|
Thus, although it's a little bit tedious, we're able to use `geturl` like a function,
|
||||||
|
thereby refraining from duplicating its definition everywhere the same logic is needed. A few
|
||||||
|
closing thoughts:
|
||||||
|
|
||||||
|
* __Why not just use a real language?__ It's true that I wrote a Ruby script to
|
||||||
|
do some of the work involved with linking submodules. However, generating the same
|
||||||
|
information for all calls to `codelines` would complicate the process of rendering
|
||||||
|
the blog, and make live preview impossible. In general, by limiting the use of external
|
||||||
|
scripts, it's easier to make `hugo` the only "build tool" for the site.
|
||||||
|
* __Is there an easier way?__ I _think_ that calls to `partial` return a string. If you
|
||||||
|
simply wanted to return a string, you could probably do without using a scratchpad.
|
||||||
|
However, if you wanted to do something more complicated (say, return a map or list),
|
||||||
|
you'd probably want the scratchpad method after all.
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: "A Typesafe Representation of an Imperative Language"
|
title: "A Typesafe Representation of an Imperative Language"
|
||||||
date: 2020-11-02T01:07:21-08:00
|
date: 2020-11-02T01:07:21-08:00
|
||||||
tags: ["Idris"]
|
tags: ["Idris", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
A recent homework assignment for my university's programming languages
|
A recent homework assignment for my university's programming languages
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: Meaningfully Typechecking a Language in Idris
|
title: Meaningfully Typechecking a Language in Idris
|
||||||
date: 2020-02-27T21:58:55-08:00
|
date: 2020-02-27T21:58:55-08:00
|
||||||
tags: ["Haskell", "Idris"]
|
tags: ["Haskell", "Idris", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
This term, I'm a TA for Oregon State University's Programming Languages course.
|
This term, I'm a TA for Oregon State University's Programming Languages course.
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
---
|
---
|
||||||
title: Meaningfully Typechecking a Language in Idris, Revisited
|
title: Meaningfully Typechecking a Language in Idris, Revisited
|
||||||
date: 2020-07-22T14:37:35-07:00
|
date: 2020-07-22T14:37:35-07:00
|
||||||
tags: ["Idris"]
|
tags: ["Idris", "Programming Languages"]
|
||||||
|
favorite: true
|
||||||
---
|
---
|
||||||
|
|
||||||
Some time ago, I wrote a post titled [Meaningfully Typechecking a Language in Idris]({{< relref "typesafe_interpreter.md" >}}). The gist of the post was as follows:
|
Some time ago, I wrote a post titled [Meaningfully Typechecking a Language in Idris]({{< relref "typesafe_interpreter.md" >}}). The gist of the post was as follows:
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
---
|
---
|
||||||
title: Meaningfully Typechecking a Language in Idris, With Tuples
|
title: Meaningfully Typechecking a Language in Idris, With Tuples
|
||||||
date: 2020-08-12T15:48:04-07:00
|
date: 2020-08-12T15:48:04-07:00
|
||||||
tags: ["Idris"]
|
tags: ["Idris", "Programming Languages"]
|
||||||
---
|
---
|
||||||
|
|
||||||
Some time ago, I wrote a post titled
|
Some time ago, I wrote a post titled
|
||||||
|
|||||||
120
content/blog/typescript_typesafe_events.md
Normal file
120
content/blog/typescript_typesafe_events.md
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
---
|
||||||
|
title: "Type-Safe Event Emitter in TypeScript"
|
||||||
|
date: 2021-09-04T17:18:49-07:00
|
||||||
|
tags: ["TypeScript"]
|
||||||
|
---
|
||||||
|
|
||||||
|
I've been playing around with TypeScript recently, and enjoying it too.
|
||||||
|
Nearly all of my compile-time type safety desires have been accommodated
|
||||||
|
by the language, and in a rather intuitive and clean way. Today, I'm going
|
||||||
|
to share a little trick I've discovered which allows me to do something that
|
||||||
|
I suspect would normally require [dependent types](https://en.wikipedia.org/wiki/Dependent_type).
|
||||||
|
|
||||||
|
### The Problem
|
||||||
|
Suppose you want to write a class that emits events. Clients can then install handlers,
|
||||||
|
functions that are notified whenever an event is emitted. Easy enough; in JavaScript,
|
||||||
|
this would look something like the following:
|
||||||
|
|
||||||
|
{{< codelines "JavaScript" "typescript-emitter/js1.js" 1 17 >}}
|
||||||
|
|
||||||
|
We can even write some code to test that this works (just to ease my nerves):
|
||||||
|
|
||||||
|
{{< codelines "JavaScript" "typescript-emitter/js1.js" 19 23 >}}
|
||||||
|
|
||||||
|
As expected, we get:
|
||||||
|
|
||||||
|
```
|
||||||
|
Ended!
|
||||||
|
Started!
|
||||||
|
```
|
||||||
|
|
||||||
|
As you probably guessed, we're going to build on this problem a little bit.
|
||||||
|
In certain situations, you don't just care that an event occurred; you also
|
||||||
|
care about additional event data. For instance, when a number changes, you
|
||||||
|
may want to know the number's new value. In JavaScript, this is a trivial change:
|
||||||
|
|
||||||
|
{{< codelines "JavaScript" "typescript-emitter/js2.js" 1 17 "hl_lines = 6-8" >}}
|
||||||
|
|
||||||
|
That's literally it. Once again, let's ensure that this works by sending two new events:
|
||||||
|
`stringChange` and `numberChange`.
|
||||||
|
|
||||||
|
{{< codelines "JavaScript" "typescript-emitter/js2.js" 19 23 >}}
|
||||||
|
|
||||||
|
The result of this code is once again unsurprising:
|
||||||
|
|
||||||
|
```
|
||||||
|
New number value is: 1
|
||||||
|
New string value is: 3
|
||||||
|
```
|
||||||
|
|
||||||
|
But now, how would one go about encoding this in TypeScript? In particular, what is the
|
||||||
|
type of a handler? We could, of course, give each handler the type `(value: any) => void`.
|
||||||
|
This, however, makes handlers unsafe. We could very easily write:
|
||||||
|
|
||||||
|
```TypeScript
|
||||||
|
emitter.addHandler("numberChanged", (value: string) => {
|
||||||
|
console.log("String length is", value.length);
|
||||||
|
});
|
||||||
|
emitted.emit("numberChanged", 1);
|
||||||
|
```
|
||||||
|
|
||||||
|
Which would print out:
|
||||||
|
|
||||||
|
```
|
||||||
|
String length is undefined
|
||||||
|
```
|
||||||
|
|
||||||
|
No, I don't like this. TypeScript is supposed to be all about adding type safety to our code,
|
||||||
|
and this is not at all type safe. We could do all sorts of weird things! There is a way,
|
||||||
|
however, to make this use case work.
|
||||||
|
|
||||||
|
### The Solution
|
||||||
|
|
||||||
|
Let me show you what I came up with:
|
||||||
|
|
||||||
|
{{< codelines "TypeScript" "typescript-emitter/ts.ts" 1 19 "hl_lines=1 2 8 12">}}
|
||||||
|
|
||||||
|
The important changes are on lines 1, 2, 8, and 12 (highlighted in the above code block).
|
||||||
|
Let's go through each one of them step-by-step.
|
||||||
|
|
||||||
|
* __Line 1__: Parameterize the `EventEmitter` by some type `T`. We will use this type `T`
|
||||||
|
to specify the exact kind of events that our `EventEmitter` will be able to emit and handle.
|
||||||
|
Specifically, this type will be in the form `{ event: EventValueType }`. For example,
|
||||||
|
for a `mouseClick` event, we may write `{ mouseClick: { x: number, y: number }}`.
|
||||||
|
* __Line 2__: Add a proper type to `handlers`. This requires several ingredients of its own:
|
||||||
|
* We use [index signatures](https://www.typescriptlang.org/docs/handbook/2/objects.html#index-signatures)
|
||||||
|
to limit the possible names to which handlers can be assigned. We limit these names
|
||||||
|
to the keys of our type `T`; in the preceding example, `keyof T` would be `"mouseClick"`.
|
||||||
|
* We also limit the values: `T[eventName]` retrieves the type of the value associated with
|
||||||
|
key `eventName`. In the mouse example, this type would be `{ x: number, y: number }`. We require
|
||||||
|
that a key can only be associated with an array of functions to void, each of which accepts
|
||||||
|
`T[K]` as first argument. This is precisely what we want; for example, `mouseClick` would map to
|
||||||
|
an array of functions that accept the mouse click location.
|
||||||
|
* __Line 8__: We restrict the type of `emit` to only accept values that correspond to the keys
|
||||||
|
of the type `T`. We can't simply write `event: keyof T`, because this would not give us enough
|
||||||
|
information to retrieve the type of `value`: if `event` is just `keyof T`,
|
||||||
|
then `value` can be any of the values of `T`. Instead, we use generics; this way, when the
|
||||||
|
function is called with `"mouseClick"`, the type of `K` is inferred to also be `"mouseClick"`, which
|
||||||
|
gives TypeScript enough information to narrow the type of `value`.
|
||||||
|
* __Line 12__: We use the exact same trick here as we did on line 8.
|
||||||
|
|
||||||
|
Let's give this a spin with our `numberChange`/`stringChange` example from earlier:
|
||||||
|
|
||||||
|
{{< codelines "TypeScript" "typescript-emitter/ts.ts" 21 27 >}}
|
||||||
|
|
||||||
|
The function calls on lines 24 and 25 are correct, but the subsequent two (on lines 26 and 27)
|
||||||
|
are not, as they attempt to emit the _opposite_ type of the one they're supposed to. And indeed,
|
||||||
|
TypeScript complains about only these two lines:
|
||||||
|
|
||||||
|
```
|
||||||
|
code/typescript-emitter/ts.ts:26:30 - error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'.
|
||||||
|
26 emitter.emit("numberChange", "1");
|
||||||
|
~~~
|
||||||
|
code/typescript-emitter/ts.ts:27:30 - error TS2345: Argument of type 'number' is not assignable to parameter of type 'string'.
|
||||||
|
27 emitter.emit("stringChange", 3);
|
||||||
|
~
|
||||||
|
Found 2 errors.
|
||||||
|
```
|
||||||
|
|
||||||
|
And there you have it! This approach is now also in use in [Hydrogen](https://github.com/vector-im/hydrogen-web),
|
||||||
|
a lightweight chat client for the [Matrix](https://matrix.org/) protocol. In particular, check out [`EventEmitter.ts`](https://github.com/vector-im/hydrogen-web/blob/master/src/utils/EventEmitter.ts).
|
||||||
9
content/favorites.md
Normal file
9
content/favorites.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
title: Favorites
|
||||||
|
type: "favorites"
|
||||||
|
description: Posts from Daniel's personal blog that he has enjoyed writing the most, or that he thinks turned out very well.
|
||||||
|
---
|
||||||
|
The amount of content on this blog is monotonically increasing. Thus, as time goes on, it's becoming
|
||||||
|
harder and harder to see at a glance what kind of articles I write. To address this, I've curated
|
||||||
|
a small selection of articles from this site that I've particularly enjoyed writing, or that I think
|
||||||
|
turned out especially well. They're listed below, most recent first.
|
||||||
14
content/search.md
Normal file
14
content/search.md
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
title: Search
|
||||||
|
type: "search"
|
||||||
|
description: Interactive search for posts on Daniel's personal site.
|
||||||
|
---
|
||||||
|
|
||||||
|
Here's a [Stork](https://github.com/jameslittle230/stork)-powered search for all articles on
|
||||||
|
this site. Stork takes some time to load on slower devices, which is why this isn't on
|
||||||
|
every page (or even on the index page). Because the LaTeX rendering occurs _after_
|
||||||
|
the search indexing, you may see raw LaTeX code in the content of the presented
|
||||||
|
articles, like `\beta`. This does, however, also mean that you can search for mathematical
|
||||||
|
symbols using only the English alphabet!
|
||||||
|
|
||||||
|
If you're just browsing, you could alternatively check out [all posts](/blog), or perhaps my [favorite articles](/favorites) from this blog.
|
||||||
2
layouts/shortcodes/donate_css.html
Normal file
2
layouts/shortcodes/donate_css.html
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
{{ $style := resources.Get "scss/donate.scss" | resources.ToCSS | resources.Minify }}
|
||||||
|
<link rel="stylesheet" href="{{ $style.Permalink }}">
|
||||||
4
layouts/shortcodes/donation_method.html
Normal file
4
layouts/shortcodes/donation_method.html
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
<tr>
|
||||||
|
<td>{{ .Get 0 }}</td>
|
||||||
|
<td><code>{{ .Get 1 }}</code></td>
|
||||||
|
</tr>
|
||||||
3
layouts/shortcodes/donation_methods.html
Normal file
3
layouts/shortcodes/donation_methods.html
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
<table class="donation-methods">
|
||||||
|
{{ .Inner }}
|
||||||
|
</table>
|
||||||
Binary file not shown.
BIN
static/index.st
BIN
static/index.st
Binary file not shown.
27
submodule-links.rb
Normal file
27
submodule-links.rb
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
# Emits a TOML [params.submoduleLinks] table for Hugo's configuration, mapping
# each code submodule of this repository to the URL of its pinned commit on
# dev.danilafe.com. Intended to be run from the repository root, with its
# output appended to Hugo's config.
puts "[params]"
puts " [params.submoduleLinks]"

# Recursively walks the submodules of +base_path+, and the submodules of those
# submodules, yielding one hash per submodule with:
#   :path - the submodule's path relative to the starting directory
#   :url  - a link to the submodule's source tree at its pinned commit
#   :name - the path with "/", "-", and "_." stripped, usable as a TOML key
# Raises if a submodule's URL is not hosted in a known location.
def each_submodule(base_path)
  # `git submodule status` prints lines like " <hash> <path> (<ref>)"; the
  # first character is a status flag, so drop it before splitting.
  # NOTE: the original used `.lines do |line|` — passing a block to
  # String#lines is deprecated; each_line is the supported equivalent.
  `cd #{base_path} && git submodule status`.each_line do |line|
    hash, path = line[1..].split " "
    full_path = "#{base_path}/#{path}"
    # Read the submodule's remote URL from .gitmodules, minus any ".git" suffix.
    url = `git config --file #{base_path}/.gitmodules --get 'submodule.#{path}.url'`.chomp.delete_suffix(".git")
    safe_name = full_path.gsub(/\/|-|_\./, "")

    # Only dev.danilafe.com has a known URL scheme for browsing a commit.
    # (Dots are escaped; the original regex let "." match any character.)
    if url =~ /dev\.danilafe\.com/
      file_url = "#{url}/src/commit/#{hash}"
    else
      raise "Submodule URL #{url.dump} not in a known format!"
    end

    yield ({ :path => full_path, :url => file_url, :name => safe_name })
    # Recurse into nested submodules, forwarding their entries to our caller.
    each_submodule(full_path) { |m| yield m }
  end
end

each_submodule(".") do |m|
  # Only submodules under code/ are referenced by blog posts.
  next unless m[:path].start_with? "./code/"
  puts " [params.submoduleLinks.#{m[:name].delete_prefix(".code")}]"
  puts " url = #{m[:url].dump}"
  puts " path = #{m[:path].delete_prefix("./code/").dump}"
end
|
||||||
1
themes/vanilla
Submodule
1
themes/vanilla
Submodule
Submodule themes/vanilla added at b56ac908f6
@@ -1,20 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2019 YOUR_NAME_HERE
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|
||||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
|
||||||
subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|
||||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|
||||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|
||||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
||||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
+++
|
|
||||||
+++
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 376 B |
Binary file not shown.
|
Before Width: | Height: | Size: 536 B |
@@ -1,93 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
|
|
||||||
$code-color-lineno: grey;
|
|
||||||
$code-color-keyword: black;
|
|
||||||
$code-color-type: black;
|
|
||||||
$code-color-comment: grey;
|
|
||||||
|
|
||||||
.highlight-label {
|
|
||||||
padding: 0.25rem 0.5rem 0.25rem 0.5rem;
|
|
||||||
border: $code-border;
|
|
||||||
border-bottom: none;
|
|
||||||
|
|
||||||
a {
|
|
||||||
font-family: $font-code;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
code {
|
|
||||||
font-family: $font-code;
|
|
||||||
background-color: $code-color;
|
|
||||||
border: $code-border;
|
|
||||||
padding: 0 0.25rem 0 0.25rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
pre code {
|
|
||||||
display: block;
|
|
||||||
box-sizing: border-box;
|
|
||||||
padding: 0.5rem;
|
|
||||||
overflow: auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
.chroma {
|
|
||||||
.lntable {
|
|
||||||
border-spacing: 0;
|
|
||||||
padding: 0.5rem 0 0.5rem 0;
|
|
||||||
background-color: $code-color;
|
|
||||||
border-radius: 0;
|
|
||||||
border: $code-border;
|
|
||||||
display: block;
|
|
||||||
overflow: auto;
|
|
||||||
margin-bottom: 1rem;
|
|
||||||
|
|
||||||
td {
|
|
||||||
padding: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
code {
|
|
||||||
border: none;
|
|
||||||
padding: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
pre {
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.lntd:last-child {
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.lntr {
|
|
||||||
display: table-row;
|
|
||||||
}
|
|
||||||
|
|
||||||
.lnt {
|
|
||||||
display: block;
|
|
||||||
padding: 0 1rem 0 1rem;
|
|
||||||
color: $code-color-lineno;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hl {
|
|
||||||
display: block;
|
|
||||||
background-color: #fffd99;
|
|
||||||
|
|
||||||
.lnt::before {
|
|
||||||
content: "*";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.kr, .k {
|
|
||||||
font-weight: bold;
|
|
||||||
color: $code-color-keyword;
|
|
||||||
}
|
|
||||||
|
|
||||||
.kt {
|
|
||||||
font-weight: bold;
|
|
||||||
color: $code-color-type;
|
|
||||||
}
|
|
||||||
|
|
||||||
.c, .c1 {
|
|
||||||
color: $code-color-comment;
|
|
||||||
}
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
@import "mixins.scss";
|
|
||||||
|
|
||||||
$margin-width: 30rem;
|
|
||||||
$margin-inner-offset: 0.5rem;
|
|
||||||
$margin-outer-offset: 1rem;
|
|
||||||
|
|
||||||
@mixin below-two-margins {
|
|
||||||
@media screen and
|
|
||||||
(max-width: $container-width-threshold +
|
|
||||||
2 * ($margin-width + $margin-inner-offset + $margin-outer-offset)) {
|
|
||||||
@content;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin below-one-margin {
|
|
||||||
@media screen and
|
|
||||||
(max-width: $container-width-threshold +
|
|
||||||
($margin-width + $margin-inner-offset + $margin-outer-offset)) {
|
|
||||||
@content;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin margin-content {
|
|
||||||
display: block;
|
|
||||||
position: absolute;
|
|
||||||
width: $margin-width;
|
|
||||||
box-sizing: border-box;
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin margin-content-left {
|
|
||||||
left: 0;
|
|
||||||
margin-left: -($margin-width + $container-min-padding + $margin-inner-offset);
|
|
||||||
|
|
||||||
@include below-two-margins {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin margin-content-right {
|
|
||||||
right: 0;
|
|
||||||
margin-right: -($margin-width + $container-min-padding + $margin-inner-offset);
|
|
||||||
|
|
||||||
@include below-one-margin {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
|
|
||||||
@mixin bordered-block {
|
|
||||||
border: $standard-border;
|
|
||||||
border-radius: .2rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin below-container-width {
|
|
||||||
@media screen and (max-width: $container-width-threshold){
|
|
||||||
@content;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,94 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
@import "mixins.scss";
|
|
||||||
|
|
||||||
$search-input-padding: 0.5rem;
|
|
||||||
$search-element-padding: 0.5rem 1rem 0.5rem 1rem;
|
|
||||||
|
|
||||||
@mixin green-shadow {
|
|
||||||
box-shadow: 0px 0px 5px rgba($primary-color, 0.7);
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-input-wrapper {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: row;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
input.stork-input {
|
|
||||||
@include bordered-block;
|
|
||||||
font-family: $font-body;
|
|
||||||
padding: $search-input-padding;
|
|
||||||
|
|
||||||
&:active, &:focus {
|
|
||||||
@include green-shadow;
|
|
||||||
border-color: $primary-color;
|
|
||||||
}
|
|
||||||
|
|
||||||
flex-grow: 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-close-button {
|
|
||||||
@include bordered-block;
|
|
||||||
font-family: $font-body;
|
|
||||||
padding: $search-input-padding;
|
|
||||||
|
|
||||||
background-color: $code-color;
|
|
||||||
padding-left: 1.5rem;
|
|
||||||
padding-right: 1.5rem;
|
|
||||||
|
|
||||||
border-left: none;
|
|
||||||
border-top-left-radius: 0;
|
|
||||||
border-bottom-left-radius: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-output-visible {
|
|
||||||
@include bordered-block;
|
|
||||||
|
|
||||||
border-top: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-result, .stork-message, .stork-attribution {
|
|
||||||
padding: $search-element-padding;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-message:not(:last-child), .stork-result {
|
|
||||||
border-bottom: $standard-border;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-results {
|
|
||||||
margin: 0;
|
|
||||||
padding: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-result {
|
|
||||||
list-style: none;
|
|
||||||
|
|
||||||
&.selected {
|
|
||||||
background-color: $code-color;
|
|
||||||
}
|
|
||||||
|
|
||||||
a:hover {
|
|
||||||
color: black;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-title, .stork-excerpt {
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-excerpt {
|
|
||||||
white-space: nowrap;
|
|
||||||
overflow: hidden;
|
|
||||||
text-overflow: ellipsis;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-title {
|
|
||||||
font-family: $font-heading;
|
|
||||||
font-size: 1.4rem;
|
|
||||||
text-align: left;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
.stork-highlight {
|
|
||||||
background-color: lighten($primary-color, 30%);
|
|
||||||
}
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
@import "mixins.scss";
|
|
||||||
@import "margin.scss";
|
|
||||||
|
|
||||||
$sidenote-padding: 1rem;
|
|
||||||
$sidenote-highlight-border-width: .2rem;
|
|
||||||
|
|
||||||
.sidenote {
|
|
||||||
&:hover {
|
|
||||||
.sidenote-label {
|
|
||||||
background-color: $primary-color;
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sidenote-content {
|
|
||||||
border: $sidenote-highlight-border-width dashed;
|
|
||||||
padding: $sidenote-padding -
|
|
||||||
($sidenote-highlight-border-width - $standard-border-width);
|
|
||||||
border-color: $primary-color;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.sidenote-label {
|
|
||||||
border-bottom: .2rem dashed $primary-color;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sidenote-checkbox {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
.sidenote-content {
|
|
||||||
@include margin-content;
|
|
||||||
@include bordered-block;
|
|
||||||
margin-top: -1.5rem;
|
|
||||||
padding: $sidenote-padding;
|
|
||||||
text-align: left;
|
|
||||||
|
|
||||||
&.sidenote-right {
|
|
||||||
@include margin-content-right;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.sidenote-left {
|
|
||||||
@include margin-content-left;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.sidenote-delimiter {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
@mixin hidden-sidenote {
|
|
||||||
position: static;
|
|
||||||
margin-top: 1rem;
|
|
||||||
margin-bottom: 1rem;
|
|
||||||
width: 100%;
|
|
||||||
|
|
||||||
.sidenote-checkbox:checked ~ & {
|
|
||||||
display: block;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@include below-two-margins {
|
|
||||||
.sidenote-content.sidenote-left {
|
|
||||||
@include hidden-sidenote;
|
|
||||||
margin-left: 0rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@include below-one-margin {
|
|
||||||
.sidenote-content.sidenote-right {
|
|
||||||
@include hidden-sidenote;
|
|
||||||
margin-right: 0rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,242 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
@import "mixins.scss";
|
|
||||||
@import "margin.scss";
|
|
||||||
@import "toc.scss";
|
|
||||||
|
|
||||||
body {
|
|
||||||
font-family: $font-body;
|
|
||||||
font-size: 1.0rem;
|
|
||||||
line-height: 1.5;
|
|
||||||
margin-bottom: 1rem;
|
|
||||||
text-align: justify;
|
|
||||||
|
|
||||||
@include below-container-width {
|
|
||||||
text-align: left;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h1, h2, h3, h4, h5, h6 {
|
|
||||||
margin-bottom: .1rem;
|
|
||||||
margin-top: .5rem;
|
|
||||||
font-family: $font-heading;
|
|
||||||
font-weight: normal;
|
|
||||||
text-align: center;
|
|
||||||
|
|
||||||
a {
|
|
||||||
border-bottom: none;
|
|
||||||
|
|
||||||
&:hover {
|
|
||||||
color: $primary-color;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.container {
|
|
||||||
position: relative;
|
|
||||||
margin: auto;
|
|
||||||
width: 100%;
|
|
||||||
max-width: $container-width;
|
|
||||||
box-sizing: border-box;
|
|
||||||
|
|
||||||
@include below-container-width {
|
|
||||||
padding: 0 $container-min-padding 0 $container-min-padding;
|
|
||||||
margin: 0;
|
|
||||||
max-width: $container-width + 2 * $container-min-padding;
|
|
||||||
}
|
|
||||||
|
|
||||||
@include below-two-margins {
|
|
||||||
left: -($margin-width + $margin-inner-offset + $margin-outer-offset)/2;
|
|
||||||
}
|
|
||||||
|
|
||||||
@include below-one-margin {
|
|
||||||
left: 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.button, input[type="submit"] {
|
|
||||||
padding: 0.5rem;
|
|
||||||
background-color: $primary-color;
|
|
||||||
border: none;
|
|
||||||
color: white;
|
|
||||||
transition: color 0.25s, background-color 0.25s;
|
|
||||||
text-align: left;
|
|
||||||
|
|
||||||
&:focus {
|
|
||||||
outline: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
&:hover, &:focus {
|
|
||||||
background-color: white;
|
|
||||||
color: $primary-color;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nav {
|
|
||||||
width: 100%;
|
|
||||||
margin: 0rem 0rem 1rem 0rem;
|
|
||||||
|
|
||||||
.container {
|
|
||||||
display: flex;
|
|
||||||
justify-content: center;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
a {
|
|
||||||
padding: 0.25rem 0.75rem 0.25rem .75rem;
|
|
||||||
text-decoration: none;
|
|
||||||
color: black;
|
|
||||||
display: inline-block;
|
|
||||||
border-bottom: none;
|
|
||||||
white-space: nowrap;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.post-subscript {
|
|
||||||
color: #8f8f8f;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
.post-content {
|
|
||||||
margin-top: .5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h1 {
|
|
||||||
font-size: 3.0rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h2 {
|
|
||||||
font-size: 2.6rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h3 {
|
|
||||||
font-size: 2.2rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h4 {
|
|
||||||
font-size: 1.8rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h5 {
|
|
||||||
font-size: 1.4rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
h6 {
|
|
||||||
font-size: 1.0rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
a {
|
|
||||||
color: black;
|
|
||||||
text-decoration: none;
|
|
||||||
border-bottom: .2rem solid $primary-color;
|
|
||||||
transition: color 0.25s;
|
|
||||||
|
|
||||||
&:hover {
|
|
||||||
color: $primary-color;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
img {
|
|
||||||
max-width: 100%
|
|
||||||
}
|
|
||||||
|
|
||||||
table {
|
|
||||||
@include bordered-block;
|
|
||||||
margin: auto;
|
|
||||||
padding: 0.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
tr {
|
|
||||||
@include below-container-width {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
td {
|
|
||||||
@include below-container-width {
|
|
||||||
overflow-x: auto;
|
|
||||||
}
|
|
||||||
padding: 0.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
div.highlight tr {
|
|
||||||
display: table-row;
|
|
||||||
}
|
|
||||||
|
|
||||||
hr.header-divider {
|
|
||||||
background-color: $primary-color;
|
|
||||||
height: 0.3rem;
|
|
||||||
border: none;
|
|
||||||
border-radius: 0.15rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
ul.post-list {
|
|
||||||
list-style: none;
|
|
||||||
padding: 0;
|
|
||||||
|
|
||||||
li {
|
|
||||||
@include bordered-block;
|
|
||||||
margin-bottom: 1rem;
|
|
||||||
padding: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
p {
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
a.post-title {
|
|
||||||
border-bottom: none;
|
|
||||||
font-size: 1.4rem;
|
|
||||||
font-family: $font-heading;
|
|
||||||
text-align: center;
|
|
||||||
display: block;
|
|
||||||
}
|
|
||||||
|
|
||||||
p.post-wordcount {
|
|
||||||
text-align: center;
|
|
||||||
margin-bottom: 0.6rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.katex-html {
|
|
||||||
white-space: nowrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
figure {
|
|
||||||
img {
|
|
||||||
max-width: 70%;
|
|
||||||
display: block;
|
|
||||||
margin: auto;
|
|
||||||
|
|
||||||
@include below-container-width {
|
|
||||||
max-width: 100%;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
figcaption {
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.tiny img {
|
|
||||||
max-height: 15rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.small img {
|
|
||||||
max-height: 20rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.medium img {
|
|
||||||
max-height: 30rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.twitter-tweet {
|
|
||||||
margin: auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
.draft-warning {
|
|
||||||
@include bordered-block;
|
|
||||||
padding: 0.5rem;
|
|
||||||
background-color: #ffee99;
|
|
||||||
border-color: #f5c827;
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
@import "variables.scss";
|
|
||||||
@import "mixins.scss";
|
|
||||||
|
|
||||||
$toc-color: $code-color;
|
|
||||||
$toc-border-color: $code-border-color;
|
|
||||||
|
|
||||||
.table-of-contents {
|
|
||||||
@include margin-content;
|
|
||||||
@include margin-content-left;
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
align-items: flex-end;
|
|
||||||
margin-bottom: 1rem;
|
|
||||||
|
|
||||||
em {
|
|
||||||
font-style: normal;
|
|
||||||
font-weight: bold;
|
|
||||||
font-size: 1.2em;
|
|
||||||
display: block;
|
|
||||||
margin-bottom: 0.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
#TableOfContents > ul {
|
|
||||||
padding-left: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
nav {
|
|
||||||
margin: 0px;
|
|
||||||
}
|
|
||||||
|
|
||||||
ul {
|
|
||||||
list-style: none;
|
|
||||||
padding-left: 2rem;
|
|
||||||
margin: 0px;
|
|
||||||
}
|
|
||||||
|
|
||||||
a {
|
|
||||||
padding: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
div.wrapper {
|
|
||||||
@include bordered-block;
|
|
||||||
padding: 1rem;
|
|
||||||
background-color: $toc-color;
|
|
||||||
border-color: $toc-border-color;
|
|
||||||
box-sizing: border-box;
|
|
||||||
max-width: 100%;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
$container-width: 45rem;
|
|
||||||
$container-min-padding: 1rem;
|
|
||||||
$container-width-threshold: $container-width + 2 * $container-min-padding;
|
|
||||||
$standard-border-width: .075rem;
|
|
||||||
|
|
||||||
$primary-color: #36e281;
|
|
||||||
$border-color: #bfbfbf;
|
|
||||||
$code-color: #f0f0f0;
|
|
||||||
$code-border-color: darken($code-color, 10%);
|
|
||||||
|
|
||||||
$font-heading: "Lora", serif;
|
|
||||||
$font-body: "Raleway", serif;
|
|
||||||
$font-code: "Inconsolata", monospace;
|
|
||||||
|
|
||||||
$standard-border: $standard-border-width solid $border-color;
|
|
||||||
$code-border: $standard-border-width solid $code-border-color;
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="{{ .Site.Language.Lang }}">
|
|
||||||
{{- partial "head.html" . -}}
|
|
||||||
<body>
|
|
||||||
{{- partial "header.html" . -}}
|
|
||||||
<div class="container"><hr class="header-divider"></div>
|
|
||||||
<main class="container">
|
|
||||||
{{- block "main" . }}{{- end }}
|
|
||||||
</main>
|
|
||||||
{{- partial "footer.html" . -}}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
{{- block "main" . }}{{- end }}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
<h2>{{ .Title }}</h2>
|
|
||||||
|
|
||||||
<ul class="post-list">
|
|
||||||
{{ range .Pages.ByDate.Reverse }}
|
|
||||||
{{ partial "post.html" . }}
|
|
||||||
{{ end }}
|
|
||||||
</ul>
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
[input]
|
|
||||||
base_directory = "content/"
|
|
||||||
title_boost = "Large"
|
|
||||||
files = [
|
|
||||||
{{ range $index , $e := .Site.RegularPages }}{{ if $index }}, {{end}}{ filetype = "PlainText", contents = {{ $e.Plain | jsonify }}, title = {{ $e.Title | jsonify }}, url = {{ $e.Permalink | jsonify }} }
|
|
||||||
{{ end }}
|
|
||||||
]
|
|
||||||
|
|
||||||
[output]
|
|
||||||
filename = "index.st"
|
|
||||||
excerpts_per_result = 2
|
|
||||||
displayed_results_count = 5
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
<h2>{{ .Title }}</h2>
|
|
||||||
{{ .Content }}
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
{{ .Content }}
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
<h2>{{ .Title }}</h2>
|
|
||||||
<div class="post-subscript">
|
|
||||||
<p>
|
|
||||||
{{ range .Params.tags }}
|
|
||||||
<a class="button" href="{{ $.Site.BaseURL }}/tags/{{ . | urlize }}">{{ . }}</a>
|
|
||||||
{{ end }}
|
|
||||||
</p>
|
|
||||||
<p>Posted on {{ .Date.Format "January 2, 2006" }}.</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="post-content">
|
|
||||||
{{ if not (eq .TableOfContents "<nav id=\"TableOfContents\"></nav>") }}
|
|
||||||
<div class="table-of-contents">
|
|
||||||
<div class="wrapper">
|
|
||||||
<em>Table of Contents</em>
|
|
||||||
{{ .TableOfContents }}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ if .Draft }}
|
|
||||||
<div class="draft-warning">
|
|
||||||
<em>Warning!</em> This post is a draft. At best, it may contain grammar mistakes;
|
|
||||||
at worst, it can include significant errors and bugs. Please
|
|
||||||
use your best judgement!
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ .Content }}
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
{{ .Content }}
|
|
||||||
|
|
||||||
<p>
|
|
||||||
<div class="stork-wrapper">
|
|
||||||
<div class="stork-input-wrapper">
|
|
||||||
<input class="stork-input" data-stork="blog" placeholder="Search (requires JavaScript)"/>
|
|
||||||
</div>
|
|
||||||
<div class="stork-output" data-stork="blog-output"></div>
|
|
||||||
</div>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
Recent posts:
|
|
||||||
<ul class="post-list">
|
|
||||||
{{ range first 10 (where (where .Site.Pages.ByDate.Reverse "Section" "blog") ".Kind" "!=" "section") }}
|
|
||||||
{{ partial "post.html" . }}
|
|
||||||
{{ end }}
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<script src="https://files.stork-search.net/stork.js"></script>
|
|
||||||
<script>stork.register("blog", "/index.st");</script>
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,25 +0,0 @@
|
|||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
|
||||||
<meta name="theme-color" content="#1dc868">
|
|
||||||
{{ if .Description }}
|
|
||||||
<meta name="description" content="{{ .Description }}">
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
<link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Inconsolata:wght@400;700&family=Raleway&family=Lora&display=block" media="screen">
|
|
||||||
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/normalize/5.0.0/normalize.min.css" media="screen">
|
|
||||||
{{ $style := resources.Get "scss/style.scss" | resources.ToCSS | resources.Minify }}
|
|
||||||
{{ $sidenotes := resources.Get "scss/sidenotes.scss" | resources.ToCSS | resources.Minify }}
|
|
||||||
{{ $code := resources.Get "scss/code.scss" | resources.ToCSS | resources.Minify }}
|
|
||||||
{{ $search := resources.Get "scss/search.scss" | resources.ToCSS | resources.Minify }}
|
|
||||||
{{ $icon := resources.Get "img/favicon.png" }}
|
|
||||||
{{- partial "sidenotes.html" . -}}
|
|
||||||
<link rel="stylesheet" href="{{ $style.Permalink }}" media="screen">
|
|
||||||
<link rel="stylesheet" href="{{ $sidenotes.Permalink }}" media="screen">
|
|
||||||
<link rel="stylesheet" href="{{ $code.Permalink }}" media="screen">
|
|
||||||
<link rel="stylesheet" href="{{ $search.Permalink }}" media="screen">
|
|
||||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css" integrity="sha384-zB1R0rpPzHqg7Kpt0Aljp8JPLqbXI3bhnPWROx27a9N0Ll6ZP/+DiW/UqRcLbRjq" crossorigin="anonymous" media="screen">
|
|
||||||
<link rel="icon" type="image/png" href="{{ $icon.Permalink }}">
|
|
||||||
|
|
||||||
<title>{{ .Title }}</title>
|
|
||||||
</head>
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
<div class="container">
|
|
||||||
<h1>Daniel's Blog</h1>
|
|
||||||
</div>
|
|
||||||
<nav>
|
|
||||||
<div class="container">
|
|
||||||
<a href="/">Home</a>
|
|
||||||
<a href="/about">About</a>
|
|
||||||
<a href="https://github.com/DanilaFe">GitHub</a>
|
|
||||||
<a href="/Resume-Danila-Fedorin.pdf">Resume</a>
|
|
||||||
<a href="/tags">Tags</a>
|
|
||||||
<a href="/blog">All Posts</a>
|
|
||||||
</div>
|
|
||||||
</nav>
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
<li>
|
|
||||||
<a href="{{ .Permalink }}" class="post-title">{{ .Title }}</a>
|
|
||||||
<p class="post-wordcount">{{ .WordCount }} words, about {{ .ReadingTime }} minutes to read.</p>
|
|
||||||
<p class="post-preview">{{ .Summary }} . . .</p>
|
|
||||||
</li>
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
<style>
|
|
||||||
.sidenote-checkbox {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
{{- $pctx := . -}}
|
|
||||||
{{- if .IsHome -}}{{ $pctx = .Site }}{{- end -}}
|
|
||||||
{{- $pages := slice -}}
|
|
||||||
{{- if or $.IsHome $.IsSection -}}
|
|
||||||
{{- $pages = $pctx.RegularPages -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $pages = $pctx.Pages -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- $limit := .Site.Config.Services.RSS.Limit -}}
|
|
||||||
{{- if ge $limit 1 -}}
|
|
||||||
{{- $pages = $pages | first $limit -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- printf "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>" | safeHTML }}
|
|
||||||
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
|
|
||||||
<channel>
|
|
||||||
<title>{{ if eq .Title .Site.Title }}{{ .Site.Title }}{{ else }}{{ with .Title }}{{.}} on {{ end }}{{ .Site.Title }}{{ end }}</title>
|
|
||||||
<link>{{ .Permalink }}</link>
|
|
||||||
<description>Recent content {{ if ne .Title .Site.Title }}{{ with .Title }}in {{.}} {{ end }}{{ end }}on {{ .Site.Title }}</description>
|
|
||||||
<generator>Hugo -- gohugo.io</generator>{{ with .Site.LanguageCode }}
|
|
||||||
<language>{{.}}</language>{{end}}{{ with .Site.Author.email }}
|
|
||||||
<managingEditor>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</managingEditor>{{end}}{{ with .Site.Author.email }}
|
|
||||||
<webMaster>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</webMaster>{{end}}{{ with .Site.Copyright }}
|
|
||||||
<copyright>{{.}}</copyright>{{end}}{{ if not .Date.IsZero }}
|
|
||||||
<lastBuildDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</lastBuildDate>{{ end }}
|
|
||||||
{{ with .OutputFormats.Get "RSS" }}
|
|
||||||
{{ printf "<atom:link href=%q rel=\"self\" type=%q />" .Permalink .MediaType | safeHTML }}
|
|
||||||
{{ end }}
|
|
||||||
{{ range $pages }}
|
|
||||||
<item>
|
|
||||||
<title>{{ .Title }}</title>
|
|
||||||
<link>{{ .Permalink }}</link>
|
|
||||||
<pubDate>{{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }}</pubDate>
|
|
||||||
{{ with .Site.Author.email }}<author>{{.}}{{ with $.Site.Author.name }} ({{.}}){{end}}</author>{{end}}
|
|
||||||
<guid>{{ .Permalink }}</guid>
|
|
||||||
<description>{{ .Content | html }}</description>
|
|
||||||
</item>
|
|
||||||
{{ end }}
|
|
||||||
</channel>
|
|
||||||
</rss>
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
{{ highlight (readFile (printf "code/%s" (.Get 1))) (.Get 0) "" }}
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
{{ $s := (readFile (printf "code/%s" (.Get 1))) }}
|
|
||||||
{{ $t := split $s "\n" }}
|
|
||||||
{{ if not (eq (int (.Get 2)) 1) }}
|
|
||||||
{{ .Scratch.Set "u" (after (sub (int (.Get 2)) 1) $t) }}
|
|
||||||
{{ else }}
|
|
||||||
{{ .Scratch.Set "u" $t }}
|
|
||||||
{{ end }}
|
|
||||||
{{ $v := first (add (sub (int (.Get 3)) (int (.Get 2))) 1) (.Scratch.Get "u") }}
|
|
||||||
{{ if (.Get 4) }}
|
|
||||||
{{ .Scratch.Set "opts" (printf ",%s" (.Get 4)) }}
|
|
||||||
{{ else }}
|
|
||||||
{{ .Scratch.Set "opts" "" }}
|
|
||||||
{{ end }}
|
|
||||||
<div class="highlight-group">
|
|
||||||
<div class="highlight-label">From <a href="https://dev.danilafe.com/Web-Projects/blog-static/src/branch/master/code/{{ .Get 1 }}">{{ path.Base (.Get 1) }}</a>,
|
|
||||||
{{ if eq (.Get 2) (.Get 3) }}line {{ .Get 2 }}{{ else }} lines {{ .Get 2 }} through {{ .Get 3 }}{{ end }}</div>
|
|
||||||
{{ highlight (delimit $v "\n") (.Get 0) (printf "linenos=table,linenostart=%d%s" (.Get 2) (.Scratch.Get "opts")) }}
|
|
||||||
</div>
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
$$
|
|
||||||
{{ .Inner }}
|
|
||||||
$$
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
{{ .Page.Scratch.Add "numbernote-id" 1 }}
|
|
||||||
{{ $id := .Page.Scratch.Get "numbernote-id" }}
|
|
||||||
<span class="sidenote">
|
|
||||||
<label class="sidenote-label" for="numbernote-{{ $id }}">({{ $id }})</label>
|
|
||||||
<input class="sidenote-checkbox" type="checkbox" id="numbernote-{{ $id }}"></input>
|
|
||||||
<span class="sidenote-content sidenote-{{ .Get 0 }}">
|
|
||||||
<span class="sidenote-delimiter">[note:</span>
|
|
||||||
{{ .Inner }}
|
|
||||||
<span class="sidenote-delimiter">]</span>
|
|
||||||
</span>
|
|
||||||
</span>
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
<pre><code>{{ readFile (printf "code/%s" (.Get 0)) }}</code></pre>
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
<span class="sidenote">
|
|
||||||
<label class="sidenote-label" for="{{ .Get 1 }}">{{ .Get 2 }}</label>
|
|
||||||
<input class="sidenote-checkbox" type="checkbox" id="{{ .Get 1 }}"></input>
|
|
||||||
<span class="sidenote-content sidenote-{{ .Get 0 }}">
|
|
||||||
<span class="sidenote-delimiter">[note:</span>
|
|
||||||
{{ .Inner }}
|
|
||||||
<span class="sidenote-delimiter">]</span>
|
|
||||||
</span>
|
|
||||||
</span>
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
<div style="background-color: tomato; color: white; padding: 10px;">
|
|
||||||
<em>TODO: </em>{{ .Inner }}
|
|
||||||
</div>
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
<h2>Tagged "{{ .Title }}"</h2>
|
|
||||||
|
|
||||||
<ul class="post-list">
|
|
||||||
{{ range .Pages.ByDate.Reverse }}
|
|
||||||
{{ partial "post.html" . }}
|
|
||||||
{{ end }}
|
|
||||||
</ul>
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
{{ define "main" }}
|
|
||||||
<h2>{{ .Title }}</h2>
|
|
||||||
Below is a list of all the tags ever used on this site.
|
|
||||||
|
|
||||||
<ul>
|
|
||||||
{{ range sort .Pages "Title" }}
|
|
||||||
<li><a href="{{ .Permalink }}">{{ .Title }}</a></li>
|
|
||||||
{{ end }}
|
|
||||||
</ul>
|
|
||||||
{{ end }}
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
# theme.toml template for a Hugo theme
|
|
||||||
# See https://github.com/gohugoio/hugoThemes#themetoml for an example
|
|
||||||
|
|
||||||
name = "Vanilla"
|
|
||||||
license = "MIT"
|
|
||||||
# licenselink = "https://github.com/yourname/yourtheme/blob/master/LICENSE"
|
|
||||||
# description = ""
|
|
||||||
# homepage = "http://example.com/"
|
|
||||||
# tags = []
|
|
||||||
# features = []
|
|
||||||
min_version = "0.41"
|
|
||||||
|
|
||||||
[author]
|
|
||||||
name = "Danila Fedorin"
|
|
||||||
homepage = "https://danilafe.com"
|
|
||||||
|
|
||||||
# If porting an existing theme
|
|
||||||
# [original]
|
|
||||||
# name = ""
|
|
||||||
# homepage = ""
|
|
||||||
# repo = ""
|
|
||||||
Reference in New Issue
Block a user