20 Commits

Author SHA1 Message Date
7a088d6739 Edit, finish, and publish post on PL
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-12-31 21:27:55 -08:00
626baefd76 Do some aggressive trimming and editing.
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-12-30 00:06:17 -08:00
4602d02720 [WIP] 2/3 draft
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-12-29 23:07:55 -08:00
7fbd4ea9f8 Update theme with syntax highlighting fix
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-04-12 17:56:44 -07:00
6fd1e1962b Update theme
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-30 23:15:54 -07:00
62c338e382 Add a post on Chapel's runtime types
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 22:56:10 -08:00
40ea9ec637 Update theme for newer Hugo
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 19:00:05 -08:00
787e194d41 Update theme with new Hugo support
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 13:42:42 -08:00
71162e2db9 Update nokogiri more
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 13:36:08 -08:00
43debc65e4 Update nokogiri
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 13:27:09 -08:00
b07ea85b70 Update ruby scripts to use 'File.exist?'
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-03-02 11:32:55 -08:00
06e8b8e022 Keep KaTeX css files
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 17:01:49 -08:00
647f47a5f3 Remove KaTeX CSS includes if we don't need them.
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 14:43:49 -08:00
36e4feb668 Update theme
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 14:14:15 -08:00
11be991946 Print number of files processed 2025-02-23 13:25:18 -08:00
a8c2b1d05a Fix bug in subsetting script
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 13:16:33 -08:00
fb46142e9d Remove incorrect print
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 12:45:24 -08:00
0b33d03b73 Pass the font folder as part of argv
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 12:35:45 -08:00
804147caef Read SVG path from the command line
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 12:34:44 -08:00
d847d20666 Adjust Python script to also just accept HTML files as args
Signed-off-by: Danila Fedorin <danila.fedorin@gmail.com>
2025-02-23 12:28:19 -08:00
17 changed files with 1050 additions and 36 deletions

View File

@@ -3,11 +3,11 @@ GEM
specs: specs:
duktape (2.7.0.0) duktape (2.7.0.0)
execjs (2.9.1) execjs (2.9.1)
mini_portile2 (2.8.6) mini_portile2 (2.8.8)
nokogiri (1.15.6) nokogiri (1.18.3)
mini_portile2 (~> 2.8.2) mini_portile2 (~> 2.8.2)
racc (~> 1.4) racc (~> 1.4)
racc (1.8.0) racc (1.8.1)
PLATFORMS PLATFORMS
ruby ruby

View File

@@ -23,7 +23,7 @@ class AgdaContext
return @file_infos[file] if @file_infos.include? file return @file_infos[file] if @file_infos.include? file
@file_infos[file] = line_infos = {} @file_infos[file] = line_infos = {}
unless File.exists?(file) unless File.exist?(file)
return line_infos return line_infos
end end

View File

@@ -43,7 +43,7 @@ files.each do |file|
tags = [] tags = []
group = 1 group = 1
draft = false draft = false
next unless File.exists?(file) next unless File.exist?(file)
value = File.size(file) value = File.size(file)
url = file.gsub(/^content/, "https://danilafe.com").delete_suffix("/index.md").delete_suffix(".md") url = file.gsub(/^content/, "https://danilafe.com").delete_suffix("/index.md").delete_suffix(".md")
File.readlines(file).each do |l| File.readlines(file).each do |l|

View File

@@ -26,7 +26,7 @@ files = ARGV
code_paths = Dir.entries(root_path).select do |f| code_paths = Dir.entries(root_path).select do |f|
File.directory?(File.join(root_path, f)) and f != '.' and f != '..' File.directory?(File.join(root_path, f)) and f != '.' and f != '..'
end.to_set end.to_set
code_paths += JSON.parse(File.read(data_file)).keys if File.exists? data_file code_paths += JSON.parse(File.read(data_file)).keys if File.exist? data_file
# Extending code_paths from submodules.json means that nested Agda modules # Extending code_paths from submodules.json means that nested Agda modules
# have their root dir correctly set. # have their root dir correctly set.

View File

@@ -5,7 +5,8 @@ require 'nokogiri'
require 'set' require 'set'
# 1) Process all files passed in from the command line # 1) Process all files passed in from the command line
files = ARGV svgpath = ARGV[0]
files = ARGV[1..]
# 2) Extract used Feather icons # 2) Extract used Feather icons
used_icons = Set.new used_icons = Set.new
@@ -27,7 +28,7 @@ end
puts "Found #{used_icons.size} unique icons: #{used_icons.to_a.join(', ')}" puts "Found #{used_icons.size} unique icons: #{used_icons.to_a.join(', ')}"
# 3) Load the full feather-sprite.svg as XML # 3) Load the full feather-sprite.svg as XML
sprite_doc = File.open("feather-sprite.svg", "r:UTF-8") { |f| Nokogiri::XML(f) } sprite_doc = File.open(svgpath, "r:UTF-8") { |f| Nokogiri::XML(f) }
# 4) Create a new SVG with only the required symbols # 4) Create a new SVG with only the required symbols
new_svg = Nokogiri::XML::Document.new new_svg = Nokogiri::XML::Document.new
@@ -43,8 +44,6 @@ sprite_doc.css("symbol").each do |symbol_node|
end end
# 5) Save the subset sprite # 5) Save the subset sprite
File.open("custom-sprite.svg", "w:UTF-8") do |f| File.open(svgpath, "w:UTF-8") do |f|
f.write(new_svg.to_xml) f.write(new_svg.to_xml)
end end
puts "Generated custom-sprite.svg with only the required icons."

View File

@@ -1,12 +1,9 @@
import os import os
import sys
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from fontTools.subset import Subsetter, Options from fontTools.subset import Subsetter, Options
from fontTools.ttLib import TTFont from fontTools.ttLib import TTFont
# Directories
HTML_DIR = "." # Directory with .html files
FONT_DIR = "." # Directory containing fonts to be modified
FONT_EXTENSIONS = (".ttf", ".woff", ".woff2", ".otf") # Font file types FONT_EXTENSIONS = (".ttf", ".woff", ".woff2", ".otf") # Font file types
def extract_text_from_html(file_path): def extract_text_from_html(file_path):
@@ -15,14 +12,11 @@ def extract_text_from_html(file_path):
soup = BeautifulSoup(f.read(), "html.parser") soup = BeautifulSoup(f.read(), "html.parser")
return soup.get_text() return soup.get_text()
def get_used_characters(directory): def get_used_characters(files):
"""Collect unique characters from all .html files in the given directory.""" """Collect unique characters from all .html files in the given directory."""
char_set = set() char_set = set()
for root, _, files in os.walk(directory):
for file in files: for file in files:
if file.endswith(".html"): text = extract_text_from_html(file)
full_path = os.path.join(root, file)
text = extract_text_from_html(full_path)
char_set.update(text) char_set.update(text)
return "".join(sorted(char_set)) return "".join(sorted(char_set))
@@ -65,10 +59,10 @@ def subset_font_in_place(font_path, characters):
print(f"Subsetted font in place: {font_path}") print(f"Subsetted font in place: {font_path}")
if __name__ == "__main__": if __name__ == "__main__":
used_chars = get_used_characters(HTML_DIR) used_chars = get_used_characters(sys.argv[2:])
print(f"Extracted {len(used_chars)} unique characters from HTML files.") print(f"Extracted {len(used_chars)} unique characters from {len(sys.argv[2:])} HTML files.")
font_files = find_font_files(FONT_DIR) font_files = find_font_files(sys.argv[1])
print(f"Found {len(font_files)} font files to subset.") print(f"Found {len(font_files)} font files to subset.")
for font_file in font_files: for font_file in font_files:

View File

@@ -339,9 +339,8 @@ this means the rule applies to (object) variables declared to have type
our system. A single rule takes care of figuring the types of _all_ our system. A single rule takes care of figuring the types of _all_
variables. variables.
{{< todo >}} > [!TODO]
The rest of this, but mostly statements. > The rest of this, but mostly statements.
{{< /todo >}}
### This Page at a Glance ### This Page at a Glance
#### Metavariables #### Metavariables

View File

@@ -0,0 +1,591 @@
---
title: "Chapel's Runtime Types as an Interesting Alternative to Dependent Types"
date: 2025-03-02T22:52:01-08:00
tags: ["Chapel", "C++", "Idris", "Programming Languages"]
description: "In this post, I discuss Chapel's runtime types as a limited alternative to dependent types."
---
One day, when I was in graduate school, the Programming Languages research
group was in a pub for a little gathering. Amidst beers, fries, and overpriced
sandwiches, the professor and I were talking about [dependent types](https://en.wikipedia.org/wiki/Dependent_type). Speaking
loosely and imprecisely, these are types that are somehow constructed from
_values_ in a language, like numbers.
For example, in C++, [`std::array`](https://en.cppreference.com/w/cpp/container/array)
is a dependent type. An instantiation of the _type_ `array`, like `array<string, 3>`
is constructed from the type of its elements (here, `string`) and a value
representing the number of elements (here, `3`). This is in contrast with types
like `std::vector`, which only depends on a type (e.g., `vector<string>` would
be a dynamically-sized collection of strings).
I was extolling the virtues of general dependent types, like you might find
in [Idris](https://www.idris-lang.org/) or [Agda](https://agda.readthedocs.io/en/latest/getting-started/what-is-agda.html):
more precise function signatures! The
{{< sidenote "right" "curry-howard-note" "Curry-Howard isomorphism!" >}}
The Curry-Howard isomorphism is a common theme on this blog. I've
<a href="{{< relref "typesafe_interpreter_revisited#curry-howard-correspondence" >}}">
written about it myself</a>, but you can also take a look at the
<a href="https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence">
Wikipedia page</a>.
{{< /sidenote >}} The professor was skeptical. He had been excited about
dependent types in the past, but nowadays he felt over them. They were cool, he
said, but there are few practical uses. In fact, he posed a challenge:
> Give me one good reason to use dependent types in practice that doesn't
> involve keeping track of bounds for lists and matrices!
{#bounds-quote}
This challenge alludes to fixed-length lists -- [vectors](https://agda.github.io/agda-stdlib/master/Data.Vec.html)
-- which are one of the first dependently-typed data structures one learns about.
Matrices are effectively vectors-of-vectors. In fact, even in giving my introductory
example above, I demonstrated the C++ equivalent of a fixed-length list, retroactively
supporting the professor's point.
It's not particularly important to write down how I addressed the challenge;
suffice it to say that the notion resonated with some of the other
students present in the pub. In the midst of practical development, how much
of dependent types' power can you leverage, and how much power do you pay
for but never use?
A second round of beers arrived. The argument was left largely unresolved,
and conversation flowed to other topics. Eventually, I graduated, and started
working on the [Chapel language](https://chapel-lang.org/) team (I also
[write on the team's blog](https://chapel-lang.org/blog/authors/daniel-fedorin/)).
When I started looking at Chapel programs, I could not believe my eyes...
### A Taste of Chapel's Array Types
Here's a simple Chapel program that creates an array of 10 integers.
```Chapel
var A: [0..9] int;
```
Do you see the similarity to the `std::array` example above? Of course, the
syntax is quite different, but in _essence_ I think the resemblance is
uncanny. Let's mangle the type a bit --- producing invalid Chapel programs ---
just for the sake of demonstration.
```Chapel
var B: array(0..9, int); // first, strip the syntax sugar
var C: array(int, 0..9); // swap the order of the arguments to match C++
```
Only one difference remains: in C++, arrays are always indexed from zero. Thus,
writing `array<int, 10>` would implicitly create an array whose indices start
with `0` and end in `9`. In Chapel, array indices can start at values other
than zero (it happens to be useful for elegantly writing numerical programs),
so the type explicitly specifies a lower and a higher bound. Other than that,
though, the two types look very similar.
In general, Chapel arrays have a _domain_, typically stored in variables like `D`.
The domain of `A` above is `{0..9}`. This domain is part of the array's type.
Before I move on, I'd like to pause and state a premise that is crucial
for the rest of this post: __I think knowing the size of a data structure,
like `std::array` or Chapel's `[0..9] int`, is valuable__. If this premise
were not true, there'd be no reason to prefer `std::array` to `std::vector`, or
care that Chapel has indexed arrays. However, having this information
can help in numerous ways, such as:
* __Enforcing compatible array shapes.__ For instance, the following Chapel
code would require two arrays passed to function `doSomething` to have the same size.
```Chapel
proc doSomething(people: [?D] person, data: [D] personInfo) {}
```
Similarly, we can enforce the fact that an input to a function has the same shape
as the output:
```Chapel
proc transform(input: [?D] int): [D] string;
```
* __Consistency in generics__. Suppose you have a generic function that declares
a new variable of a given type, and just returns it:
```Chapel
proc defaultValue(type argType) {
var x: argType;
return x;
}
```
Code like this exists in "real" Chapel software, by the way --- the example
is not contrived. By including the bounds etc. into the array type, we can
ensure that `x` is appropriately allocated. Then, `defaultValue([1,2,3].type)`
would return an array of three default-initialized integers.
* __Eliding boundary checking__. Boundary checking is useful for safety,
since it ensures that programs don't read or write past the end of allocated
memory. However, bounds checking is also slow. Consider the following function that
sums two arrays:
```Chapel
proc sumElementwise(A: [?D] int, B: [D] int) {
var C: [D] int;
for idx in D do
C[idx] = A[idx] + B[idx];
}
```
Since arrays `A`, `B`, and `C` have the same domain `D`, we don't need
to do bounds checking when accessing any of their elements. I don't believe
this is currently an optimization in Chapel, but it's certainly on the
table.
* __Documentation__. Including the size of the array as part of type
signature clarifies the intent of the code being written. For instance,
in the following function:
```Chapel
proc sendEmails(numEmails: int, destinationAddrs: [1..numEmails] address) { /* ... */ }
```
It's clear from the type of `destinationAddrs` that there ought to
be exactly as many `destinationAddrs` as the number of emails that should
be sent.
Okay, recap: C++ has `std::array`, which is a dependently-typed container
that represents an array with a fixed number of elements. Chapel has something
similar. I think these types are valuable.
At this point, it sort of looks like I'm impressed with Chapel for copying a C++
feature from 2011. Not so! As I played with Chapel programs more and more,
arrays miraculously supported patterns that I knew I couldn't write in C++.
The underlying foundation of Chapel's array types is quite unlike any other.
Before we get to that, though, let's take a look at how dependent types
are normally used (by us mere mortal software engineers).
### Difficulties with Dependent Types
Let's start by looking at a simple operation on fixed-length lists: reversing them.
One might write a reverse function for "regular" lists, ignoring details
like ownership and copying, that looks like this:
```C++
std::vector<int> reverse(std::vector<int>);
```
This function is not general: it won't help us reverse lists of
strings, for instance. The "easy fix" is to replace `int` with some kind
of placeholder that can be replaced with any type.
```C++
std::vector<T> reverse(std::vector<T>);
```
You can try compiling this code, but you will immediately run into an error.
What the heck is `T`? Normally,
when we name a variable, function, or type (e.g., by writing `vector`, `reverse`),
we are referring to its declaration somewhere else. At this time, `T` is not
declared anywhere. It just "appears" in our function's type. To fix this,
we add a declaration for `T` by turning `reverse` into a template:
```C++
template <typename T>
std::vector<T> reverse(std::vector<T>);
```
The new `reverse` above takes two arguments: a type and a list of values of
that type. So, to _really_ call this `reverse`, we need to feed the type
of our list's elements into it. This is normally done automatically
(in C++ and otherwise) but under the hood, invocations might look like this:
```C++
reverse<int>({1,2,3}); // produces 3, 2, 1
reverse<string>({"world", "hello"}) // produces "hello", "world"
```
This is basically what we have to do to write `reverse` on `std::array`, which
includes an additional parameter that encodes its length. We might start with
the following (using `n` as a placeholder for length, and observing that
reversing an array doesn't change its length):
```C++
std::array<T, n> reverse(std::array<T, n>);
```
Once again, to make this compile, we need to add template parameters for `T` and `n`.
```C++
template <typename T, size_t n>
std::array<T, n> reverse(std::array<T, n>);
```
Now, you might be asking...
{{< dialog >}}
{{< message "question" "reader" >}}
This section is titled "Difficulties with Dependent Types". What's the difficulty?
{{< /message >}}
{{< /dialog >}}
Well, here's the kicker. C++ templates are a __compile-time mechanism__. As
a result, arguments to `template` (like `T` and `n`) must be known when the
program is being compiled. This, in turn, means
{{< sidenote "right" "deptype-note" "the following program doesn't work:" >}}
The observant reader might have noticed that one of the Chapel programs we
saw above, <code>sendEmails</code>, does something similar. The
<code>numEmails</code> argument is used in the type of the
<code>destinationAddrs</code> parameter. That program is valid Chapel.
{{< /sidenote >}}
```C++
void buildArray(size_t len) {
std::array<int, len> myArray;
// do something with myArray
}
```
You can't use these known-length types like `std::array` with any length
that is not known at compile-time. But that's a lot of things! If you're reading
from an input file, chances are, you don't know how big that file is. If you're
writing a web server, you likely don't know the length of the HTTP requests.
With every setting a user can tweak when running your code, you sacrifice the
ability to use templated types.
Also, how do you _return_ a `std::array`? If the size of the returned array is
known in advance, you just list that size:
```C++
std::array<int, 10> createArray();
```
If the size is not known at compile-time, you might want to do something like
the following --- using an argument `n` in the type of the returned array ---
but it would not compile:
```C++
auto computeNNumbers(size_t n) -> std::array<int, n>; // not valid C++
```
Moreover, you actually can't use `createArray` to figure out the required
array size, and _then_ return an array that big, even if in the end you
only used compile-time-only computations in the body of `createArray`.
What you would need is to provide a "bundle" of a value and a type that is somehow
built from that value.
```C++
// magic_pair is invented syntax, will not even remotely work
auto createArray() -> magic_pair<size_t size, std::array<int, size>>;
```
This pair contains a `size` (suppose it's known at compilation time for
the purposes of appeasing C++) as well as an array that uses that `size`
as its template argument. This is not real C++ -- not even close -- but
such pairs are a well-known concept. They are known as
[dependent pairs](https://unimath.github.io/agda-unimath/foundation.dependent-pair-types.html),
or, if you're trying to impress people, \(\Sigma\)-types. In Idris, you
could write `createArray` like this:
```Idris
createArray : () -> (n : Nat ** Vec n Int)
```
There are languages out there -- that are not C++, alas -- that support
dependent pairs, and as a result make it more convenient to use types that
depend on values. Not only that, but a lot of these languages do not force
dependent types to be determined at compile-time. You could write that
coveted `readArrayFromFile` function:
```Idris
readArrayFromFile : String -> IO (n : Nat ** Vec n String)
```
Don't mind `IO`; in pure languages like Idris, this type is a necessity when
interacting with the outside world: reading data in and sending it out. The key is that
`readArrayFromFile` produces, at runtime, a pair of `n`, which is the size
of the resulting array, and a `Vec` of that many `String`s (e.g., one string
per line of the file).
Dependent pairs are cool and very general. However, the end result of
types with bounds which are not determined at compile-time is that you're
_required_ to use dependent pairs. Thus, you must always carry the array's length
together with the array itself.
The bottom line is this:
* In true dependently typed languages, a type that depends on a value (like `Vec`
in Idris) lists that value in its type. When this value is listed by
referring to an identifier --- like `n` in `Vec n String` above --- this
identifier has to be defined somewhere, too. This necessitates dependent pairs,
in which the first element is used syntactically as the "definition point"
of a type-level value. For example, in the following piece of code:
```Idris
(n : Nat ** Vec n String)
```
The `n : Nat` part of the pair serves both to say that the first element
is a natural number, and to introduce a variable `n` that refers to
this number so that the second type (`Vec n String`) can refer to it.
A lot of the time, you end up carrying this extra value (bound to `n` above)
with your type.
* In more mainstream languages, things are even more restricted: dependently
typed values are a compile-time property, and thus, cannot be used with
runtime values like data read from a file, arguments passed in to a function,
etc..
### Hiding Runtime Values from the Type
Let's try to think of ways to make things more convenient. First of all, as
we saw, in Idris, it's possible to use runtime values in types. Not only that,
but Idris is a compiled language, so presumably we can compile dependently typed programs
with runtime-enabled dependent types. The trick is to forget some information:
turn a vector `Vec n String` into two values (the size of the vector and the
vector itself), and forget -- for the purposes of generating code -- that they're
related. Whenever you pass in a `Vec n String`, you can compile that similarly
to how you'd compile passing in a `Nat` and `List String`. Since the program has
already been type checked, you can be assured that you don't encounter cases
when the size and the actual vector are mismatched, or anything else of that
nature.
Additionally, you don't always need the length of the vector at all. In a
good chunk of Idris code, the size arguments are only used to ensure type
correctness and rule out impossible cases; they are never accessed at runtime.
As a result, you can _erase_ the size of the vector altogether. In fact,
[Idris 2](https://github.com/idris-lang/Idris2/) leans on [Quantitative Type Theory](https://bentnib.org/quantitative-type-theory.html)
to make erasure easier.
At this point, one way or another, we've "entangled" the vector with a value
representing its size:
* When a vector of some (unknown, but fixed) length needs to be produced from
a function, we use dependent pairs.
* Even in other cases, when compiling, we end up treating a vector as a
length value and the vector itself.
Generally speaking, a good language design practice is to hide extraneous
complexity, and to remove as much boilerplate as possible. If the size
value of a vector is always joined at the hip with the vector, can we
avoid having to explicitly write it?
This is pretty much exactly what Chapel does. It _allows_ explicitly writing
the domain of an array as part of its type, but doesn't _require_ it. When
you do write it (re-using my original snippet above):
```Chapel
var A: [0..9] int;
```
What you are really doing is creating a value (the [range](https://chapel-lang.org/docs/primers/ranges.html) `0..9`),
and entangling it with the type of `A`. This is very similar to what a language
like Idris would do under the hood to compile a `Vec`, though it's not quite
the same.
At the same time, you can write code that omits the bounds altogether:
```Chapel
proc processArray(A: [] int): int;
proc createArray(): [] int;
```
In all of these examples, there is an implicit runtime value (the bounds)
that is associated with the array's type. However, we are never forced to
explicitly thread through or include a size. Where reasoning about them is not
necessary, Chapel's domains are hidden away. Chapel refers to the implicitly
present value associated with an array type as its _runtime type_.
I hinted earlier that things are not quite the same in this representation
as they are in my simplified model of Idris. In Idris, as I mentioned earlier,
the values corresponding to vectors' indices can be erased if they are not used.
In Chapel, this is not the case --- a domain always exists at runtime. At the
surface level, this means that you may pay for more than what you use. However,
domains enable a number of interesting patterns of array code. We'll get
to that in a moment; first, I want to address a question that may be on
your mind:
{{< dialog >}}
{{< message "question" "reader" >}}
At this point, this looks just like keeping a <code>.length</code> field as
part of the array value. Most languages do this. What's the difference
between this and Chapel's approach?
{{< /message >}}
{{< /dialog >}}
This is a fair question. The key difference is that the length exists even if an array
does not. The following is valid Chapel code (re-using the `defaultValue`
snippet above):
```Chapel
proc defaultValue(type argType) {
var x: argType;
return x;
}
proc doSomething() {
type MyArray = [1..10] int;
var A = defaultValue(MyArray);
}
```
Here, we created an array `A` with the right size (10 integer elements)
without having another existing array as a reference. This might seem like
a contrived example (I could've just as well written `var A: [1..10] int`),
but the distinction is incredibly helpful for generic programming. Here's
a piece of code from the Chapel standard library, which implements
a part of Chapel's [reduction](https://chapel-lang.org/docs/primers/reductions.html) support:
{{< githubsnippet "chapel-lang/chapel" "e8ff8ee9a67950408cc6d4c3220ac647817ddae3" "modules/internal/ChapelReduce.chpl" "Chapel" 146 >}}
inline proc identity {
var x: chpl__sumType(eltType); return x;
}
{{< /githubsnippet >}}
Identity elements are important when performing operations like sums and products,
for many reasons. For one, they tell you what the sum (e.g.) should be when there
are no elements at all. For another, they can be used as an initial value for
an accumulator. In Chapel, when you are performing a reduction, there is a
good chance you will need several accumulators --- one for each thread performing
a part of the reduction.
That `identity` function looks almost like `defaultValue`! Since it builds the
identity element from the type, and since the type includes the array's dimensions,
summing an array-of-arrays, even if it's empty, will produce the correct output.
```Chapel
type Coordinate = [1..3] real;
var Empty: [0..<0] Coordinate;
writeln(+ reduce Empty); // sum up an empty list of coordinates
```
As I mentioned before, having the domain be part of the type can also enable
indexing optimizations --- without any need for [interprocedural analysis](https://en.wikipedia.org/wiki/Interprocedural_optimization) ---
in functions like `sumElementwise`:
```Chapel
proc sumElementwise(A: [?D] int, B: [D] int) {
var C: [D] int;
for idx in D do
C[idx] = A[idx] + B[idx];
}
```
The C++ equivalent of this function -- using `vectors` to enable arbitrary-size
lists of numbers read from user input, and `.at` to enable bounds checks ---
does not include enough information for this optimization to be possible.
```C++
void sumElementwise(std::vector<int> A, std::vector<int> B) {
std::vector<int> C(A.size());
for (size_t i = 0; i < A.size(); i++) {
C.at(i) = A.at(i) + B.at(i);
}
}
```
All in all, this makes for a very interesting mix of features:
* __Chapel arrays have their bounds as part of types__, like `std::array` in C++
and `Vec` in Idris. This enables all the benefits I've described above.
* __The bounds don't have to be known at compile-time__, like all dependent
types in Idris. This means you can read arrays from files (e.g.) and still
reason about their bounds as part of the type system.
* __Domain information can be hidden when it's not used__, and does not require
explicit additional work like template parameters or dependent pairs.
Most curiously, runtime types only extend to arrays and domains. In that sense,
they are not a general purpose replacement for dependent types. Rather,
they make arrays and domains special, and single out the exact case my
professor was [talking about in the introduction](#bounds-quote). Although
at times I've [twisted Chapel's type system in unconventional ways](https://chapel-lang.org/blog/posts/linear-multistep/)
to simulate dependent types, rarely have I felt a need for them while
programming in Chapel. In that sense --- and in the "practical software engineering"
domain --- I may have been proven wrong.
### Pitfalls of Runtime Types
Should all languages do things the way Chapel does? I don't think so. Like
most features, runtime types like that in Chapel are a language design
tradeoff. Though I've covered their motivation and semantics, perhaps
I should mention the downsides.
The greatest downside is that, generally speaking, _types are not always a
compile-time property_. We saw this earlier with `MyArray`:
```Chapel
type MyArray = [1..10] int;
```
Here, the domain of `MyArray` (one-dimensional with bounds `1..10`) is a runtime
value. It has an
{{< sidenote "right" "dce-note" "execution-time cost." >}}
The execution-time cost is, of course, modulo <a href="https://en.wikipedia.org/wiki/Dead-code_elimination">dead code elimination</a> etc.. If
my snippet made up the entire program being compiled, the end result would
likely do nothing, since <code>MyArray</code> isn't used anywhere.
{{< /sidenote >}}
Moreover, types that serve as arguments to functions (like `argType` for
`defaultValue`), or as their return values (like the result of `chpl__sumType`)
also have an execution-time backing. This is quite different from most
compiled languages. For instance, in C++, templates are "stamped out" when
the program is compiled. A function with a `typename T` template parameter
called with type `int`, in terms of generated code, is always the same as
a function where you search-and-replaced `T` with `int`. This is called
[monomorphization](https://en.wikipedia.org/wiki/Monomorphization), by the
way. In Chapel, however, if the function is instantiated with an array type,
it will have an additional parameter, which represents the runtime component
of the array's type.
The fact that types are runtime entities means that compile-time type checking
is insufficient. Take, for instance, the above `sendEmails` function:
```Chapel
proc sendEmails(numEmails: int, destinationAddrs: [1..numEmails] address) { /* ... */ }
```
Since `numEmails` is a runtime value (it's a regular argument!), we can't ensure
at compile-time that a value of some array matches the `[1..numEmails] address`
type. As a result, Chapel defers bounds checking to when the `sendEmails`
function is invoked.
This leads to some interesting performance considerations. Take two Chapel records
(similar to `struct`s in C++) that simply wrap a value. In one of them,
we provide an explicit type for the field, and in the other, we leave the field
type generic.
```Chapel
record R1 { var field: [1..10] int; }
record R2 { var field; }
var A = [1,2,3,4,5,6,7,8,9,10];
var r1 = new R1(A);
var r2 = new R2(A);
```
In a conversation with a coworker, I learned that these are not the same.
That's because the record `R1` explicitly specifies a type
for `field`. Since the type has a runtime component, the constructor
of `R1` will actually perform a runtime check to ensure that the argument
has 10 elements. `R2` will not do this, since there isn't any other type
to check against.
Of course, the mere existence of an additional runtime component is a performance
consideration. To ensure that Chapel programs perform as well as possible,
the Chapel standard library attempts to avoid using runtime components
wherever possible. This leads to a distinction between a "static type"
(known at compile-time) and a "dynamic type" (requiring a runtime value).
The `chpl__sumType` function we saw mentioned above uses static components of
types, because we don't want each call to `+ reduce` to attempt to run a number
of extraneous runtime queries.
### Conclusion
Though runtime types are not a silver bullet, I find them to be an elegant
middle-ground solution to the problem of tracking array bounds. They enable
optimizations, generic programming, and more, without the complexity of
a fully dependently-typed language. They are also quite unlike anything I've
seen in any other language.
What's more, this post only scratches the surface of what's possible using
arrays and domains. Besides encoding array bounds, domains include information
about how an array is distributed across several nodes (see the
[distributions primer](https://chapel-lang.org/docs/primers/distributions.html)),
and how it's stored in memory (see the [sparse computations](https://chapel-lang.org/blog/posts/announcing-chapel-2.3/#sparse-computations)
section of the recent 2.3 release announcement). In general, they are a very
flavorful component to Chapel's "special sauce" as a language for parallel
computing.
You can read more about arrays and domains in the [corresponding primer](https://chapel-lang.org/docs/primers/arrays.html).

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

View File

@@ -0,0 +1,415 @@
---
title: "Reasons to Love the Field of Programming Languages"
date: 2025-12-31
tags: ["Programming Languages", "Compilers", "Type Systems"]
---
I work at HPE on the
[Chapel Programming Language](https://chapel-lang.org). Recently, another HPE
person asked me:
> So, you work on the programming language. What's next for you?
This caught me off-guard because I hadn't even conceived of moving on.
I don't want to move on, because __I love the field of programming languages__.
In addition, I have come to think there is something in PL for everyone, from
theorists to developers to laypeople.
So, in that spirit, I am writing this list as a non-exhaustive survey that holds
the dual purpose of explaining my personal infatuation with PL, and providing
others with ways to engage with PL that align with their existing interests.
I try to provide rationale for each claim, but you can just read the reasons
themselves and skip the rest.
My general thesis goes something like this: programming languages are a unique
mix of the __inherently human and social__ and the __deeply mathematical__,
a mix that often remains deeply grounded in the practical, __low-level realities of
our hardware__.
Personally, I find all of these properties equally important, but we have to
start somewhere. Let's begin with the human aspect of programming languages.
### Human Aspects of PL
> Programs must be written for people to read, and only incidentally for machines
> to execute.
>
> --- Abelson & Sussman, _Structure and Interpretation of Computer Programs_.
As we learn more about the other creatures that inhabit our world, we discover
that they are similar to us in ways that we didn't expect. However, our
language is unique to us. It gives us the ability to go far beyond
the simple sharing of information: we communicate abstract concepts,
social dynamics, stories. In my view, storytelling is our birthright more
so than anything else.
I think this has always been reflected in the broader discipline of programming.
_Code should always tell a story_, I've heard throughout my education and career.
_It should explain itself_. In paradigms such as
[literate programming](https://en.wikipedia.org/wiki/Literate_programming),
we explicitly mix prose and code. Notebook technologies
like [Jupyter](https://jupyter.org/) intersperse computation with explanations
thereof.
* __Reason 1__: programming languages provide the foundation of expressing
human thought and stories through code.
From flowery prose to clinical report, human expression takes a wide variety
of forms. The need to vary our descriptions is also well-served by the diversity
of PL paradigms. From stateful transformations in languages like Python and C++,
through pure and immutable functions in Haskell and Lean, to fully declarative
statements-of-fact in Nix, various languages have evolved to
support the many ways in which we wish to describe our world and our needs.
* __Reason 2__: diverse programming languages enable different perspectives
and ways of storytelling, allowing us choice in how to express our thoughts
and solve our problems.
Those human thoughts of ours are not fundamentally grounded in logic,
mathematics, or anything else. They are a product of millennia of evolution
through natural selection, of adaptation to ever-changing conditions.
Our cognition is limited, rife with blind spots, and partial to the subject
matter at hand. We lean on objects, actors, contracts, and more as helpful,
mammal-compatible analogies. I find this to be beautiful; here is something
we can really call ours.
* __Reason 3__: programming languages imbue the universe's fundamental rules of
computation with humanity's identity and idiosyncrasies. They carve out
a home for us within impersonal reality.
Storytelling (and, more generally, writing) is not just about communicating
with others. Writing helps clarify one's own thoughts, and to think deeper.
In his 1979 Turing Award lecture,
[Notation as a Tool of Thought](https://www.eecg.utoronto.ca/~jzhu/csc326/readings/iverson.pdf),
Kenneth Iverson, the creator of [APL](https://tryapl.org/), highlighted ways
in which programming languages, with their notation, can help express patterns
and facilitate thinking.
Throughout computing history, programming languages built abstractions that ---
together with advances in hardware --- made it possible to create ever more
complex software. Dijkstra's
[structured programming](https://en.wikipedia.org/wiki/Structured_programming)
crystallized the familiar patterns of `if`/`else` and `while` out of
a sea of control flow. Structures and objects partitioned data and state
into bundles that could be reasoned about, or put out of mind when irrelevant.
Recently, I dare say that notions of ownership and lifetimes popularized
by Rust have clarified how we think about memory.
* __Reason 4__: programming languages combat complexity, and give us tools to
think and reason about unwieldy and difficult problems.
The fight against complexity occurs on more battlegrounds than PL design alone.
Besides its syntax and semantics, a programming language is comprised of its
surrounding tooling: its interpreter or compiler, perhaps its package manager
or even its editor. Language designers and developers take great care to
[improve the quality of error messages](https://elm-lang.org/news/compiler-errors-for-humans),
to provide [convenient editor tooling](https://chapel-lang.org/blog/posts/chapel-lsp/),
and build powerful package managers
like [Yarn](https://yarnpkg.com/). Thus, in each language project, there is
room for folks who, even if they are not particularly interested in grammars or
semantics, care about the user experience.
* __Reason 5__: programming languages provide numerous opportunities for
thoughtful forays into the realms of User Experience and Human-Computer
Interaction.
I hope you agree, by this point, that programming languages are fundamentally
tethered to the human. Like any human endeavor, then, they don't exist in
isolation. To speak a language, one usually wants a partner who understands
and speaks that same language. Likely, one wants a whole community, topics
to talk about, or even a set of shared beliefs or mythologies. This desire
maps onto the realm of programming languages. When using a particular PL,
you want to talk to others about your code, implement established design patterns,
use existing libraries.
I mentioned mythologies earlier. In some ways, language
communities do more than share know-how about writing code. In many
cases, I think language communities rally around ideals embodied by their
language. The most obvious example seems to be Rust. From what I've seen,
the Rust community believes in language design that protects its users
from the pitfalls of low-level programming. The Go community
believes in radical simplicity. Julia actively incorporates contributions from
diverse research projects into an interoperable set of scientific packages.
* __Reason 6__: programming languages are complex collaborative social projects
that have the power to champion innovative ideas within the field of
computer science.
So far, I've presented interpretations of the field of PL as tools for expression and thought,
human harbor to the universe's ocean, and collaborative social projects.
These interpretations coexist and superimpose, but they are only a fraction of
the whole. What has kept me enamored with PL is that it blends these human
aspects with a mathematical ground truth, through fundamental connections to
computation and mathematics.
### The Mathematics of PL
> Like buses: you wait two thousand years for a definition of “effectively
> calculable”, and then three come along at once.
>
> --- Philip Wadler, _Propositions as Types_
There are two foundations,
[lambda calculus](https://en.wikipedia.org/wiki/Lambda_calculus) and
[Turing machines](https://en.wikipedia.org/wiki/Turing_machine), that underpin
most modern PLs. The abstract notion of Turing machines
is closely related to --- and, among the "famous" computational models, most
similar to --- the
[von Neumann Architecture](https://en.wikipedia.org/wiki/Von_Neumann_architecture).
Through bottom-up organization of "control unit instructions" into
"structured programs" into the imperative high-level languages today, we can
trace the influence of Turing machines in C++, Python, Java, and many others.
At the same time, and running on the same hardware, functional programming
languages like Haskell represent a chain of succession from the lambda calculus,
embellished today with types and numerous other niceties. These two lineages
are inseparably linked: they have been mathematically proven to be equivalent.
They are two worlds coexisting.
The two foundations have a crucial property in common: they are descriptions
of what can be computed. Both were developed initially as mathematical formalisms.
They are rooted not only in pragmatic concerns of "what can I do with
these transistors?", but in the deeper questions of "what can be done
with a computer?".
* __Reason 7__: general-purpose programming languages are built on foundations of computation,
and wield the power to compute anything we consider "effectively computable" at all.
Because of these mathematical beginnings, we have long had precise and powerful
ways to talk about what code written in a particular language _means_.
This is the domain of _semantics_. Instead of reference implementations
of languages (CPython for Python, `rustc` for Rust), and instead of textual
specifications, we can explicitly map constructs in languages either to
mathematical objects ([denotational semantics](https://en.wikipedia.org/wiki/Denotational_semantics))
or to (abstractly) execute them ([operational semantics](https://en.wikipedia.org/wiki/Operational_semantics)).
To be honest, the precise and mathematical nature of these tools is, for me,
justification enough to love them. However, precise semantics for languages
have real advantages. For one, they allow us to compare programs' real
behavior with what we _expect_, giving us a "ground truth" when trying to
fix bugs or evolve the language. For another, they allow us to confidently
make optimizations: if you can _prove_ that a transformation won't affect
a program's behavior while making it faster, you can safely use it. Finally,
the discipline of formalizing programming language semantics usually entails
boiling them down to their most essential components. Stripping the
[syntax sugar](https://en.wikipedia.org/wiki/Syntactic_sugar) helps clarify
how complex combinations of features should behave together.
Some of these techniques bear a noticeable resemblance to the study of
semantics in linguistics. Given our preceding discussion on the humanity
of programming languages, perhaps that's not too surprising.
* __Reason 8__: programming languages can be precisely formalized, giving
exact, mathematical descriptions of how they should work.
In talking about how programs behave, we run into an important limitation
of reasoning about Turing machines and lambda calculus, stated precisely in
[Rice's theorem](https://en.wikipedia.org/wiki/Rice%27s_theorem):
all non-trivial semantic properties of programs (termination, throwing errors)
are undecidable. There will always be programs that elude not only human analysis,
but algorithmic understanding.
It is in the context of this constraint that I like to think about type systems.
The beauty of type systems, to me, is in how they tame the impossible.
Depending on the design of a type system, a well-typed program may well be
guaranteed not to produce any errors, or produce only the "expected" sort of
errors. By constructing reasonable _approximations_ of program
behavior, type systems allow us to verify that programs are well-behaved in
spite of Rice's theorem. Much of the time, too, we can do so in a way that is
straightforward for humans to understand and machines to execute.
* __Reason 9__: in the face of the fundamentally impossible, type systems
pragmatically grant us confidence in our programs for surprisingly little
conceptual cost.
At first, type systems look like engineering formalisms. That
may well be the original intention, but in our invention of type systems,
we have actually completed a quadrant of a deeper connection: the
[Curry-Howard isomorphism](https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence).
[Propositions](https://en.wikipedia.org/wiki/Proposition), in the logical sense,
correspond one-to-one with types of programs, and proofs of these propositions
correspond to programs that have the matching type.
This is an incredibly deep connection. In adding parametric polymorphism
to a type system (think Java generics, or C++ templates without specialization),
we augment the corresponding logic with the "for all x" (\(\forall x\)) quantifier.
Restrict the copying of values in a way similar to Rust, and you get an
[affine logic](https://en.wikipedia.org/wiki/Affine_logic), capable of reasoning about resources and their use.
In languages like Agda with [dependent types](https://en.wikipedia.org/wiki/Dependent_type),
you get a system powerful enough [to serve as a foundation for mathematics](https://en.wikipedia.org/wiki/Intuitionistic_type_theory).
Suddenly, you can write code and mathematically prove properties about that
code in the same language. I've done this in my work with
[formally-verified static program analysis]({{< relref "series/static-program-analysis-in-agda" >}}).
This connection proves appealing even from the perspective of "regular"
mathematics. We have developed established engineering practices
for writing code: review, deployment, documentation. What if we could use
the same techniques for doing mathematics? What if, through the deep
connection of programming languages to logic, we could turn mathematics
into a computer-verified, collaborative endeavor?
I therefore present:
* __Reason 10__: type systems for programming languages deeply correspond
to logic, allowing us to mathematically prove properties about code,
using code, and to advance mathematics through the practices of software engineering.
{{< details summary="Bonus meta-reason to love the mathy side of PL!" >}}
In addition to the theoretical depth, I also find great enjoyment in the way that PL is practiced.
Here more than elsewhere, creativity and artfulness come into
play. In PL, [inference rules](https://en.wikipedia.org/wiki/Rule_of_inference) are a
lingua franca through which the formalisms I've mentioned above are expressed
and shared. They are such a central tool in the field that I've
developed [a system for exploring them interactively]({{< relref "blog/bergamot" >}})
on this blog.
In me personally, inference rules spark joy. They are a concise and elegant
way to do much of the formal heavy-lifting I described in this section;
we use them for operational semantics, type systems, and sometimes more.
When navigating the variety and complexity of the many languages and type
systems out there, we can count on inference rules to take us directly to
what we need to know. This same variety naturally demands flexibility in
how rules are constructed, and what notation is used. Though this can sometimes
be troublesome (one [paper](https://labs.oracle.com/pls/apex/f?p=LABS%3A0%3A%3AAPPLICATION_PROCESS%3DGETDOC_INLINE%3A%3A%3ADOC_ID%3A959)
I've seen describes __27__ different ways of writing the simple operation of substitution in literature!),
it also creates opportunities for novel and elegant ways of formalizing
PL.
* __Bonus Reason__: the field of programming languages has a standard technique
for expressing its formalisms, which precisely highlights core concepts
and leaves room for creative expression and elegance.
{{< /details >}}
I know that mathematics is a polarizing subject. Often, I find myself
torn between wanting precision and eschewing overzealous formalism. The
cusp between the two is probably determined by my own tolerance for abstraction.
Regardless of how much abstraction you are interested in learning about,
PL has another dimension, close to the ground: more often than not, our languages
need to execute on real hardware.
### Pragmatics of PL
Your perfectly-designed language can be completely useless if there is no
way to
{{< sidenote "right" "execute-note" "execute it" >}}
Technically, there are languages that don't care if you execute them at all.
Many programs in theorem-proving languages like Agda and Rocq exist only
to be type-checked. So, you could nitpick this claim; or, you could take
it more generally: your language can be useless if there's no
way to make it efficiently do what it's been made to do.
{{< /sidenote >}} efficiently. Thus, the field of PL subsumes not only
the theoretical foundations of languages and their human-centric design; it
includes also their realization as software.
The overall point of this section is that there is much depth to the techniques
involved in bringing a programming language to life. If you are a tinkerer
or engineer at heart, you will never run out of avenues of exploration.
The reasons are all framed from this perspective.
One fascinating aspect to programming languages is the "direction" from
which they have grown. On one side, you have languages that came
together from the need to control and describe hardware. I'd say that
this is the case for C and C++, Fortran, and others. More often than not,
these languages are compiled to machine code. Still subject to human
constraints, these languages often evolve more user-facing features as time
goes on. On the other side, you have languages developed to enable
people to write software, which later faced the constraints of actually working
efficiently. These are languages like Python, Ruby, and JavaScript. These
languages are often interpreted (executed by a dedicated program), with
techniques such as [just-in-time compilation](https://en.wikipedia.org/wiki/Just-in-time_compilation).
There is no one-size-fits-all way to execute a language, and as a result,
* __Reason 11__: the techniques of executing programming languages are varied
and rich. From compilation, to JIT, to interpretation, the field
has many sub-disciplines, each with its own know-hows and tricks.
At the same time, someone whose goal is to actually develop a compiler
likely doesn't want to develop everything from scratch. To do so would
be a daunting task, especially if you want the compiler to run beyond
the confines of a personal machine. CPU [architectures](https://en.wikipedia.org/wiki/Instruction_set_architecture)
and operating system differences are hard for any individual to keep up with.
Fortunately, we have a gargantuan ongoing effort in the field:
the [LLVM Project](https://llvm.org/). LLVM spans numerous architectures
and targets, and has become a common back-end for languages like C++
(via [Clang](https://clang.llvm.org/get_started.html)), Swift, and Rust.
LLVM helps share and distribute the load of keeping up with the ongoing
march of architectures and OSes. It also provides a shared playground upon
which to experiment with language implementations, optimizations, and more.
* __Reason 12__: large projects like LLVM enable language designers to
lean on decades of precedent to develop a compiler for their language.
Though LLVM is powerful, it does not automatically grant languages implemented
with it good performance. In fact, no other tool does. To make a language
run fast requires a deep understanding of the language itself, the hardware
upon which it runs, and the tools used to execute it. That is a big ask!
Modern computers are extraordinarily complex. Techniques such as
[out-of-order execution](https://en.wikipedia.org/wiki/Out-of-order_execution),
[caching](https://en.wikipedia.org/wiki/Cache_(computing)#HARDWARE),
and [speculative execution](https://en.wikipedia.org/wiki/Speculative_execution)
are constantly at play. This means that any program is subject to hard-to-predict
and often unintuitive effects. On top of that, depending on your language's
capabilities, performance work can often entail working with additional
hardware, such as GPUs and NICs, which have their own distinct performance
characteristics. This applies both to compiled and interpreted languages.
Therefore, I give you:
* __Reason 13__: improving the performance of a programming language is rife
with opportunities to engage with low-level details of the hardware
and operating system.
In the [mathematics section](#the-mathematics-of-pl), we talked about how constructing correct
optimizations requires an understanding of the language's semantics. It
was one of the practical uses for having a mathematical definition of a language.
Reason 13 is where that comes in, but the synthesis is not automatic. In fact,
a discipline sits in-between defining how a language behaves and
optimizing programs: program analysis. Algorithms that analyze
properties of programs such as [reaching definitions](https://en.wikipedia.org/wiki/Reaching_definition)
enable optimizations such as [loop-invariant code motion](https://en.wikipedia.org/wiki/Loop-invariant_code_motion),
which can have very significant performance impact. At the same time, for an
analysis to be correct, it must be grounded in the program's mathematical
semantics. There are many fascinating techniques in this discipline,
including [ones that use lattice theory](https://cs.au.dk/~amoeller/spa/spa.pdf).
* __Reason 14__: the sub-discipline of program analysis serves as a grounded
application of PL theory to PL practice, enabling numerous optimizations
and transformations.
The programs your compiler generates are software, and, as we just saw,
may need to be tweaked for performance. But the compiler and/or interpreter
is itself a piece of software, with performance concerns of its own. Today's language
implementations are subject to demands that hadn't been there historically.
For instance, languages are used to provide [language servers](https://microsoft.github.io/language-server-protocol/)
to enable editors to give users deeper insights into their code. Today,
a language implementation may be called upon every keystroke to provide
a typing user live updates. This has led to the introduction of
techniques like the [query architecture](https://ollef.github.io/blog/posts/query-based-compilers.html)
(see also [salsa](https://github.com/salsa-rs/salsa)) to avoid
redundant work and re-use intermediate results. New language implementations
like that of [Carbon](https://github.com/carbon-language/carbon-lang)
are exploring alternative representations of programs in memory. In
short,
* __Reason 15__: language implementations are themselves pieces of software,
subject to unique constraints and requiring careful and innovative
engineering.
### Conclusion
I've now given a tour of ways in which I found the PL field compelling,
organized across three broad categories. There is just one more reason
I'd like to share.
I was 16 years old when I got involved with the world of programming
languages and compilers. Though I made efforts to learn about it through
literature (the _Dragon Book_, and _Modern Compiler Design_), I simply
didn't have the background to find these resources accessible. However, all
was not lost. The PL community online has been, and still is, a vibrant and
enthusiastic place. I have found it to be welcoming of folks with backgrounds
spanning complete beginners and experts alike. Back then, it gave me
accessible introductions to anything I wanted. Now, every week I see new
articles go by that challenge my intuitions, teach me new things, or take PL
ideas to absurd and humorous extremes. So, my final reason:
* __Reason 16__: the programming languages community is full of brilliant,
kind, welcoming and enthusiastic people, who dedicate much of their
time to spreading the joy of the field.
I ❤️ you.

View File

@@ -17,7 +17,8 @@ spend time explaining dependent types, nor the syntax for them in Idris,
which is the language I'll use in this article. Below are a few resources which is the language I'll use in this article. Below are a few resources
that should help you get up to speed. that should help you get up to speed.
{{< todo >}}List resources{{< /todo >}} > [!TODO]
> List resources
We've seen that, given a function `F a -> a`, we can define a function We've seen that, given a function `F a -> a`, we can define a function
`B -> a`, if `F` is a base functor of the type `B`. However, what if `B -> a`, if `F` is a base functor of the type `B`. However, what if

View File

@@ -25,13 +25,16 @@ class KatexRenderer
end end
def substitute(content) def substitute(content)
found_any = false
rendered = content.gsub /\\\(((?:[^\\]|\\[^\)])*)\\\)/ do |match| rendered = content.gsub /\\\(((?:[^\\]|\\[^\)])*)\\\)/ do |match|
found_any = true
render(false, $~[1]) render(false, $~[1])
end end
rendered = rendered.gsub /\$\$((?:[^\$]|$[^\$])*)\$\$/ do |match| rendered = rendered.gsub /\$\$((?:[^\$]|$[^\$])*)\$\$/ do |match|
found_any = true
render(true, $~[1]) render(true, $~[1])
end end
return rendered return rendered, found_any
end end
end end
@@ -58,8 +61,20 @@ renderer = KatexRenderer.new(katex)
files.each do |file| files.each do |file|
puts "Rendering file: #{file}" puts "Rendering file: #{file}"
document = Nokogiri::HTML.parse(File.open(file)) document = Nokogiri::HTML.parse(File.open(file))
found_any = false
document.search('//*[not(ancestor-or-self::code or ancestor-or-self::script)]/text()').each do |t| document.search('//*[not(ancestor-or-self::code or ancestor-or-self::script)]/text()').each do |t|
t.replace(renderer.substitute(t.content)) rendered, found_any_in_text = renderer.substitute(t.content)
found_any ||= found_any_in_text
t.replace(rendered)
end end
# If we didn't find any mathematical equations, no need to include KaTeX CSS.
# Disabled here because Bergamot technically doesn't require math blocks
# on the page but does need the CSS.
#
# unless found_any
# document.css('link[href$="katex.css"], link[href$="katex.min.css"]').each(&:remove)
# end
File.write(file, document.to_html(encoding: 'UTF-8')) File.write(file, document.to_html(encoding: 'UTF-8'))
end end

View File

@@ -1,2 +1,2 @@
{{ $style := resources.Get "scss/donate.scss" | resources.ToCSS | resources.Minify }} {{ $style := resources.Get "scss/donate.scss" | css.Sass | resources.Minify }}
<link rel="stylesheet" href="{{ $style.Permalink }}"> <link rel="stylesheet" href="{{ $style.Permalink }}">

View File

@@ -1,2 +1,2 @@
{{ $style := resources.Get "scss/gmachine.scss" | resources.ToCSS | resources.Minify }} {{ $style := resources.Get "scss/gmachine.scss" | css.Sass | resources.Minify }}
<link rel="stylesheet" href="{{ $style.Permalink }}"> <link rel="stylesheet" href="{{ $style.Permalink }}">

View File

@@ -1,2 +1,2 @@
{{ $style := resources.Get "scss/stack.scss" | resources.ToCSS | resources.Minify }} {{ $style := resources.Get "scss/stack.scss" | css.Sass | resources.Minify }}
<link rel="stylesheet" href="{{ $style.Permalink }}"> <link rel="stylesheet" href="{{ $style.Permalink }}">

View File

@@ -3,7 +3,7 @@
<html lang="{{ .Site.Language.Lang }}"> <html lang="{{ .Site.Language.Lang }}">
{{- partial "head.html" . -}} {{- partial "head.html" . -}}
<body> <body>
{{ $voidcss := resources.Get "scss/thevoid.scss" | resources.ToCSS | resources.Minify }} {{ $voidcss := resources.Get "scss/thevoid.scss" | css.Sass | resources.Minify }}
<link rel="stylesheet" href="{{ $voidcss.Permalink }}"> <link rel="stylesheet" href="{{ $voidcss.Permalink }}">
{{- partial "header.html" . -}} {{- partial "header.html" . -}}
<div class="container"><hr class="header-divider"></div> <div class="container"><hr class="header-divider"></div>