diff --git a/fe/dns.go b/fe/dns.go
index 77c3f2d58ce9096ac321c06ba68d905c7de527b7..09ea0b84a96f865ba565632474c9d346094e1a97 100644
--- a/fe/dns.go
+++ b/fe/dns.go
@@ -26,6 +26,7 @@ var (
 	}
 )
 
+// DnsRedirector is the DNS server that answers queries for the
+// configured origin with the addresses of active nodes.
 type DnsRedirector struct {
 	client         *radioai.RadioAPI
 	origin         string
@@ -35,6 +36,8 @@ type DnsRedirector struct {
 	soa            dns.RR
 }
 
+// NewDnsRedirector returns a DNS server for the given origin and
+// publicIp. The A records served will have the specified ttl.
 func NewDnsRedirector(client *radioai.RadioAPI, origin, publicIp string, ttl int) *DnsRedirector {
 	if !strings.HasSuffix(origin, ".") {
 		origin += "."
@@ -116,6 +119,7 @@ func (d *DnsRedirector) recordForIp(name string, ip string) *dns.A {
 	}
 }
 
+// getQuestionName returns the query name with the origin stripped.
 func (d *DnsRedirector) getQuestionName(req *dns.Msg) string {
 	lx := dns.SplitDomainName(req.Question[0].Name)
 	ql := lx[0 : len(lx)-d.originNumParts]
@@ -190,6 +194,8 @@ func (d *DnsRedirector) serveDNS(w dns.ResponseWriter, req *dns.Msg) {
 	w.WriteMsg(m)
 }
 
+// Run starts the DNS servers (both TCP and UDP) on the given
+// address. It spawns new goroutines and returns immediately.
 func (d *DnsRedirector) Run(addr string) {
 	dns.HandleFunc(d.origin, func(w dns.ResponseWriter, r *dns.Msg) {
 		d.serveDNS(w, r)
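For reference, a minimal usage sketch of the DnsRedirector API documented above. The origin, public IP, TTL and listen address are illustrative values, and how the radioai.RadioAPI client is obtained is outside the scope of this diff.

	// Hypothetical usage of DnsRedirector (illustrative values only),
	// assumed to live in package fe next to the code above.
	func startDNS(client *radioai.RadioAPI) {
		// Serve A records for the stream.example.com origin with a
		// 300-second TTL.
		d := NewDnsRedirector(client, "stream.example.com", "203.0.113.10", 300)
		// Run spawns the TCP and UDP servers in new goroutines and
		// returns immediately, so keep the process alive here.
		d.Run(":5353")
		select {}
	}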
diff --git a/fe/doc.go b/fe/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..6be3e4083d51d1ebf4bad1b70a3617710fd4e468
--- /dev/null
+++ b/fe/doc.go
@@ -0,0 +1,37 @@
+// The front-end ('fe') code directs user traffic where we want it:
+// that is, to a node that is alive and (possibly) not overloaded.
+// We do this at two different levels, DNS and HTTP, with two
+// slightly different goals: the former focuses on availability,
+// while the latter attempts to distribute resource usage evenly.
+//
+// DNS is used to provide at least one address of an active server:
+// the ability to return multiple results and the high-TTL nature of
+// the service mean that we can simply return all the active nodes
+// on every request, maximizing the chances that at least one of
+// them will be active over a longer period of time.
+//
+// HTTP requests are instantaneous, and we can't rely on the client
+// doing retries, so we must point the user at a single, active node
+// on every request. There are two different policies, depending on
+// the type of the request:
+//
+// - SOURCE requests must always reach the current master node. Since
+// redirects tend to confuse streaming sources, which might have very
+// simple HTTP implementations, we simply proxy the stream to the
+// master node.
+//
+// - listener requests must be routed to an available relay, taking
+// utilization into account (be it in terms of bandwidth, CPU usage,
+// or other resources). The fact that the DNS layer returns multiple
+// addresses already provides a very rough form of load balancing,
+// but for accurate bandwidth planning we can't just rely on clients'
+// cooperation. So when a client requests a stream using its public
+// URL, we need to serve a redirect to the desired node, computed
+// according to the load balancing policy. This is currently done by
+// serving an M3U file pointing directly at the target node's icecast
+// daemon (but this may lock clients to that specific target node on
+// failure; client reconnection policies still need some
+// investigation).
+//
+package fe
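To make the listener redirect described above concrete, here is a rough sketch of such a handler. The relay selection (pickRelay), the icecast port and the URL layout are assumptions made for illustration; they do not necessarily match the actual fe implementation.

	// Sketch of a listener redirect: pick a relay according to the
	// load balancing policy (pickRelay is hypothetical) and serve a
	// one-line M3U playlist pointing straight at that node's icecast
	// daemon, so the client connects to it directly.
	func serveM3U(w http.ResponseWriter, r *http.Request) {
		relayIP := pickRelay() // hypothetical load balancing decision
		streamURL := fmt.Sprintf("http://%s:8000%s", relayIP, r.URL.Path)
		w.Header().Set("Content-Type", "audio/x-mpegurl")
		fmt.Fprintln(w, streamURL)
	}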
diff --git a/node/doc.go b/node/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..17645cdd4e10c472798392fa584a0fdaa306145c
--- /dev/null
+++ b/node/doc.go
@@ -0,0 +1,18 @@
+// The 'node' supervisor is the daemon that controls the Icecast
+// server, updating its configuration according to what is stored in
+// the distributed database (etcd).
+//
+// A configuration change will regenerate the Icecast configuration
+// file and trigger a reload of the Icecast daemon, which implies that
+// the node takes ownership of the entire Icecast config (manual edits
+// will be overwritten).
+//
+// Nodes run a presence protocol (using the database) to discover each
+// other and ensure that the full list of active nodes is available to
+// clients at any given time.
+//
+// Nodes also run a master election protocol: a single 'master' server
+// is elected among them, which will act as the receiver for the
+// source streams; the other nodes will be configured as relays.
+//
+package node
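As an illustration of the presence protocol mentioned above, a hedged sketch follows (imports of time and log assumed). The kvStore interface, the key layout and the TTL are assumptions made for the example; they are not the actual etcd client API used by this package.

	// Presence sketch: each node keeps a short-lived key alive in the
	// shared database; a node that stops refreshing it drops out of
	// the active list once the TTL expires.
	type kvStore interface {
		Set(key, value string, ttl time.Duration) error
	}

	func announcePresence(db kvStore, nodeName, publicIp string) {
		ttl := 30 * time.Second
		for {
			if err := db.Set("/nodes/"+nodeName, publicIp, ttl); err != nil {
				log.Printf("presence update failed: %v", err)
			}
			// Refresh well before the key expires.
			time.Sleep(ttl / 3)
		}
	}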
diff --git a/node/icecast.go b/node/icecast.go
index 3d40a993e6118c8a91bec33e7d5c499c7ab0f119..50b98a28c219588b52b165c664e936088c4038ee 100644
--- a/node/icecast.go
+++ b/node/icecast.go
@@ -30,6 +30,7 @@ func (ic *IcecastController) reload() error {
 	return err
 }
 
+// Update regenerates the Icecast configuration from the cluster
+// state and reloads the daemon.
 func (ic *IcecastController) Update(conf *ClusterConfig, isMaster bool, masterAddr string) error {
 	if !isMaster && masterAddr == "" {
 		return errors.New("unknown system state")
diff --git a/node/icecast_config.go b/node/icecast_config.go
index 10060a4592dd2b0571dc8b02c3fdead7a7681fea..f7b7c5a9cf18805286a92698039988f50806bdda 100644
--- a/node/icecast_config.go
+++ b/node/icecast_config.go
@@ -80,6 +80,8 @@ type iceMountConfig struct {
 	OnDisconnect     string `xml:"on-disconnect,omitempty"`
 }
 
+// Configuration of the local Icecast daemon (meant for serialization
+// to XML).
 type icecastConfig struct {
 	XMLName   xml.Name
 	Limits    iceLimitsConfig         `xml:"limits"`
@@ -94,6 +96,20 @@ type icecastConfig struct {
 	Mounts    []iceMountConfig        `xml:"mount"`
 }
 
+// defaultDebianConfig creates an Icecast configuration suitable for
+// a Debian-based system install of the 'icecast2' package. Things
+// to note about the generated config:
+//
+// - It binds to the IcecastPort (defined in api.go) on all
+// interfaces.
+//
+// - Local administration is practically disabled. A random admin
+// password is created every time the daemon starts. Same goes for the
+// global source password.
+//
+// Some of the parameters should probably be command-line flags, so
+// that it is possible to set them on a per-host basis.
+//
 func defaultDebianConfig(publicIp string) *icecastConfig {
 	// Pick some random passwords on startup. We don't use them,
 	// but icecast is happier if they're set.
@@ -144,6 +160,7 @@ func newIcecastConfig(publicIp string) *icecastConfig {
 	return defaultDebianConfig(publicIp)
 }
 
+// Encode returns the configuration serialized as XML.
 func (c *icecastConfig) Encode() ([]byte, error) {
 	var buf bytes.Buffer
 
@@ -158,6 +175,7 @@ func (c *icecastConfig) Encode() ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
+// EncodeToFile writes the configuration to a file.
 func (c *icecastConfig) EncodeToFile(path string) error {
 	file, err := os.Create(path)
 	if err != nil {
@@ -202,6 +220,9 @@ func mountToRelay(masterAddr string, m *radioai.Mount) iceRelayConfig {
 	}
 }
 
+// Update refreshes the configuration with the current list of
+// mounts and the master election state. This will clear the Mounts
+// and Relays fields and set them to new values.
 func (ic *icecastConfig) Update(config *ClusterConfig, isMaster bool, masterAddr string) {
 	ic.Mounts = nil
 	ic.Relays = nil