Commit f0b67dfa authored by ale

improve documentation

parent 62a90409
......@@ -26,6 +26,7 @@ var (
}
)
// DnsRedirector implements the DNS server for the service origin,
// answering queries with the addresses of the active nodes.
type DnsRedirector struct {
client *radioai.RadioAPI
origin string
......@@ -35,6 +36,8 @@ type DnsRedirector struct {
soa dns.RR
}
// NewDnsRedirector returns a DNS server for the given origin and
// publicIp. The A records served will have the specified ttl.
func NewDnsRedirector(client *radioai.RadioAPI, origin, publicIp string, ttl int) *DnsRedirector {
if !strings.HasSuffix(origin, ".") {
origin += "."
......@@ -116,6 +119,7 @@ func (d *DnsRedirector) recordForIp(name string, ip string) *dns.A {
}
}
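
The hunk above touches recordForIp, which builds the A records served for each node. Purely as an illustration of the miekg/dns types involved (the TTL handling and exact field values of the real function are not shown in this diff):

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

// exampleARecord shows how an A record with a fixed TTL is typically
// assembled with github.com/miekg/dns; illustrative only, not the
// project's recordForIp.
func exampleARecord(name, ip string, ttl int) *dns.A {
	return &dns.A{
		Hdr: dns.RR_Header{
			Name:   name,
			Rrtype: dns.TypeA,
			Class:  dns.ClassINET,
			Ttl:    uint32(ttl),
		},
		A: net.ParseIP(ip),
	}
}

func main() {
	fmt.Println(exampleARecord("radio.example.com.", "192.0.2.10", 300))
}
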
// Strip the origin from the query.
func (d *DnsRedirector) getQuestionName(req *dns.Msg) string {
lx := dns.SplitDomainName(req.Question[0].Name)
ql := lx[0 : len(lx)-d.originNumParts]
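
To make the stripping concrete, here is a toy version under the assumption that originNumParts is the number of labels in the origin; the join and lowercasing of the remaining labels are illustrative, not necessarily what the real method does:

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	// A query for "radio.example.com." against an origin of
	// "example.com." (two labels, so originNumParts == 2).
	labels := dns.SplitDomainName("radio.example.com.") // ["radio", "example", "com"]
	originNumParts := 2
	name := strings.ToLower(strings.Join(labels[:len(labels)-originNumParts], "."))
	fmt.Println(name) // "radio"
}
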
......@@ -190,6 +194,8 @@ func (d *DnsRedirector) serveDNS(w dns.ResponseWriter, req *dns.Msg) {
w.WriteMsg(m)
}
// Run starts the DNS servers on the given address (both tcp and udp).
// It creates new goroutines and returns immediately.
func (d *DnsRedirector) Run(addr string) {
dns.HandleFunc(d.origin, func(w dns.ResponseWriter, r *dns.Msg) {
d.serveDNS(w, r)
......
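
The Run contract documented above (handler registered for the origin, both transports served, immediate return) matches the usual miekg/dns server pattern. A standalone sketch of that pattern, not the project's actual implementation; the origin and address below are placeholders:

package main

import (
	"log"

	"github.com/miekg/dns"
)

// runDNS sketches the usual miekg/dns pattern behind a Run-style
// method: register a handler for the origin, then serve UDP and TCP
// in separate goroutines so the caller returns immediately.
func runDNS(origin, addr string, handler dns.HandlerFunc) {
	dns.HandleFunc(origin, handler)
	for _, network := range []string{"udp", "tcp"} {
		srv := &dns.Server{Addr: addr, Net: network}
		go func(s *dns.Server) {
			if err := s.ListenAndServe(); err != nil {
				log.Printf("dns server (%s) error: %v", s.Net, err)
			}
		}(srv)
	}
}

func main() {
	runDNS("example.com.", ":5353", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})
	select {} // block forever; runDNS itself returns immediately
}
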
// The front-end ('fe') code directs user traffic where we want it:
// to a node that is alive and (possibly) not overloaded. We do this
// at two different levels, DNS and HTTP, with slightly different
// goals: the former focuses on availability, while the latter
// attempts to evenly distribute resource usage.
//
// DNS is used to provide at least one address of an active server:
// the capability to return multiple results and the high-TTL nature
// of the service mean that we can simply return all the active nodes
// on every request, maximizing the chances that at least one of them
// will be active over a longer period of time.
//
// HTTP requests are instantaneous, and we can't rely on the client
// doing retries, so we must point the user at a single, active node
// on every request. There are two different policies, depending on
// the type of the request:
//
// - SOURCE requests must always reach the current master node. Since
// redirects tend to confuse streaming sources, which might have very
// simple HTTP implementations, we simply proxy the stream to the
// master node.
//
// - listener requests must be routed to an available relay, taking
// utilization into account (be it in terms of bandwidth, CPU usage,
// or more). The fact that the DNS layer returns multiple addresses
// already provides a very rough form of load balancing, but for
// accurate bandwidth planning we can't just rely on clients'
// cooperation. So when a client requests a stream using its public
// URL we need to serve a redirect to the desired node, computed
// according to the load balancing policy. This is currently done by
// serving an M3U file pointing directly at the target node's icecast
// daemon (but this may lock clients to that specific target node on
// failure... client reconnection policies still need some
// investigation). A rough sketch of such a redirect handler follows
// the package clause below.
//
package fe
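
As referenced in the listener paragraph above, a rough sketch of an M3U redirect handler. The relay list, the random pick standing in for the real load-balancing policy, and the Icecast port are all assumptions, not taken from this codebase:

package main

import (
	"fmt"
	"log"
	"math/rand"
	"net/http"
)

// pickRelay stands in for the real load-balancing policy: it just
// picks a random entry from a hypothetical list of active relays.
func pickRelay(relays []string) string {
	return relays[rand.Intn(len(relays))]
}

// serveM3U answers a listener request with a one-line M3U playlist
// pointing directly at the chosen node's Icecast daemon (port 8000 is
// an assumption).
func serveM3U(w http.ResponseWriter, r *http.Request, relays []string) {
	target := pickRelay(relays)
	w.Header().Set("Content-Type", "audio/x-mpegurl")
	fmt.Fprintf(w, "http://%s:8000%s\n", target, r.URL.Path)
}

func main() {
	relays := []string{"node1.example.com", "node2.example.com"}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		serveM3U(w, r, relays)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
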
// The 'node' supervisor is the daemon that controls the Icecast
// server, updating its configuration according to what is stored in
// the distributed database (etcd).
//
// A configuration change will regenerate the Icecast configuration
// file and trigger a reload of the Icecast daemon, which implies that
// the node takes ownership of the entire Icecast config (manual edits
// will be overwritten).
//
// Nodes run a presence protocol (using the database) to discover each
// other and ensure that the full list of active nodes is available to
// clients at any given time.
//
// Nodes also run a master election protocol: a single 'master' server
// is elected among them, which will act as the receiver for the
// source streams; the other nodes will be configured as relays.
//
package node
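
A minimal sketch of the presence half of this design: each node registers its address under a leased key so that its entry expires automatically if the node dies. This uses the modern etcd clientv3 API and an invented key layout purely for illustration; it is not the client library or schema this codebase uses:

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// announce writes the node's public address under a leased key and
// keeps the lease alive; when the process dies the key expires and the
// node drops out of the active set. Key layout and TTL are assumptions.
func announce(ctx context.Context, cli *clientv3.Client, nodeName, publicAddr string) error {
	lease, err := cli.Grant(ctx, 10) // 10s TTL
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "/nodes/"+nodeName, publicAddr, clientv3.WithLease(lease.ID)); err != nil {
		return err
	}
	ch, err := cli.KeepAlive(ctx, lease.ID)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
			// Drain keepalive responses for the lifetime of the process.
		}
	}()
	return nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	if err := announce(context.Background(), cli, "node1", "192.0.2.10"); err != nil {
		log.Fatal(err)
	}
	select {}
}
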
......@@ -30,6 +30,7 @@ func (ic *IcecastController) reload() error {
return err
}
// Update reloads the Icecast daemon with a new configuration.
func (ic *IcecastController) Update(conf *ClusterConfig, isMaster bool, masterAddr string) error {
if !isMaster && masterAddr == "" {
return errors.New("unknown system state")
......
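
The reload mentioned in the hunk header is not shown in this diff; one plausible shape for it on a Debian host, with both the command and its 'reload' action being assumptions:

package main

import (
	"log"
	"os/exec"
)

// reloadIcecast asks a Debian-packaged icecast2 to re-read its config.
// Whether the project shells out like this or signals the daemon
// directly is not visible in the diff; sketch only.
func reloadIcecast() error {
	return exec.Command("/etc/init.d/icecast2", "reload").Run()
}

func main() {
	if err := reloadIcecast(); err != nil {
		log.Printf("icecast reload failed: %v", err)
	}
}
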
......@@ -80,6 +80,8 @@ type iceMountConfig struct {
OnDisconnect string `xml:"on-disconnect,omitempty"`
}
// Configuration of the local Icecast daemon (meant for serialization
// to XML).
type icecastConfig struct {
XMLName xml.Name
Limits iceLimitsConfig `xml:"limits"`
......@@ -94,6 +96,20 @@ type icecastConfig struct {
Mounts []iceMountConfig `xml:"mount"`
}
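
The struct tags above drive Go's encoding/xml marshalling; a self-contained illustration of the same pattern with made-up fields (the real icecastConfig layout is larger):

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

// Toy config mirroring the pattern used by icecastConfig: struct tags
// name the XML elements, and omitempty drops unset optional ones. The
// fields here are illustrative, not the real layout.
type exampleMount struct {
	Name         string `xml:"mount-name"`
	OnDisconnect string `xml:"on-disconnect,omitempty"`
}

type exampleConfig struct {
	XMLName xml.Name       `xml:"icecast"`
	Mounts  []exampleMount `xml:"mount"`
}

func main() {
	cfg := exampleConfig{
		Mounts: []exampleMount{{Name: "/stream.ogg"}},
	}
	out, err := xml.MarshalIndent(&cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// Output:
	// <icecast>
	//   <mount>
	//     <mount-name>/stream.ogg</mount-name>
	//   </mount>
	// </icecast>
}
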
// Create an Icecast configuration suitable for a Debian system with
// the 'icecast2' package installed. Things to note about the
// generated config:
//
// - It binds to the IcecastPort (defined in api.go) on all
// interfaces.
//
// - Local administration is practically disabled. A random admin
// password is created every time the daemon starts. Same goes for the
// global source password.
//
// Some of the parameters should probably be command-line flags, so
// that it is possible to set them on a per-host basis.
//
func defaultDebianConfig(publicIp string) *icecastConfig {
// Pick some random passwords on startup. We don't use them,
// but icecast is happier if they're set.
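
One common way to produce such throwaway passwords, shown only as an illustration of the comment above (the generator actually used is not visible in this hunk):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// randomPassword returns a hex-encoded random string; a plausible way
// to build the throwaway admin/source passwords mentioned above, not
// necessarily the one the project uses.
func randomPassword(nBytes int) string {
	b := make([]byte, nBytes)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand failure is not recoverable here
	}
	return hex.EncodeToString(b)
}

func main() {
	fmt.Println(randomPassword(16))
}
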
......@@ -144,6 +160,7 @@ func newIcecastConfig(publicIp string) *icecastConfig {
return defaultDebianConfig(publicIp)
}
// Encode the configuration to XML.
func (c *icecastConfig) Encode() ([]byte, error) {
var buf bytes.Buffer
......@@ -158,6 +175,7 @@ func (c *icecastConfig) Encode() ([]byte, error) {
return buf.Bytes(), nil
}
// EncodeToFile writes the configuration to a file.
func (c *icecastConfig) EncodeToFile(path string) error {
file, err := os.Create(path)
if err != nil {
......@@ -202,6 +220,9 @@ func mountToRelay(masterAddr string, m *radioai.Mount) iceRelayConfig {
}
}
// Update the configuration with the current list of mounts and the
// master election state. This will clear the Mounts and Relays fields
// and set them to new values.
func (ic *icecastConfig) Update(config *ClusterConfig, isMaster bool, masterAddr string) {
ic.Mounts = nil
ic.Relays = nil
......
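
To spell out the clear-and-rebuild contract of Update, a toy version with hypothetical mount and relay types; the real iceMountConfig/iceRelayConfig and the port 8000 below are not taken from this diff:

package main

import "fmt"

// Hypothetical, simplified stand-ins for the mount/relay config types,
// loosely modeled on Icecast's <mount> and <relay> XML elements.
type mountConfig struct {
	Name string
}

type relayConfig struct {
	Server     string
	Port       int
	Mount      string
	LocalMount string
}

// rebuild illustrates the clear-and-rebuild contract: the previous
// Mounts/Relays are discarded and regenerated from the cluster state,
// so stale entries never survive a config change.
func rebuild(mountNames []string, isMaster bool, masterAddr string) (mounts []mountConfig, relays []relayConfig) {
	for _, name := range mountNames {
		mounts = append(mounts, mountConfig{Name: name})
		if !isMaster {
			// Non-master nodes relay every mount from the master.
			relays = append(relays, relayConfig{
				Server:     masterAddr,
				Port:       8000,
				Mount:      name,
				LocalMount: name,
			})
		}
	}
	return
}

func main() {
	m, r := rebuild([]string{"/stream.ogg"}, false, "master.example.com")
	fmt.Println(m, r)
}
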