Commit 6c9f8d31 authored by ale

update to the etcd v2 API

parent 93e5231a
@@ -18,7 +18,10 @@ var (
 	MountPrefix = "/icecast/mounts/"
 	NodePrefix = "/icecast/nodes/"
 	IcecastPort = 8000
+
+	ErrIsDirectory = errors.New("key is a directory")
+	ErrIsFile = errors.New("key is a file")
 )
 
 // A mountpoint for a stream.
@@ -40,7 +43,8 @@ func mountPath(mountName string) string {
 	return MountPrefix + mountName[1:]
 }
 
-// Cache the list of active nodes.
+// Cache the list of active nodes (the front-ends that need to
+// retrieve this information continuously, so we limit them to 2qps).
 type nodesCache struct {
 	ttl   time.Duration
 	nodes []string
@@ -89,16 +93,16 @@ func NewRadioAPI(client *etcd.Client) *RadioAPI {
 // GetMount returns data on a specific mountpoint (returns nil if not
 // found).
 func (r *RadioAPI) GetMount(mountName string) (*Mount, error) {
-	response, err := r.client.Get(mountPath(mountName))
+	response, err := r.client.Get(mountPath(mountName), false)
 	if err != nil {
 		return nil, err
 	}
-	if len(response) != 1 {
-		return nil, nil
+	if response.Dir {
+		return nil, ErrIsDirectory
 	}
 	var m Mount
-	if err := json.NewDecoder(strings.NewReader(response[0].Value)).Decode(&m); err != nil {
+	if err := json.NewDecoder(strings.NewReader(response.Value)).Decode(&m); err != nil {
 		return nil, err
 	}
 	return &m, nil
@@ -123,14 +127,21 @@ func (r *RadioAPI) DelMount(mountName string) error {
 // ListMounts returns a list of all the configured mountpoints.
 func (r *RadioAPI) ListMounts() ([]*Mount, error) {
-	response, err := r.client.Get(MountPrefix)
+	response, err := r.client.Get(MountPrefix, false)
 	if err != nil {
 		return nil, err
 	}
-	result := make([]*Mount, 0, len(response))
-	for _, entry := range response {
+	if !response.Dir {
+		return nil, ErrIsFile
+	}
+	result := make([]*Mount, 0, len(response.Kvs))
+	for _, kv := range response.Kvs {
+		if kv.Dir {
+			continue
+		}
 		var m Mount
-		if err := json.NewDecoder(strings.NewReader(entry.Value)).Decode(&m); err != nil {
+		if err := json.NewDecoder(strings.NewReader(kv.Value)).Decode(&m); err != nil {
 			continue
 		}
 		result = append(result, &m)
@@ -140,24 +151,27 @@ func (r *RadioAPI) ListMounts() ([]*Mount, error) {
 // GetMasterAddr returns the address of the current master server.
 func (r *RadioAPI) GetMasterAddr() (string, error) {
-	response, err := r.client.Get(MasterElectionPath)
+	response, err := r.client.Get(MasterElectionPath, false)
 	if err != nil {
 		return "", err
 	}
-	if len(response) < 1 {
-		return "", errors.New("no active master")
+	if response.Dir {
+		return "", ErrIsDirectory
 	}
-	return response[0].Value, nil
+	return response.Value, nil
 }
 
 // GetNodes returns the list of active cluster nodes.
 func (r *RadioAPI) doGetNodes() ([]string, error) {
-	response, err := r.client.Get(NodePrefix)
+	response, err := r.client.Get(NodePrefix, false)
 	if err != nil {
 		return nil, err
 	}
-	result := make([]string, 0, len(response))
-	for _, entry := range response {
+	if !response.Dir {
+		return nil, ErrIsFile
+	}
+	result := make([]string, 0, len(response.Kvs))
+	for _, entry := range response.Kvs {
 		result = append(result, entry.Value)
 	}
 	return result, nil
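
Aside (not part of this commit): with the v2 API the RadioAPI getters now return the sentinel errors ErrIsDirectory and ErrIsFile instead of silently returning nil, so callers can switch on the error value. A minimal caller sketch; the import paths, etcd endpoint, and mount name are assumptions, not from the repository:

package main

import (
	"log"

	"github.com/coreos/go-etcd/etcd" // assumed client import path
	"radioai"                        // assumed import path for this package
)

func main() {
	client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) // assumed endpoint
	api := radioai.NewRadioAPI(client)

	mount, err := api.GetMount("/test.ogg") // hypothetical mount name
	switch err {
	case nil:
		log.Printf("mount: %+v", mount)
	case radioai.ErrIsDirectory:
		log.Printf("key is a directory, not a mountpoint")
	default:
		log.Printf("etcd error: %s", err)
	}
}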
...
@@ -104,9 +104,10 @@ func generateUsername(path string) string {
 func createMount(args []string) {
 	path := args[0]
-	if !strings.HasPrefix(path, "/") {
-		log.Fatal("Mount points should specify a full path")
+	if strings.Contains(path, "/") {
+		log.Fatal("Mount points should not contain a slash ('/').")
 	}
+	path = "/" + path
 
 	// Check if the mount already exists.
 	client := getClient()
...
@@ -59,7 +59,7 @@ func NewEtcdClient() *etcd.Client {
 	c := etcd.NewClient(machines)
 	if proto == "https" {
 		c.SetScheme(etcd.HTTPS)
-		if _, err := c.SetCertAndKey(loadFile(*etcdCertFile), loadFile(*etcdKeyFile)); err != nil {
+		if err := c.SetCertAndKey(loadFile(*etcdCertFile), loadFile(*etcdKeyFile)); err != nil {
 			log.Fatal("Error setting up SSL for etcd client: %s", err)
 		}
 	}
...
@@ -23,8 +23,8 @@ func stateToString(state int) string {
 }
 
 type MasterElection struct {
 	client  *etcd.Client
 	stop    chan bool
 	stopped bool
 
 	Addr string
@@ -55,11 +55,11 @@ func (m *MasterElection) IsMaster() bool {
 }
 
 func (m *MasterElection) GetMasterAddr() string {
-	responses, err := m.client.Get(m.Path)
-	if err != nil || len(responses) != 1 {
+	response, err := m.client.Get(m.Path, false)
+	if err != nil {
 		return ""
 	}
-	return responses[0].Value
+	return response.Value
 }
 
 func (m *MasterElection) setState(state int) {
@@ -87,42 +87,113 @@ func (m *MasterElection) stopper() {
 	// Remove the lock file if we are the master.
 	if m.State == STATE_MASTER {
+		log.Printf("releasing masterelection lock")
 		m.client.Delete(m.Path)
 	}
 }
 
+func boolTimer(delay time.Duration) chan bool {
+	ch := make(chan bool)
+	go func() {
+		time.Sleep(delay)
+		close(ch)
+	}()
+	return ch
+}
+
+func (m *MasterElection) loop() {
+}
+
+func (m *MasterElection) runMaster(index uint64) {
+	m.setState(STATE_MASTER)
+
+	// If we renew the lease every TTL / N, we allow N renewal
+	// errors before we stop believing being the master.
+	ttl := time.Second * time.Duration(m.TTL)
+	tick := time.NewTicker(ttl / 3)
+	lastUpdate := time.Now()
+	for {
+		select {
+		case t := <-tick.C:
+			// To verify that we actually are still the
+			// master (not just we believe we are), try
+			// yet another compare-and-swap to check that
+			// the stored master address is still our own,
+			// and no-one stole our lock. If not, the TTL
+			// will be updated (and the lock renewed).
+			response, err := m.client.CompareAndSwap(m.Path, m.Addr, m.TTL, m.Addr, index)
+			if err != nil {
+				log.Printf("error updating lock: %s", err)
+				// If we can't renew the lock for a
+				// TTL, we must assume we lost it.
+				if t.Sub(lastUpdate) > ttl {
+					log.Printf("too many errors, lost lock")
+					return
+				}
+			}
+			index = response.ModifiedIndex
+			lastUpdate = t
+		case <-m.stop:
+			return
+		}
+	}
+}
+
+func (m *MasterElection) runSlave(index uint64) {
+	m.setState(STATE_SLAVE)
+	for {
+		// Start a watch on the lock, waiting for its removal.
+		response, err := m.client.Watch(m.Path, index+1, nil, m.stop)
+		if err != nil {
+			log.Printf("slave Watch() error: %+v", err)
+			return
+		}
+		if response.Action == "delete" || response.Action == "expire" {
+			return
+		}
+		index = response.ModifiedIndex
+	}
+}
+
 func (m *MasterElection) Run() {
 	go m.stopper()
 
 	// Start as a slave.
 	m.setState(STATE_SLAVE)
 
-	halfttl := time.Second * time.Duration(m.TTL / 2)
+	var watchIndex uint64
 	for !m.stopped {
+		// Since a failed Create does not return the
+		// RAFT index, let's optimistically query the lock
+		// before starting just to set a baseline for the
+		// index.
+		if iresponse, err := m.client.Get(m.Path, false); err == nil {
+			log.Printf("lock already exists: %+v", iresponse)
+			watchIndex = iresponse.ModifiedIndex
+		}
+
 		// Try to acquire the lock. If we are currently the
 		// master, the previous value should be our own
 		// address, otherwise it should be unset.
-		prevValue := ""
-		if m.State == STATE_MASTER {
-			prevValue = m.Addr
-		}
-		if _, ok, _ := m.client.TestAndSet(m.Path, prevValue, m.Addr, m.TTL); ok {
+		response, err := m.client.Create(m.Path, m.Addr, m.TTL)
+		if err == nil {
 			// Howdy, we're the master now. Wait a while
 			// and renew our TTL.
-			m.setState(STATE_MASTER)
-			time.Sleep(halfttl)
+			m.runMaster(response.ModifiedIndex)
 		} else {
-			// We're not the master. We could wait for a
-			// DELETE but I'm not sure if that's what you
-			// get on expiry, so we just wait for the
-			// first event which will be another SET from
-			// the current master. Oh well.
-			m.setState(STATE_SLAVE)
-			_, err := m.client.Watch(m.Path, 0, nil, nil)
-			if err != nil {
-				log.Printf("%s: watch error: %s", m.Path, err)
-			}
+			// We're not the master. Wait until the lock
+			// is deleted or expires.
+			m.runSlave(watchIndex)
 		}
 	}
 }
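
Aside (not part of this commit): the renewal arithmetic in runMaster is worth a worked example. With TTL = 30s the ticker fires every 10s (TTL/3), and a failed CompareAndSwap only causes the master to step down once more than a full TTL has passed since the last successful renewal, i.e. after roughly three consecutive failures. A standalone sketch of that check, with made-up values:

package main

import (
	"fmt"
	"time"
)

func main() {
	ttl := 30 * time.Second
	interval := ttl / 3 // renewal attempts at 10s, 20s, 30s, 40s, ...

	start := time.Now()
	lastUpdate := start // time of the last successful renewal

	for i := 1; ; i++ {
		t := start.Add(time.Duration(i) * interval) // simulated tick time
		// Pretend every renewal fails; this is the same guard runMaster uses.
		if t.Sub(lastUpdate) > ttl {
			fmt.Printf("tick %d (%s since last renewal): lock considered lost\n", i, t.Sub(lastUpdate))
			return
		}
		fmt.Printf("tick %d (%s since last renewal): still master\n", i, t.Sub(lastUpdate))
	}
}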
@@ -86,7 +86,7 @@ func NewConfigSyncer(client *etcd.Client, config *ClusterConfig, upch chan bool,
 	return &ConfigSyncer{
 		client: client,
 		config: config,
-		rch:    make(chan *etcd.Response, 10),
+		rch:    make(chan *etcd.Response),
 		upch:   upch,
 		stop:   stop,
 	}
@@ -97,15 +97,13 @@ func (w *ConfigSyncer) syncer() {
 	for {
 		select {
 		case response := <-w.rch:
-			switch response.Action {
-			case "DELETE":
+			if response.Action == "delete" {
 				mountName := keyToMount(response.Key)
 				log.Printf("deleted mount %s", mountName)
 				w.config.delMount(mountName)
-			case "SET":
-				w.updateConfigWithResponse(response)
-			default:
+			} else if response.Action == "set" || response.Action == "create" || response.Action == "update" {
+				w.updateConfigWithResponse(response.Key, response.Value)
+			} else {
 				continue
 			}
@@ -113,7 +111,7 @@ func (w *ConfigSyncer) syncer() {
 			// the Watcher dies, it knows where to start
 			// from and we do not have to download the
 			// full configuration again.
-			w.index = response.Index
+			w.index = response.ModifiedIndex
 
 			// Trigger an update.
 			trigger(w.upch)
@@ -124,12 +122,12 @@ }
 	}
 }
 
-func (w *ConfigSyncer) updateConfigWithResponse(response *etcd.Response) {
-	mountName := keyToMount(response.Key)
-	log.Printf("updating mount %s: %+v", mountName, response)
+func (w *ConfigSyncer) updateConfigWithResponse(key, value string) {
+	mountName := keyToMount(key)
+	log.Printf("updating mount %s: %s", mountName, value)
 	var m radioai.Mount
-	if err := json.NewDecoder(strings.NewReader(response.Value)).Decode(&m); err != nil {
-		log.Printf("corrupted data: %s: %s", response.Value, err)
+	if err := json.NewDecoder(strings.NewReader(value)).Decode(&m); err != nil {
+		log.Printf("corrupted data: %s: %s", value, err)
 	} else {
 		w.config.setMount(&m)
 	}
@@ -142,14 +140,13 @@ func (w *ConfigSyncer) Run() {
 	// Run until the first successful Get().
 	log.Printf("attempting to retrieve initial config...")
 	for {
-		responses, err := w.client.Get(radioai.MountPrefix)
-		log.Printf("Get(): %+v", responses)
-		if err == nil {
+		response, err := w.client.Get(radioai.MountPrefix, false)
+		if err == nil && response.Dir {
 			// Directly update the configuration.
-			for _, r := range responses {
-				w.updateConfigWithResponse(r)
-				w.index = r.Index
+			for _, r := range response.Kvs {
+				w.updateConfigWithResponse(r.Key, r.Value)
 			}
+			w.index = response.ModifiedIndex
 			break
 		}
 		log.Printf("Get error: %s", err)
@@ -207,8 +204,7 @@ func NewRadioNode(ip string, client *etcd.Client) *RadioNode {
 	// MasterElection changes trigger an update.
 	mech := make(chan int)
 	go func() {
-		for state := range mech {
-			log.Printf("master election status changed: %d", state)
+		for _ = range mech {
 			trigger(upch)
 		}
 	}()
@@ -286,4 +282,7 @@ func (rc *RadioNode) Run() {
 // Stop everything.
 func (rc *RadioNode) Stop() {
 	close(rc.stop)
+
+	// We should use WaitGroups here. Instead, wait 2 seconds.
+	time.Sleep(2 * time.Second)
 }
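
Aside (not part of this commit): the TODO added in Stop() could be addressed with a sync.WaitGroup so shutdown blocks until the background goroutines actually exit, rather than sleeping for a fixed two seconds. A minimal sketch with hypothetical names (node, run), not the repository's own types:

package main

import (
	"sync"
)

type node struct {
	stop chan bool
	wg   sync.WaitGroup
}

// run starts a worker goroutine and registers it with the WaitGroup.
func (n *node) run(worker func(stop chan bool)) {
	n.wg.Add(1)
	go func() {
		defer n.wg.Done()
		worker(n.stop)
	}()
}

// Stop signals every worker and waits for all of them to return.
func (n *node) Stop() {
	close(n.stop)
	n.wg.Wait()
}

func main() {
	n := &node{stop: make(chan bool)}
	n.run(func(stop chan bool) { <-stop })
	n.Stop()
}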