Commit 61875346 authored by ale's avatar ale

Merge branch 'replay-protection' into 'master'

Replay protection

Closes #4

See merge request !1
parents fc2ad88c 797fa144
Pipeline #1349 passed with stages
in 1 minute and 21 seconds
......@@ -40,6 +40,18 @@ The authentication server data model is based on the concept of a
in LDAP, but it has to be told the specific details of how to find
them and how to map the information there to what it needs.
## Other Dependencies
The auth-server can optionally use *memcached* to store short-term
data with a relatively high probability of retrieval. This is used to
store U2F challenges, as well as recently used OTP tokens for replay
protection. If no memcached servers are configured, this
functionality will be disabled, but the auth-server will still run
(which is useful for tests, or for simpler deployments).
It is possible to specify multiple memcached servers for HA purposes,
with a *write-all / read-any* model.
# Configuration
The behavior of auth-server can be configured with a YAML file.
......@@ -65,6 +77,8 @@ The YAML file should contain a dictionary with the following attributes:
* `cert` is the path to the client certificate
* `key` is the path to the client private key
* `ca` is the path to the CA store to verify the server certificate
* `memcache_servers` contains a list of memcached server addresses (in
host:port format)
## Rate limiting
......
......@@ -70,6 +70,13 @@ type UserBackend interface {
GetUser(context.Context, *BackendSpec, string) (*User, bool)
}
// OTPShortTermStorage stores short-term otp tokens for replay
// protection purposes.
type OTPShortTermStorage interface {
AddToken(string, string) error
HasToken(string, string) bool
}
// U2FShortTermStorage stores short-term u2f challenges.
type U2FShortTermStorage interface {
SetUserChallenge(string, *u2f.Challenge) error
......@@ -199,10 +206,6 @@ func (c *ServiceConfig) notifyBlacklists(user *User, req *auth.Request, resp *au
}
}
type u2fShortTermStorageConfig struct {
Servers []string `yaml:"memcache_servers"`
}
// Config for the authentication server.
type Config struct {
// Global configuration for backends.
......@@ -223,9 +226,8 @@ type Config struct {
// Configuration for the user-meta-server backend.
UserMetaDBConfig *clientutil.BackendConfig `yaml:"user_meta_server"`
// Configuration for the U2F short-term challenge storage
// (backed by memcached).
U2FShortTerm *u2fShortTermStorageConfig `yaml:"u2f_short_term_storage"`
// Memcache servers used for short-term storage.
MemcacheServers []string `yaml:"memcache_servers"`
// Runtime versions of the above. These objects are shared by
// all services, as they contain the actual map data.
......@@ -376,6 +378,7 @@ type Server struct {
backends []UserBackend
config *Config
u2fShortTerm U2FShortTermStorage
otpShortTerm OTPShortTermStorage
}
func newError() *auth.Response {
......@@ -392,9 +395,14 @@ func NewServer(config *Config) (*Server, error) {
config: config,
}
if config.U2FShortTerm != nil {
s.u2fShortTerm = newMemcacheU2FStorage(config.U2FShortTerm.Servers)
var cache cacheClient
if len(config.MemcacheServers) > 0 {
cache = newMemcacheReplicatedClient(config.MemcacheServers)
} else {
cache = newInprocessCache()
}
s.u2fShortTerm = newU2FStorage(cache)
s.otpShortTerm = newOTPStorage(cache)
for _, name := range config.EnabledBackends {
var b UserBackend
......@@ -563,7 +571,11 @@ func (s *Server) authenticateUserWith2FA(user *User, req *auth.Request) (*auth.R
}
return nil, errors.New("bad U2F response")
case req.OTP != "":
if user.HasOTP() && checkOTP(req.OTP, user.TOTPSecret) {
if user.HasOTP() && s.checkOTP(user, req.OTP, user.TOTPSecret) {
// Save the token for replay protection.
if err := s.otpShortTerm.AddToken(user.Name, req.OTP); err != nil {
log.Printf("error saving OTP token to short-term storage: %v", err)
}
return newOK(), nil
}
return nil, errors.New("bad OTP")
......@@ -616,6 +628,12 @@ func checkPassword(password, hash []byte) bool {
return pwhash.ComparePassword(string(hash), string(password))
}
func checkOTP(otp, secret string) bool {
func (s *Server) checkOTP(user *User, otp, secret string) bool {
// Check our short-ttl blacklist for the token (replay protection).
if s.otpShortTerm.HasToken(user.Name, otp) {
log.Printf("replay protection triggered for %s", user.Name)
return false
}
return totp.Validate(otp, secret)
}
......@@ -149,6 +149,7 @@ func runAuthenticationTest(t *testing.T, client client.Client) {
{"2fauser", "bad_password", validOTP, auth.StatusError, auth.TFAMethodNone},
{"2fauser", "password", "", auth.StatusInsufficientCredentials, auth.TFAMethodOTP},
{"2fauser", "password", validOTP, auth.StatusOK, auth.TFAMethodNone},
{"2fauser", "password", validOTP, auth.StatusError, auth.TFAMethodNone}, // fails due to replay protection
{"2fauser", "password", "123456", auth.StatusError, auth.TFAMethodNone},
}
for _, td := range testdata2 {
......
package server
import (
"errors"
"sync"
"time"
"github.com/bradfitz/gomemcache/memcache"
cache "github.com/patrickmn/go-cache"
)
// Client for a short-term cache.
//
// Data should only be consistent over a short period of time, and the
// worst case scenario (challenge can't be retrieved) will simply
// cause the user to retry, so even in a replicated setup we do not
// need a strong consistency strategy: we simply fan out all reads and
// writes to all memcache servers in parallel.
type cacheClient interface {
	// writeAll stores value under key with a TTL in seconds.
	writeAll(string, []byte, int) error
	// readAny returns the value stored under key, and whether it was found.
	readAny(string) ([]byte, bool)
}
// A cacheClient that uses one or more memcached servers.
type memcacheReplicatedClient struct {
	// One client per configured memcached server.
	caches []*memcache.Client
}
// newMemcacheReplicatedClient builds a cacheClient that replicates
// reads and writes across all the given memcached server addresses.
func newMemcacheReplicatedClient(servers []string) *memcacheReplicatedClient {
	m := &memcacheReplicatedClient{
		caches: make([]*memcache.Client, 0, len(servers)),
	}
	for _, addr := range servers {
		client := memcache.New(addr)
		client.Timeout = u2fClientTimeout
		client.MaxIdleConns = u2fClientMaxIdleConns
		m.caches = append(m.caches, client)
	}
	return m
}
// writeAll stores the item on every memcached server in parallel.
// It returns nil as long as at least one server accepted the write.
func (m *memcacheReplicatedClient) writeAll(key string, value []byte, ttl int) error {
	item := &memcache.Item{
		Key:        key,
		Value:      value,
		Expiration: int32(ttl),
	}
	// Fan the write out to all servers. The channel is buffered so
	// every goroutine can complete even if we stopped reading early.
	results := make(chan error, len(m.caches))
	defer close(results)
	for _, client := range m.caches {
		go func(c *memcache.Client) {
			results <- c.Set(item)
		}(client)
	}
	// Collect one result per server; a single success is enough.
	succeeded := false
	for range m.caches {
		if <-results == nil {
			succeeded = true
		}
	}
	if !succeeded {
		return errors.New("all memcache servers failed")
	}
	return nil
}
// readAny queries every memcached server concurrently and returns the
// first successful result.
//
// This would be better if the memcache API took a Context, so we
// could cancel all pending calls as soon as a result is received.
// Instead, the losing goroutines run to completion in the background,
// their results are discarded via the non-blocking send, and a
// dedicated goroutine closes the channel once they are all done so
// that nothing leaks.
func (m *memcacheReplicatedClient) readAny(key string) ([]byte, bool) {
	results := make(chan []byte, 1)
	var pending sync.WaitGroup
	for _, client := range m.caches {
		pending.Add(1)
		go func(c *memcache.Client) {
			defer pending.Done()
			item, err := c.Get(key)
			if err != nil {
				return
			}
			select {
			case results <- item.Value:
			default:
			}
		}(client)
	}
	go func() {
		pending.Wait()
		close(results)
	}()
	// A nil value means either every server failed (channel closed)
	// or the winning server stored a nil payload; both count as a miss.
	if value := <-results; value != nil {
		return value, true
	}
	return nil, false
}
// A cacheClient that uses an in-process cache.
//
// Used when no memcached servers are configured (tests, or simple
// single-node deployments).
type inprocessCache struct {
	cache *cache.Cache
}
// newInprocessCache returns a cacheClient backed by a purely local
// in-memory cache (5 minute default TTL, cleanup pass every 10).
func newInprocessCache() *inprocessCache {
	const (
		defaultTTL      = 5 * time.Minute
		cleanupInterval = 10 * time.Minute
	)
	return &inprocessCache{cache: cache.New(defaultTTL, cleanupInterval)}
}
// writeAll stores value under key for ttl seconds. The error return
// only exists to satisfy cacheClient; it is always nil here.
func (c *inprocessCache) writeAll(key string, value []byte, ttl int) error {
	// Convert to string so the cache keeps its own copy of the bytes.
	expiry := time.Duration(ttl) * time.Second
	c.cache.Set(key, string(value), expiry)
	return nil
}
// readAny returns the value stored under key, if present.
func (c *inprocessCache) readAny(key string) ([]byte, bool) {
	v, found := c.cache.Get(key)
	if !found {
		return nil, false
	}
	// Values are always stored as strings by writeAll.
	return []byte(v.(string)), true
}
package server
import "fmt"
// otpReplayProtectionTTL is how long (in seconds) a used OTP token is
// remembered for replay protection. NOTE(review): presumably chosen
// to cover the TOTP validity window — confirm.
const otpReplayProtectionTTL = 300
// otpStorage implements OTP replay protection on top of a cacheClient.
type otpStorage struct {
	cacheClient
}
// newOTPStorage returns an OTP replay-protection store backed by the
// given short-term cache.
func newOTPStorage(c cacheClient) *otpStorage {
	return &otpStorage{cacheClient: c}
}
// AddToken records that the given user has just used this OTP token.
// The stored value is irrelevant; only the key's presence matters.
func (m *otpStorage) AddToken(username, token string) error {
	key := otpMemcacheKey(username, token)
	return m.writeAll(key, []byte{1}, otpReplayProtectionTTL)
}
// HasToken reports whether this (user, token) pair has already been
// seen within the replay-protection TTL.
func (m *otpStorage) HasToken(username, token string) bool {
	if _, found := m.readAny(otpMemcacheKey(username, token)); found {
		return true
	}
	return false
}
// otpMemcacheKey builds the cache key under which a used (user,
// token) pair is recorded.
//
// NOTE(review): a username containing '/' could in principle collide
// with another user's key; harmless if usernames are validated
// upstream — confirm.
func otpMemcacheKey(username, token string) string {
	key := fmt.Sprintf("otp/%s/%s", username, token)
	return key
}
......@@ -3,114 +3,45 @@ package server
import (
"bytes"
"encoding/gob"
"errors"
"sync"
"time"
"github.com/bradfitz/gomemcache/memcache"
"github.com/tstranex/u2f"
)
var (
u2fClientMaxIdleConns = 5
u2fClientTimeout = 500 * time.Millisecond
u2fCacheExpirationSeconds int32 = 300
u2fClientMaxIdleConns = 5
u2fClientTimeout = 500 * time.Millisecond
u2fCacheExpirationSeconds = 300
)
func init() {
gob.Register(&u2f.Challenge{})
}
// A memcache-backed implementation of the short-term U2F challenge
// storage.
//
// Data should only be consistent over a short period of time, and the
// worst case scenario (challenge can't be retrieved) will simply
// cause the user to retry, so we do not need a strong consistency
// strategy: we simply fan out all reads and writes to all memcache
// servers in parallel.
//
type memcacheU2FStorage struct {
caches []*memcache.Client
// Short-term U2F challenge storage.
type u2fStorage struct {
cacheClient
}
func newMemcacheU2FStorage(servers []string) *memcacheU2FStorage {
var m memcacheU2FStorage
for _, s := range servers {
c := memcache.New(s)
c.Timeout = u2fClientTimeout
c.MaxIdleConns = u2fClientMaxIdleConns
m.caches = append(m.caches, c)
}
return &m
func newU2FStorage(cache cacheClient) *u2fStorage {
return &u2fStorage{cache}
}
func (m *memcacheU2FStorage) SetUserChallenge(user string, chal *u2f.Challenge) error {
func (m *u2fStorage) SetUserChallenge(user string, chal *u2f.Challenge) error {
data, err := serializeU2FChallenge(chal)
if err != nil {
return err
}
item := &memcache.Item{
Key: u2fChallengeKey(user),
Value: data,
Expiration: u2fCacheExpirationSeconds,
}
// Write to the memcache servers. At least one write must succeed.
ch := make(chan error, len(m.caches))
defer close(ch)
for _, c := range m.caches {
go func(c *memcache.Client) {
ch <- c.Set(item)
}(c)
}
var ok bool
for i := 0; i < len(m.caches); i++ {
if err := <-ch; err == nil {
ok = true
}
}
if !ok {
return errors.New("all memcache servers failed")
}
return nil
return m.writeAll(u2fChallengeKey(user), data, u2fCacheExpirationSeconds)
}
func (m *memcacheU2FStorage) GetUserChallenge(user string) (*u2f.Challenge, bool) {
// Run all reads in parallel, return the first non-error result.
//
// This would be better if the memcache API took a Context, so
// we could cancel all pending calls as soon as a result is
// received. This way, we keep them running in the background,
// ignore their results, and fire a goroutine to avoid leaking
// the result channel.
ch := make(chan *u2f.Challenge, 1)
var wg sync.WaitGroup
for _, c := range m.caches {
wg.Add(1)
go func(c *memcache.Client) {
defer wg.Done()
item, err := c.Get(u2fChallengeKey(user))
if err != nil {
return
}
chal, _ := deserializeU2FChallenge(item.Value) // nolint
select {
case ch <- chal:
default:
}
}(c)
func (m *u2fStorage) GetUserChallenge(user string) (*u2f.Challenge, bool) {
value, ok := m.readAny(u2fChallengeKey(user))
if !ok {
return nil, false
}
go func() {
wg.Wait()
close(ch)
}()
chal := <-ch
if chal == nil {
chal, err := deserializeU2FChallenge(value)
if err != nil {
return nil, false
}
return chal, true
......
This is a list of people who have contributed code to go-cache. They, or their
employers, are the copyright holders of the contributed code. Contributed code
is subject to the license restrictions listed in LICENSE (as they were when the
code was contributed.)
Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Sergey Shepelev <temotor@gmail.com>
Alex Edwards <ajmedwards@gmail.com>
Copyright (c) 2012-2018 Patrick Mylund Nielsen and the go-cache contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
# go-cache
go-cache is an in-memory key:value store/cache similar to memcached that is
suitable for applications running on a single machine. Its major advantage is
that, being essentially a thread-safe `map[string]interface{}` with expiration
times, it doesn't need to serialize or transmit its contents over the network.
Any object can be stored, for a given duration or forever, and the cache can be
safely used by multiple goroutines.
Although go-cache isn't meant to be used as a persistent datastore, the entire
cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
### Installation
`go get github.com/patrickmn/go-cache`
### Usage
```go
import (
"fmt"
"github.com/patrickmn/go-cache"
"time"
)
func main() {
// Create a cache with a default expiration time of 5 minutes, and which
// purges expired items every 10 minutes
c := cache.New(5*time.Minute, 10*time.Minute)
// Set the value of the key "foo" to "bar", with the default expiration time
c.Set("foo", "bar", cache.DefaultExpiration)
// Set the value of the key "baz" to 42, with no expiration time
// (the item won't be removed until it is re-set, or removed using
// c.Delete("baz")
c.Set("baz", 42, cache.NoExpiration)
// Get the string associated with the key "foo" from the cache
foo, found := c.Get("foo")
if found {
fmt.Println(foo)
}
// Since Go is statically typed, and cache values can be anything, type
// assertion is needed when values are being passed to functions that don't
// take arbitrary types, (i.e. interface{}). The simplest way to do this for
// values which will only be used once--e.g. for passing to another
// function--is:
foo, found := c.Get("foo")
if found {
MyFunction(foo.(string))
}
// This gets tedious if the value is used several times in the same function.
// You might do either of the following instead:
if x, found := c.Get("foo"); found {
foo := x.(string)
// ...
}
// or
var foo string
if x, found := c.Get("foo"); found {
foo = x.(string)
}
// ...
// foo can then be passed around freely as a string
// Want performance? Store pointers!
c.Set("foo", &MyStruct, cache.DefaultExpiration)
if x, found := c.Get("foo"); found {
foo := x.(*MyStruct)
// ...
}
}
```
### Reference
`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
This diff is collapsed.
package cache
import (
"crypto/rand"
"math"
"math/big"
insecurerand "math/rand"
"os"
"runtime"
"time"
)
// This is an experimental and unexported (for now) attempt at making a cache
// with better algorithmic complexity than the standard one, namely by
// preventing write locks of the entire cache when an item is added. As of the
// time of writing, the overhead of selecting buckets results in cache
// operations being about twice as slow as for the standard cache with small
// total cache sizes, and faster for larger ones.
//
// See cache_test.go for a few benchmarks.
type unexportedShardedCache struct {
	// NOTE(review): the wrapper type presumably exists so a finalizer
	// can stop the janitor without keeping the inner cache alive —
	// confirm against the constructor (not visible in this chunk).
	*shardedCache
}
// shardedCache splits keys across multiple cache shards to reduce
// lock contention on writes.
type shardedCache struct {
	seed uint32 // seed for the djb33 key hash
	m    uint32 // shard count; a key's shard is djb33(seed, k) % m
	cs   []*cache
	// NOTE(review): presumably drives periodic DeleteExpired calls;
	// the type is defined elsewhere in the package — confirm.
	janitor *shardedJanitor
}
// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
//
// djb33 hashes k with the given seed. It is only used to pick a shard
// bucket, so the exact output values are an internal detail; the only
// requirements are determinism and a reasonable distribution.
func djb33(seed uint32, k string) uint32 {
	var (
		l = uint32(len(k))
		d = 5381 + seed + l
		i = uint32(0)
	)
	// Consume the input four bytes at a time; the manual unrolling is
	// what makes this faster than a plain byte loop.
	if l >= 4 {
		for i < l-4 {
			d = (d * 33) ^ uint32(k[i])
			d = (d * 33) ^ uint32(k[i+1])
			d = (d * 33) ^ uint32(k[i+2])
			d = (d * 33) ^ uint32(k[i+3])
			i += 4
		}
	}
	// Mix in the remaining 1-4 bytes. The original switch was off by
	// one (each case consumed one byte fewer than the remainder), so
	// the last byte of the input never affected the hash and keys
	// differing only in their final byte always shared a bucket.
	switch l - i {
	case 1:
		d = (d * 33) ^ uint32(k[i])
	case 2:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
	case 3:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
		d = (d * 33) ^ uint32(k[i+2])
	case 4:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
		d = (d * 33) ^ uint32(k[i+2])
		d = (d * 33) ^ uint32(k[i+3])
	}
	return d ^ (d >> 16)
}
// bucket returns the shard responsible for key k, chosen by hashing k
// with the cache's seed.
func (sc *shardedCache) bucket(k string) *cache {
	return sc.cs[djb33(sc.seed, k)%sc.m]
}

// The following methods simply forward to the shard that owns the key.

func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
	sc.bucket(k).Set(k, x, d)
}

func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Add(k, x, d)
}

func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Replace(k, x, d)
}

func (sc *shardedCache) Get(k string) (interface{}, bool) {
	return sc.bucket(k).Get(k)
}

func (sc *shardedCache) Increment(k string, n int64) error {
	return sc.bucket(k).Increment(k, n)
}

func (sc *shardedCache) IncrementFloat(k string, n float64) error {
	return sc.bucket(k).IncrementFloat(k, n)
}

func (sc *shardedCache) Decrement(k string, n int64) error {
	return sc.bucket(k).Decrement(k, n)
}

func (sc *shardedCache) Delete(k string) {
	sc.bucket(k).Delete(k)
}
// DeleteExpired purges expired items from every shard.
func (sc *shardedCache) DeleteExpired() {
	for i := range sc.cs {
		sc.cs[i].DeleteExpired()
	}
}
// Returns the items in the cache. This may include items that have expired,
// but have not yet been cleaned up. If this is significant, the Expiration
// fields of the items should be checked. Note that explicit synchronization
// is needed to use a cache and its corresponding Items() return values at
// the same time, as the maps are shared.
func (sc *shardedCache) Items() []map[string]Item {
res := make([]map[string]Item, len(sc.cs))
for i, v := range sc.cs {
res[i] = v.Items()
}