diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9f0bc4464d7310b56d6037b7844c471836eedc8e..3587e74cc84a9ef85c7edcd93078ba086cdd7f8e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,2 +1,20 @@
-include: "https://git.autistici.org/ai3/build-container/raw/master/common.yml"
+include: "https://git.autistici.org/pipelines/containers/raw/master/common.yml"
+
+stages:
+  - build
+  - test
+  - release
+
+test:
+  stage: test
+  image: registry.git.autistici.org/pipelines/images/test/float-podman-runner:master
+  tags: [podman]
+  script:
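+    # The bind-mounted config must be readable by the elasticsearch user
+    # (assumed here to be UID 1000 in this image).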
+    - chown -R 1000 testconf
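+    # Elasticsearch refuses to start unless vm.max_map_count is at least 262144.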
+    - echo 262144 > /proc/sys/vm/max_map_count
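+    # Run test.sh in the built image: testconf is bind-mounted over
+    # /etc/elasticsearch, data and logs live on tmpfs.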
+    - with-container --env=PORT=9200 --mount=type=bind,source=$PWD/testconf,destination=/etc/elasticsearch --mount=type=tmpfs,destination=/var/lib/elasticsearch --mount=type=tmpfs,destination=/var/log/elasticsearch $IMAGE_TAG ./test.sh
 
diff --git a/Dockerfile b/Dockerfile
index 413eba1656b0110a7a147f8e479876912e919e5b..406042d276f7d4e30fba07c09954b9a7abdee70f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM registry.git.autistici.org/ai3/docker/s6-base:master
+FROM registry.git.autistici.org/ai3/docker/s6-overlay-lite:master
 
 COPY elastic.gpg /etc/apt/trusted.gpg.d/
 COPY conf /tmp/conf
diff --git a/conf/services.d/elasticsearch/finish b/conf/services.d/elasticsearch/finish
index b6531b3ca574205c9e78650dbd7c38718683e57c..8f66da993f1d8bf3986e1d264283f3ec178ca21e 100755
--- a/conf/services.d/elasticsearch/finish
+++ b/conf/services.d/elasticsearch/finish
@@ -1,3 +1,3 @@
-#!/usr/bin/execlineb -S0
-
-s6-svscanctl -t /var/run/s6/services
+#!/bin/sh
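+# Tell s6-svscan to shut down the whole supervision tree when elasticsearch exits.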
+s6-svscanctl -t /run/s6/service
diff --git a/test.sh b/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f9fd5bf166d6e36db8b58d6a17e465a5f89d239d
--- /dev/null
+++ b/test.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
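+# Basic smoke test for the image: it assumes an Elasticsearch node is
+# already answering on localhost:9200 (see the test job in .gitlab-ci.yml).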
+
+if ! curl -sf -o /dev/null http://localhost:9200/_cat/indices ; then
+    echo "Could not request indices"
+    exit 1
+fi
+echo "Successfully contacted ES"
+
+set -e
+
+echo "Loading some data..."
+
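+# The _bulk API takes newline-delimited JSON; the body must end with a newline.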
+curl -sf -X PUT "http://localhost:9200/logs-my_app-default/_bulk?pretty" -H 'Content-Type: application/json' -d'
+{ "create": { } }
+{ "@timestamp": "2099-05-07T16:24:32.000Z", "event": { "original": "192.0.2.242 - - [07/May/2020:16:24:32 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0" } }
+{ "create": { } }
+{ "@timestamp": "2099-05-08T16:25:42.000Z", "event": { "original": "192.0.2.255 - - [08/May/2099:16:25:42 +0000] \"GET /favicon.ico HTTP/1.0\" 200 3638" } }
+'
+
+echo "Checking indices..."
+
+curl -sf http://localhost:9200/_cat/indices
+
+exit 0
+
diff --git a/testconf/curator.yml b/testconf/curator.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7f094ba2f20e741b39deaebf372fbec89595fed5
--- /dev/null
+++ b/testconf/curator.yml
@@ -0,0 +1,21 @@
+---
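+# Curator client settings for the test config, pointing at the local node on port 9200.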
+client:
+  hosts:
+    - 127.0.0.1
+  port: 9200
+  url_prefix:
+  use_ssl: False
+  certificate:
+  client_cert:
+  client_key:
+  ssl_no_validate: False
+  http_auth:
+  timeout: 30
+  master_only: True
+
+logging:
+  loglevel: INFO
+  logfile:
+  logformat: default
+  blacklist: ['elasticsearch', 'urllib3']
diff --git a/testconf/elasticsearch.yml b/testconf/elasticsearch.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d1e16c1c07c08c0f2363cad5931dd26f9318a3fa
--- /dev/null
+++ b/testconf/elasticsearch.yml
@@ -0,0 +1,99 @@
+# ======================== Elasticsearch Configuration =========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+#       Before you set out to tweak and tune the configuration, make sure you
+#       understand what you are trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
+#
+# Please consult the documentation for further information on configuration options:
+# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
+#
+# ---------------------------------- Cluster -----------------------------------
+#
+# Use a descriptive name for your cluster:
+#
+cluster.name: logs
+#
+# ------------------------------------ Node ------------------------------------
+#
+# Use a descriptive name for the node:
+#
+node.name: test
+#
+# Add custom attributes to the node:
+#
+#node.attr.rack: r1
+#
+# ----------------------------------- Paths ------------------------------------
+#
+# Path to directory where to store the data (separate multiple locations by comma):
+#
+path.data: /var/lib/elasticsearch
+#
+# Path to log files:
+#
+path.logs: /var/log/elasticsearch
+#
+# ----------------------------------- Memory -----------------------------------
+#
+# Lock the memory on startup:
+#
+#bootstrap.memory_lock: true
+#
+# Since we run ES in a container with --user we can't apply a seccomp
+# profile on top of the one Docker already provides.
+bootstrap.system_call_filter: false
+#
+# Make sure that the heap size is set to about half the memory available
+# on the system and that the owner of the process is allowed to use this
+# limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+#
+network.bind_host: "0.0.0.0"
+network.publish_host: "localhost"
+#
+# Set a custom port for HTTP:
+#
+#http.port: 9200
+#
+# For more information, consult the network module documentation.
+#
+# --------------------------------- Discovery ----------------------------------
+#
+# Pass an initial list of hosts to perform discovery when new node is started:
+# The default list of hosts is ["127.0.0.1", "[::1]"]
+#
+#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
+#
+# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
+#
+#discovery.zen.minimum_master_nodes: 
+#
+# For more information, consult the zen discovery module documentation.
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+gateway.recover_after_nodes: 1
+gateway.expected_nodes: 1
+#
+# For more information, consult the gateway module documentation.
+#
+# ---------------------------------- Various -----------------------------------
+#
+# Require explicit names when deleting indices:
+#
+#action.destructive_requires_name: true
+#
+
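+# Bootstrap a single-node test cluster (the entry must match node.name above).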
+cluster.initial_master_nodes:
+  - "test"
diff --git a/testconf/jvm.options b/testconf/jvm.options
new file mode 100644
index 0000000000000000000000000000000000000000..54b6b79e18028c556efa875e636145095368fcf4
--- /dev/null
+++ b/testconf/jvm.options
@@ -0,0 +1,107 @@
+## JVM configuration
+
+################################################################
+## IMPORTANT: JVM heap size
+################################################################
+##
+## You should always set the min and max JVM heap
+## size to the same value. For example, to set
+## the heap to 4 GB, set:
+##
+## -Xms4g
+## -Xmx4g
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
+## for more information
+##
+################################################################
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
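+# Small heap, intended only for the CI test container.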
+-Xms128M
+-Xmx128M
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+-XX:+UseG1GC
+-XX:G1ReservePercent=25
+-XX:InitiatingHeapOccupancyPercent=30
+
+## optimizations
+
+# pre-touch memory pages used by the JVM during initialization
+-XX:+AlwaysPreTouch
+
+## basic
+
+# force the server VM
+-server
+
+# explicitly set the stack size
+-Xss1m
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g. filenames)
+-Dfile.encoding=UTF-8
+
+# use our provided JNA always versus the system one
+-Djna.nosys=true
+
+# turn off a JDK optimization that throws away stack traces for common
+# exceptions because stack traces are important for debugging
+-XX:-OmitStackTraceInFastThrow
+
+# flags to configure Netty
+-Dio.netty.noUnsafe=true
+-Dio.netty.noKeySetOptimization=true
+-Dio.netty.recycler.maxCapacityPerThread=0
+
+# log4j 2
+-Dlog4j.shutdownHookEnabled=false
+-Dlog4j2.disable.jmx=true
+-Dlog4j.skipJansi=true
+
+-Djava.io.tmpdir=${ES_TMPDIR}
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails
+# heap dumps are created in the working directory of the JVM
+#-XX:+HeapDumpOnOutOfMemoryError
+
+# specify an alternative path for heap dumps
+# ensure the directory exists and has sufficient space
+#-XX:HeapDumpPath=/var/lib/elasticsearch
+
+## JDK 8 GC logging
+
+#8:-XX:+PrintGCDetails
+#8:-XX:+PrintGCDateStamps
+#8:-XX:+PrintTenuringDistribution
+#8:-XX:+PrintGCApplicationStoppedTime
+#8:-Xloggc:/var/log/elasticsearch/gc.log
+#8:-XX:+UseGCLogFileRotation
+#8:-XX:NumberOfGCLogFiles=32
+#8:-XX:GCLogFileSize=64m
+
+# JDK 9+ GC logging
+#9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
+# Due to internationalization enhancements in JDK 9, Elasticsearch needs to set the provider to COMPAT, otherwise
+# time/date parsing will break in an incompatible way for some date patterns and locales.
+#9-:-Djava.locale.providers=COMPAT
+
+# Silence annoying warning (cf. https://discuss.elastic.co/t/http-publish-host-was-printed-as-ip-port-instead-of-hostname-ip-port/170820/2)
+-Des.transport.cname_in_publish_address=true
+
diff --git a/testconf/log4j2.properties b/testconf/log4j2.properties
new file mode 100644
index 0000000000000000000000000000000000000000..6dd5029fb02d991f966270c6a1584bd49f28141b
--- /dev/null
+++ b/testconf/log4j2.properties
@@ -0,0 +1,32 @@
+# Configure file-only logging, to avoid creating a loop with
+# syslog (via journald forwarding).
+
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+
+rootLogger.level = info
+rootLogger.appenderRef.rolling.ref = rolling