Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision
  • acmeserver2
  • clickhouse
  • dns-resolver
  • docs_operating
  • env-vars-in-include-paths
  • improve-dns-toplevel-probes
  • improve-service-discovery
  • loki
  • master
  • net-overlay_firewall_containers
  • nginx-default-site
  • prometheus-external-healthchecks
  • registry-mirror
  • rsyslog-modern-json
  • service-turndown
  • tabacco-in-container
  • use_proxy_protocol
  • webdiff
18 results

Target

Select target project
  • ai3/float
  • micah/float
2 results
Select Git revision
  • acmeserver2
  • clickhouse
  • dns-resolver
  • docs_operating
  • env-vars-in-include-paths
  • improve-dns-toplevel-probes
  • improve-service-discovery
  • loki
  • master
  • net-overlay_firewall_containers
  • nginx-default-site
  • prometheus-external-healthchecks
  • registry-mirror
  • rsyslog-modern-json
  • service-turndown
  • tabacco-in-container
  • use_proxy_protocol
  • webdiff
18 results
Show changes
Commits on Source (121)
Showing
with 121 additions and 1181 deletions
......@@ -21,11 +21,13 @@ variables:
--passwords=${TEST_DIR}/passwords.yml
--num-hosts=1
${LIBVIRT:+-e libvirt.remote_host=${LIBVIRT#*@} -e libvirt.remote_user=${LIBVIRT%@*}}
-e ansible_cfg.defaults.strategy=mitogen_linear ${MITOGEN:+-e ansible_cfg.defaults.strategy_plugins=${MITOGEN}/ansible_mitogen/plugins/strategy}
${APT_PROXY:+-e config.apt_proxy=${APT_PROXY}}
$CREATE_ENV_VARS $BUILD_DIR
- with-ssh-key floatup ${LIBVIRT:+--ssh $LIBVIRT} --inventory $BUILD_DIR/hosts.yml --ram 2048 --cpu 2 --image ${VM_IMAGE:-bullseye} ${FLOATUP_ARGS} up
- with-ssh-key floatup ${LIBVIRT:+--ssh $LIBVIRT} --inventory $BUILD_DIR/hosts.yml --ram 2048 --cpu 2 --image ${VM_IMAGE:-bookworm} ${FLOATUP_ARGS} up
- ls -al /root/.ssh
- cat /root/.ssh/config
- cat $BUILD_DIR/hosts.yml
- with-ssh-key ./test-driver init --no-vagrant $BUILD_DIR
- with-ssh-key ./test-driver run $BUILD_DIR
after_script:
......@@ -35,26 +37,38 @@ variables:
CREATE_ENV_VARS: ""
TEST_DIR: ""
tags: [ai3]
# Some artifacts may be missing, depending on the specific job.
artifacts:
when: on_failure
when: always
expire_in: 1 week
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_SLUG}_${CI_COMMIT_SHORT_SHA}"
reports:
dotenv: deploy.env
junit: pytest.xml
paths:
- ".vmine_group_review*"
- "${BUILD_DIR}/ansible.log"
- "${BUILD_DIR}/logs"
base_test:
<<: *base_test
variables:
VM_IMAGE: "bullseye"
CREATE_ENV_VARS: "-e config.float_debian_dist=bullseye -e inventory.group_vars.vagrant.ansible_python_interpreter=/usr/bin/python3"
VM_IMAGE: "bookworm"
TEST_DIR: "test/base.ref"
trixie_test:
<<: *base_test
# Need a more recent Ansible version, for Python 3.12 targets.
image: registry.git.autistici.org/ai3/docker/float-runner:trixie
variables:
VM_IMAGE: "trixie"
CREATE_ENV_VARS: "-e config.float_debian_dist=trixie"
TEST_DIR: "test/base.ref"
full_test:
<<: *base_test
variables:
VM_IMAGE: "bullseye"
CREATE_ENV_VARS: "-e config.float_debian_dist=bullseye -e inventory.group_vars.vagrant.ansible_python_interpreter=/usr/bin/python3"
VM_IMAGE: "bookworm"
TEST_DIR: "test/full.ref"
rules:
- if: $CI_MERGE_REQUEST_ID == ''
......@@ -64,16 +78,11 @@ full_test_review:
after_script:
- with-ssh-key ./test-driver cleanup --no-vagrant $BUILD_DIR
variables:
VM_IMAGE: "bullseye"
CREATE_ENV_VARS: "-e config.float_debian_dist=bullseye -e inventory.group_vars.vagrant.ansible_python_interpreter=/usr/bin/python3"
VM_IMAGE: "bookworm"
CREATE_ENV_VARS: "-e inventory.group_vars.vagrant.ansible_python_interpreter=/usr/bin/python3"
FLOATUP_ARGS: "--state-file .vmine_group_review_$CI_MERGE_REQUEST_ID --ttl 6h --env deploy.env --dashboard-url https://vm.investici.org"
TEST_DIR: "test/full.ref"
allow_failure: true
artifacts:
when: always
reports:
dotenv: deploy.env
paths: ['.vmine_group_review*']
environment:
name: review/$CI_COMMIT_REF_SLUG
url: $VMINE_GROUP_URL
......@@ -103,13 +112,6 @@ stop_full_test_review:
# CREATE_ENV_VARS: "--additional-config test/backup.ref/config-backup.yml --playbook test/backup.ref/site.yml"
# TEST_DIR: "test/backup.ref"
bookworm_test:
<<: *base_test
variables:
VM_IMAGE: "bookworm"
CREATE_ENV_VARS: "-e config.float_debian_dist=bookworm"
TEST_DIR: "test/full.ref"
docker_build_and_release_tests:
stage: docker_build
image: quay.io/podman/stable
......@@ -122,6 +124,7 @@ docker_build_and_release_tests:
only:
changes:
- test/float_integration_test/**
- test/Dockerfile
refs:
- master
......@@ -11,13 +11,12 @@ stretch build host), and distribute it with alternative methods.
These can normally be built with standard Debian development tools,
such as *dpkg-buildpackage*.
* [ai/sso](https://git.autistici.org/ai/sso)
* [id/auth](https://git.autistici.org/id/auth)
* [id/go-sso](https://git.autistici.org/id/go-sso)
* [id/sso-server](https://git.autistici.org/id/sso-server)
* [id/keystore](https://git.autistici.org/id/keystore)
* [id/usermetadb](https://git.autistici.org/id/usermetadb)
* [ale/zonetool](https://git.autistici.org/ale/zonetool)
* [ai3/tools/zonetool](https://git.autistici.org/ai3/tools/zonetool)
* [ai3/tools/cgroups-exporter](https://git.autistici.org/ai3/tools/cgroups-exporter)
* [ai3/tools/runcron](https://git.autistici.org/ai3/tools/runcron)
* [ai3/tools/audisp-json](https://git.autistici.org/ai3/tools/audisp-json)
......@@ -28,7 +27,7 @@ such as *dpkg-buildpackage*.
* [ai3/tools/tabacco](https://git.autistici.org/ai3/tools/tabacco)
* [ai3/thirdparty/rsyslog-exporter](https://git.autistici.org/ai3/thirdparty/rsyslog-exporter)
* [ai3/thirdparty/restic](https://git.autistici.org/ai3/thirdparty/restic)
* [ai3/thirdparty/litestream](https://git.autistici.org/ai3/thirdparty/litestream)
These are distributed via our own package repository at
*deb.autistici.org*, which currently supports the *amd64* and *arm64*
......
Playbook
===
This document describes how to perform some common operations in
*float*.
## Applying changes
### Rolling back the configuration
If you are using a Git repository as your configuration source,
*float* will keep track of which commit has been pushed to production
last, and it will try to prevent you from pushing an old version of
the configuration, failing immediately with an error. This is a simple
check to make sure that people do not inadvertently roll back the
production configuration by pushing from an out-of-date client.
In most cases what you want to do in that case is to simply run *git
pull* and bring your copy of the repository up to date. But if you
really need to push an old version of the configuration in an
emergency, you can do so by setting the *rollback* value to *true* on
the command-line:
```shell
$ float run -e rollback=true site.yml
```
## For administrators
### SSH Client Setup
If you delegated SSH management to float by setting *enable_ssh* to
true (see the [configuration reference](configuration.md)), float will
create a SSH CA to sign all your host keys.
You will find the public key for this CA in the
*credentials/ssh/key.pub* file; it will be created the first time you
run the "init-credentials" playbook.
Assuming that all your target hosts share the same domain (so you can
use a wildcard), you should add the following entry to
*~/.ssh/known_hosts*:
```
@cert_authority *.example.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAA....
```
Since all logins happen as root, it may be convenient to also add a
section to your *~/.ssh/config* file like the following:
```
Host *.example.com
User root
```
### Adding an admin account
Adding a new administrator account is just a matter of editing the
*admins* [configuration variable](configuration.md) and add a new
entry to it.
The first thing you will need is a hashed version of your
password. The authentication service in float supports a number of
legacy hashing schemes, including those supported by the system
crypt(). The most secure hashing scheme supported is Argon2, and you
can use our custom tool to generate a valid hash. To install it:
```shell
$ go install git.autistici.org/ai3/go-common/cmd/pwtool
```
Run the *pwtool* utility with your new password as an argument, as
shown below:
```shell
# Do not save your password in the history of your shell
$ export HISTIGNORE="./pwtool.amd64*"
$ ./pwtool.amd64 PASSWORD
```
where PASSWORD is your desired password.
It will output the hashed password.
Then modify the YAML file *group_vars/all/admins.yml*. At the bare
minimum the new account should have a *name*, *email*, *password* and
*ssh_keys* attributes, e.g.:
```yaml
---
admins:
- name: "foo"
email: "foo@example.com"
password: "$a2$3$32768$4$abcdef...."
ssh_keys:
- "ssh-ed25519 AAAAC3Nza..."
```
Here above, "ssh_keys:" needs to be populated with your public key,
preferably stripped of the trailing user@hostname text (which may leak
your personal information), and "password:" must be the hashed
password you got from *pwtool* earlier.
### Setting up OTP for an admin account
First you need to manually generate the OTP secret on your computer:
```shell
$ SECRET=$(dd if=/dev/urandom bs=20 count=1 2>/dev/null | base32)
$ echo $SECRET
EVUVNACTWRAIERATIZUQA6YQ4WS63RN2
```
Install the package qrencode, and feed the OTP secret to it.
For example with apt ["apt install qrencode" of course].
```shell
$ EMAIL="sub@krutt.org"
$ qrencode -t UTF8 "otpauth://totp/example.com:${EMAIL}?secret=${SECRET}&issuer=example.com&algorithm=SHA1&digits=6&period=30"
```
and scan the QR code with your favourite app.
Then add it to your user object in *group_vars/all/admins.yml* as the
*totp_secret* attribute:
```yaml
---
admins:
- name: "foo"
totp_secret: "EVUVNACTWRAIERATIZUQA6YQ4WS63RN2"
...
```
Finally, configure your TOTP client (app, YubiKey, etc.) with the same
secret.
Note that the secret is stored in cleartext in the git repository, so
using a hardware token (U2F) is preferred.
### Registering a U2F hardware token for an admin account
In the *group_vars/all/admins.yml* file, you can add the
*u2f_registrations* attribute to accounts, which is a list of the
allowed U2F device registrations.
To register a new device, you are going to need the *pamu2fcfg* tool
(part of the *pamu2fcfg* Debian package). The following snippet should
produce the two YAML attributes that you need to set:
```shell
$ pamu2fcfg --nouser --appid https://accounts.example.com \
| tr -d : \
| awk -F, '{print "key_handle: \"" $1 "\"\npublic_key: \"" $2 "\""}'
```
press enter, touch the key, copy the output and insert it in
*group_vars/all/admins.yml*, the final results should look like:
```yaml
---
admins:
- name: "foo"
email: "foo@example.com"
password: "$a2$3$32768$4$abcdef...."
ssh_keys:
- "ssh-ed25519 AAAAC3Nza..."
u2f_registrations:
- key_handle: "r4wWRHgzJjl..."
public_key: "04803e4aff4..."
```
**NOTE**: the above will work with *pam_u2f* version 1.0.7, but it will *not*
work with pam_u2f version 1.1.0 due to changes in the output format!
......@@ -2088,6 +2088,12 @@ using single sign-on, allowing access only to administrators (members
of the *admins* group). This is quite useful for admin web interfaces
of internal services that do not support SSO integration of their own.
`enable_api_proxy`: If true, place the service behind authentication
using a mechanism more appropriate for non-interactive APIs (HTTP
Basic Authentication using Application-Specific Passwords). Only members
of the *admins* group will have access. When this option is set, you
also need to specify a unique `auth_service` to be used for ASPs.
#### HTTP (All domains)
`horizontal_endpoints`: List of HTTP endpoints exported by the
......@@ -2494,9 +2500,12 @@ attributes that specify static DNS entries that will be added to
`nginx_cache_keys_mem` is the memory size of the key buffer for the
global NGINX HTTP cache.
`nginx_cache_fs_size` is the maximum on-disk size of the NGINX HTTP
cache (note that NGINX might use as much as twice what specified here,
depending on expiration policy).
`nginx_cache_custom_params` are additional parameters for customizing
the *proxy_cache_path* NGINX configuration directive for the global
cache. The most important attribute you might want to set is possibly
*max_size*, which controls the maximum size of the on-disk cache (note
that NGINX might use as much as twice what specified, depending on
expiration policy).
`nginx_global_custom_headers` - a dictionary of {header: value} pairs
corresponding to HTTP headers that must be set on *every* response.
......@@ -2798,7 +2807,7 @@ There are some minimal requirements on how your Ansible environment
should be set up for this to work:
* you must have a *group_vars/all* directory (this is where we'll
write the autogenerated application credentials file *secrets.yml*q)
write the autogenerated application credentials file *secrets.yml*)
* you must include float's *playbooks/all.yml* playbook file from the
toolkit source directory at the beginning of your playbook
* you should use the *float* wrapper instead of running
......@@ -3241,7 +3250,7 @@ Install the package qrencode, and feed the OTP secret to it.
For example with apt ["apt install qrencode" of course].
```shell
$ EMAIL="sub@krutt.org"
$ EMAIL="foo@example.com"
$ qrencode -t UTF8 "otpauth://totp/example.com:${EMAIL}?secret=${SECRET}&issuer=example.com&algorithm=SHA1&digits=6&period=30"
```
......@@ -3318,6 +3327,19 @@ If you want more control over this process (Debian upgrades have been
event-less for a while now, but it's not always been the case) you
can of course run the upgrade manually.
### Decommissioning a host
When turning down a host, it is necessary, at some point, to
reschedule the services that were there onto some other hosts. To
achieve a smooth transition, this is best done while the host is still
available.
To do this, set the *turndown* attribute to *true* in the inventory
for the host you want to turn down, and then run *float* once more.
This should safely reschedule all services, and remove them from the
target host. It is then possible to simply shut down the target host
and wipe its data.
# Example scenarios
This section will look at some example scenarios and use cases for
......
No preview for this file type
......@@ -162,13 +162,7 @@ DEFAULT_VARS = {
# Ansible inventory (hosts are created dynamically).
'inventory': {
'hosts': {},
'group_vars': {
'vagrant': {
'ansible_user': 'vagrant',
'ansible_become': True,
'ansible_ssh_private_key_file': '~/.vagrant.d/insecure_private_key',
},
},
'group_vars': {},
},
# Ansible configuration.
......@@ -182,6 +176,7 @@ DEFAULT_VARS = {
'force_handlers': True,
'log_path': 'ansible.log',
'retry_files_enabled': False,
'interpreter_python': '/usr/bin/python3',
'nocows': 1,
'display_skipped_hosts': False,
......@@ -346,7 +341,7 @@ def _render_skel(target_dir, ctx):
def command_create_env(path, services, passwords, playbooks,
roles_path, num_hosts, additional_host_groups,
additional_configs, ram, domain, infra_domain,
extra_vars):
become, extra_vars):
all_vars = DEFAULT_VARS
# Set paths in the internal config.
......@@ -355,6 +350,20 @@ def command_create_env(path, services, passwords, playbooks,
all_vars['passwords_yml_path'] = passwords
all_vars['playbooks'] = playbooks
# Set connection-related user parameters.
if become == 'root':
all_vars['inventory']['group_vars']['vagrant'] = {
'ansible_user': 'root',
'ansible_become': False,
}
else:
all_vars['inventory']['group_vars']['vagrant'] = {
'ansible_user': become,
'ansible_become': True,
# For legacy compatibility reasons.
'ansible_ssh_private_key_file': '~/.vagrant.d/insecure_private_key',
}
# Extend the Ansible roles_path.
if roles_path:
for rpath in roles_path.split(':'):
......@@ -364,14 +373,13 @@ def command_create_env(path, services, passwords, playbooks,
# Catch ValueError to handle parsing errors for composite-valued
# options and print a friendly message.
try:
all_vars['inventory']['hosts'] = _random_hosts(
num_hosts,
_parse_additional_host_groups(additional_host_groups),
)
extra_memberships = _parse_additional_host_groups(additional_host_groups)
except ValueError:
print('Unable to parse additional-host-group spec', file=sys.stderr)
return 1
all_vars['inventory']['hosts'] = _random_hosts(num_hosts, extra_memberships)
all_vars['ram'] = ram
all_vars['config']['domain_public'] = [domain]
all_vars['config']['domain'] = (
......@@ -549,6 +557,9 @@ memberships, using the --additional-host-group command-line option.
create_env_parser.add_argument(
'--ram', metavar='MB', type=int, default=3072,
help='RAM for each VM when using --vagrant (default: 3072)')
create_env_parser.add_argument(
'--become', metavar='USER', default='root',
help='ansible_user, disable ansible_become if "root"')
create_env_parser.add_argument(
'--additional-host-group', metavar='GROUP=HOST1[,HOST2...]',
dest='additional_host_groups',
......
......@@ -62,4 +62,4 @@
loop: "{{ x509_ca_list | default(default_x509_ca_list) }}"
- name: Generate global DH params
local_action: command openssl dhparam -out "{{ credentials_dir }}/x509/dhparam" "{{ dhparam_bits | default('2048') }}" creates="{{ credentials_dir }}/x509/dhparam"
local_action: command openssl dhparam -out "{{ credentials_dir }}/x509/dhparam-{{ dhparam_bits | default('2048') }}" "{{ dhparam_bits | default('2048') }}" creates="{{ credentials_dir }}/x509/dhparam-{{ dhparam_bits | default('2048') }}"
......@@ -282,6 +282,16 @@ def _global_dns_map(inventory):
return dns
# Return the hosts that are not available for scheduling, as a
# Python set.
def _unavailable_hosts(inventory):
unavail = set()
for name, values in inventory['hosts'].items():
if values.get('turndown'):
unavail.add(name)
return unavail
# Build a group -> hosts map out of an inventory.
def _build_group_map(inventory, assignments=None):
group_map = {}
......@@ -322,6 +332,7 @@ def _build_public_endpoints_map(services):
'name': upstream_name,
'service_name': service_name,
'port': pe['port'],
'enable_api_proxy': pe.get('enable_api_proxy', False),
'enable_sso_proxy': pe.get('enable_sso_proxy', False),
'sharded': pe.get('sharded', False),
}
......@@ -375,6 +386,7 @@ def _build_horizontal_upstreams_map(services):
'name': upstream_name,
'service_name': service_name,
'port': ep['port'],
'enable_api_proxy': False,
'enable_sso_proxy': False,
'sharded': False,
}
......@@ -499,7 +511,8 @@ class Assignments(object):
return str(self._fwd)
@classmethod
def _available_hosts(cls, service, group_map, service_hosts_map):
def _available_hosts(cls, service, group_map, service_hosts_map,
unavailable_hosts={}):
if 'schedule_with' in service:
return service_hosts_map[service['schedule_with']]
scheduling_groups = ['all']
......@@ -512,7 +525,7 @@ class Assignments(object):
if g not in group_map:
raise Exception(f'The scheduling_group "{g}" is not defined in inventoy')
available_hosts.update(group_map[g])
return list(available_hosts)
return list(available_hosts.difference(unavailable_hosts))
@classmethod
def schedule(cls, services, inventory):
......@@ -525,6 +538,7 @@ class Assignments(object):
"""
service_hosts_map = {}
service_master_map = {}
unavailable_hosts = _unavailable_hosts(inventory)
group_map = _build_group_map(inventory)
host_occupation = collections.defaultdict(int)
......@@ -540,13 +554,16 @@ class Assignments(object):
for service_name in sorted(services.keys(), key=_sort_key):
service = services[service_name]
available_hosts = cls._available_hosts(service, group_map,
service_hosts_map)
service_hosts_map,
unavailable_hosts)
num_instances = service.get('num_instances', 'all')
if num_instances == 'all':
service_hosts = sorted(available_hosts)
else:
service_hosts = sorted(_binpack(
available_hosts, host_occupation, num_instances))
if not service_hosts:
raise Exception(f'No hosts available to schedule service {service_name}')
service_hosts_map[service_name] = service_hosts
for h in service_hosts:
host_occupation[h] += 1
......
......@@ -6,7 +6,7 @@
set -a
. /etc/litestream/{{ dataset_tag }}.env
/usr/bin/litestream restore --config=/etc/litestream/{{ dataset_tag }}.yml --if-replica-exists -v "{{ dataset_path }}/{{ dataset_filename }}"
/usr/bin/litestream restore --config=/etc/litestream/{{ dataset_tag }}.yml --if-replica-exists "{{ dataset_path }}/{{ dataset_filename }}"
if [ $? -gt 0 ]; then
echo "ERROR: restore failed!" >&2
......
File deleted
{
"defaultAction": "SCMP_ACT_ERRNO",
"archMap": [
{
"architecture": "SCMP_ARCH_X86_64",
"subArchitectures": [
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
]
},
{
"architecture": "SCMP_ARCH_AARCH64",
"subArchitectures": [
"SCMP_ARCH_ARM"
]
},
{
"architecture": "SCMP_ARCH_MIPS64",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPS64N32",
"subArchitectures": [
"SCMP_ARCH_MIPS",
"SCMP_ARCH_MIPS64"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64N32"
]
},
{
"architecture": "SCMP_ARCH_MIPSEL64N32",
"subArchitectures": [
"SCMP_ARCH_MIPSEL",
"SCMP_ARCH_MIPSEL64"
]
},
{
"architecture": "SCMP_ARCH_S390X",
"subArchitectures": [
"SCMP_ARCH_S390"
]
}
],
"syscalls": [
{
"names": [
"_llseek",
"_newselect",
"accept",
"accept4",
"access",
"adjtimex",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"get_robust_list",
"get_thread_area",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"io_destroy",
"io_getevents",
"io_setup",
"io_submit",
"ioctl",
"ioprio_get",
"ioprio_set",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mount",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"name_to_handle_at",
"nanosleep",
"newfstatat",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"preadv2",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"pwritev2",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"reboot",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"set_robust_list",
"set_thread_area",
"set_tid_address",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"setsid",
"setsockopt",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"statx",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"umount",
"umount2",
"uname",
"unlink",
"unlinkat",
"unshare",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 0,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 8,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 131072,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 131080,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 4294967295,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {}
},
{
"names": [
"sync_file_range2"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"ppc64le"
]
},
"excludes": {}
},
{
"names": [
"arm_fadvise64_64",
"arm_sync_file_range",
"sync_file_range2",
"breakpoint",
"cacheflush",
"set_tls"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"arm",
"arm64"
]
},
"excludes": {}
},
{
"names": [
"arch_prctl"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"amd64",
"x32"
]
},
"excludes": {}
},
{
"names": [
"modify_ldt"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"amd64",
"x32",
"x86"
]
},
"excludes": {}
},
{
"names": [
"s390_pci_mmio_read",
"s390_pci_mmio_write",
"s390_runtime_instr"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"arches": [
"s390",
"s390x"
]
},
"excludes": {}
},
{
"names": [
"open_by_handle_at"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_DAC_READ_SEARCH"
]
},
"excludes": {}
},
{
"names": [
"bpf",
"clone",
"fanotify_init",
"lookup_dcookie",
"mount",
"name_to_handle_at",
"perf_event_open",
"quotactl",
"setdomainname",
"sethostname",
"setns",
"umount",
"umount2",
"unshare"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_ADMIN"
]
},
"excludes": {}
},
{
"names": [
"clone"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
],
"comment": "",
"includes": {},
"excludes": {
"caps": [
"CAP_SYS_ADMIN"
],
"arches": [
"s390",
"s390x"
]
}
},
{
"names": [
"clone"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 1,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
],
"comment": "s390 parameter ordering for clone is different",
"includes": {
"arches": [
"s390",
"s390x"
]
},
"excludes": {
"caps": [
"CAP_SYS_ADMIN"
]
}
},
{
"names": [
"reboot"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_BOOT"
]
},
"excludes": {}
},
{
"names": [
"chroot"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_CHROOT"
]
},
"excludes": {}
},
{
"names": [
"delete_module",
"init_module",
"finit_module",
"query_module"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_MODULE"
]
},
"excludes": {}
},
{
"names": [
"get_mempolicy",
"mbind",
"name_to_handle_at",
"set_mempolicy"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_NICE"
]
},
"excludes": {}
},
{
"names": [
"acct"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_PACCT"
]
},
"excludes": {}
},
{
"names": [
"kcmp",
"process_vm_readv",
"process_vm_writev",
"ptrace"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_PTRACE"
]
},
"excludes": {}
},
{
"names": [
"iopl",
"ioperm"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_RAWIO"
]
},
"excludes": {}
},
{
"names": [
"settimeofday",
"stime",
"clock_settime"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_TIME"
]
},
"excludes": {}
},
{
"names": [
"vhangup"
],
"action": "SCMP_ACT_ALLOW",
"args": [],
"comment": "",
"includes": {
"caps": [
"CAP_SYS_TTY_CONFIG"
]
},
"excludes": {}
}
]
}
\ No newline at end of file
# TODO: switch to keyserver once the apt_key --no-tty bug in Ansible is fixed.
- name: Install docker.com GPG key
apt_key:
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Install docker.com package repository
apt_repository:
repo: "deb [arch=amd64] {% if apt_proxy is defined %}http://{{ apt_proxy }}/HTTPS/{% else %}https:{% endif %}//download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
state: present
- file:
path: "/etc/docker"
state: directory
- name: Configure docker daemon
template:
src: daemon.json.j2
dest: /etc/docker/daemon.json
notify:
- restart docker
- name: Install docker packages
apt:
name: "{{ packages }}"
state: present
vars:
packages:
- docker-ce
- systemd-docker
......@@ -5,11 +5,11 @@
src: "assetmon.default.j2"
dest: "/etc/default/assetmon"
- include_tasks: docker.yml
when: "container_runtime == 'docker'"
- fail:
msg: "Only the Podman container runtime is supported"
when: "container_runtime != 'podman'"
- include_tasks: podman.yml
when: "container_runtime == 'podman'"
- name: Login to the Docker registry
shell: 'echo -n "{{ docker_registry_password }}" | {{ container_runtime }} login --authfile {{ docker_auth_file }} -u "{{ docker_registry_username }}" --password-stdin "{{ docker_registry_url }}"'
......@@ -17,7 +17,7 @@
check_mode: no
when: "docker_registry_url != ''"
- name: Install docker-related scripts
- name: Install container-related scripts
template:
src: "{{ item.src }}"
dest: "{{ item.dst }}"
......@@ -30,7 +30,7 @@
- src: "in-container.j2"
dst: "/usr/local/bin/in-container"
- name: Install docker-related files
- name: Install container-related files
copy:
src: "{{ item.src }}"
dest: "{{ item.dst }}"
......@@ -39,7 +39,7 @@
- src: "in-container.sh"
dst: "/etc/profile.d/in-container.sh"
- name: Install docker cleanup cron job
- name: Install container cleanup cron job
copy:
dest: /etc/cron.d/docker-cleanup
content: "33 3 * * * root runcron --quiet /usr/local/bin/docker-cleanup\n"
......
---
# Pick a package source for Podman - defaults to using the stock
# Debian package since bullseye. Possible choices are 'ai' (a version
# pinned in our repository, tested working on Debian buster), 'debian'
# (use standard Debian packages, only available from bullseye) or
# 'kubic' (use the upstream Kubic repositories, with the latest
# release).
- set_fact:
podman_default_package_source: "{{ podman_default_package_source_by_distro[float_debian_dist] | default('debian') }}"
- set_fact:
podman_package_source: "{{ podman_default_package_source }}"
when: "podman_package_source is not defined"
- include_tasks: "podman_{{ podman_package_source }}.yml"
# Try to detect which podman version we're on based on source and
# distribution.
- set_fact:
podman_version: "{{ 3 if float_debian_dist in ('buster', 'bullseye') else 4 }}"
when: "podman_package_source == 'debian'"
- set_fact:
podman_version: 4
when: "podman_package_source != 'debian'"
# If we've changed sources.list for podman, it is important
# that we run apt upgrade now.
- name: Apt upgrade
# Install Podman using packages from the Debian distribution
# (available starting with Bullseye).
- name: Install Podman packages
apt:
upgrade: 'yes'
when: "podman_sources_list is defined and podman_sources_list.changed"
name: "{{ packages }}"
state: present
vars:
packages:
- podman
- crun
# Try to detect which podman version we're on based on distribution.
- set_fact:
podman_version: "{{ podman_version_by_distro[float_debian_dist] }}"
- name: Symlink podman to docker
file:
......@@ -46,6 +31,7 @@
copy:
src: storage.conf
dest: "/etc/containers/storage.conf"
when: "podman_version < '5'"
- file:
path: "/etc/containers/certs.d"
......
---
# Install podman from the A/I package repository. This is a "known
# good" version that works on Debian Buster.
#
# Some dependencies need to be pulled from buster-backports.
#
# The first two tasks undo a previous installation from the upstream
# Kubic repository, so switching package sources converges cleanly.
- name: Remove podman Kubic repository key
  file:
    path: "/etc/apt/trusted.gpg.d/kubic.gpg"
    state: absent
- name: Remove Kubic podman repository
  apt_repository:
    repo: "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /"
    state: absent
- name: Add A/I podman repository
  apt_repository:
    repo: "deb [signed-by=/usr/share/keyrings/deb.autistici.org.gpg] http://deb.autistici.org/urepo buster-podman/"
    state: present
  # Registered so callers can detect a sources change (e.g. to force
  # an apt upgrade afterwards).
  register: podman_sources_list
- name: Pin podman packages to our repository
  copy:
    dest: "/etc/apt/preferences.d/99podman"
    # Priority > 1000 forces our repository's version even if it is a
    # downgrade relative to other sources.
    content: |
      Package: podman
      Pin: origin deb.autistici.org
      Pin-Priority: 1001
- name: Install podman packages
  apt:
    name: podman
    state: present
    # NOTE(review): on non-buster hosts this evaluates to the empty
    # string — the apt module treats a falsy default_release as unset,
    # but consider using `omit` instead; confirm behavior.
    default_release: "{{ 'buster-backports' if float_debian_dist == 'buster' else '' }}"
# TODO: remove this once the podman packaging issues are fixed.
- name: Install a working seccomp.json
  copy:
    src: "seccomp-0.3.2.json"
    dest: "/usr/share/containers/seccomp.json"
---
# Install Podman using packages from the Debian distribution
# (available starting with Bullseye).
#
# The first three tasks clean up artifacts left behind by the other
# package sources (the upstream Kubic repository and the A/I
# buster-podman repository), so hosts converge when switching sources.
- name: Remove podman Kubic repository key
  file:
    path: "/etc/apt/trusted.gpg.d/kubic.gpg"
    state: absent
- name: Remove other podman repositories
  apt_repository:
    repo: "{{ item }}"
    state: absent
  loop:
    - "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /"
    - "deb [signed-by=/usr/share/keyrings/deb.autistici.org.gpg] http://deb.autistici.org/urepo buster-podman/"
- name: Remove podman package pins
  file:
    path: "/etc/apt/preferences.d/99podman"
    state: absent
  # Registered so callers can detect a sources change; NOTE(review):
  # registering the pin-removal task (a `file` task) under the name
  # podman_sources_list looks vestigial — confirm consumers.
  register: podman_sources_list
- name: Install Podman packages
  apt:
    name: "{{ packages }}"
    state: present
  vars:
    packages:
      - podman
      # crun is the OCI runtime used by podman here.
      - crun
---
# Install Podman from the upstream "kubic" repository. This is usually
# the latest Podman release.
#
# The removal tasks undo a previous installation from the A/I
# repository, so switching package sources converges cleanly.
- name: Add podman Kubic repository key
  copy:
    src: "kubic.gpg"
    dest: "/etc/apt/trusted.gpg.d/kubic.gpg"
- name: Remove A/I podman repository
  apt_repository:
    repo: "deb [signed-by=/usr/share/keyrings/deb.autistici.org.gpg] http://deb.autistici.org/urepo buster-podman/"
    state: absent
- name: Add Kubic podman repository
  apt_repository:
    repo: "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /"
    state: present
  # Registered so callers can detect a sources change (e.g. to force
  # an apt upgrade afterwards).
  register: podman_sources_list
- name: Remove podman package pins
  file:
    path: "/etc/apt/preferences.d/99podman"
    state: absent
# Podman Kubic packages on buster require a version of libseccomp2
# from the backports.
- name: Install libseccomp2 from backports
  apt:
    name: libseccomp2
    state: present
    default_release: buster-backports
  when: "float_debian_dist == 'buster'"
- name: Install podman packages
  apt:
    name: podman
    state: present
{# Render the Docker daemon.json: merge test-only overrides on top of
   the base docker_daemon_config and emit the result as JSON.
   NOTE(review): testing|default(True) applies the testing overrides
   unless "testing" is explicitly set to false — confirm this default
   is intended for production hosts. #}
{% set tmp_config = docker_daemon_config_testing if testing|default(True) else {} %}
{{ docker_daemon_config | combine(tmp_config) | to_json }}
......@@ -21,6 +21,7 @@ ExecStart=/usr/lib/float/docker/run-{{ item.service }}-{{ item.container.name }}
ExecStopPost=-/usr/bin/{{ container_runtime }} rm -f -i --cidfile=%t/%N.cid
{% endif %}
ExecStopPost=-rm -f %t/%N.cid
ExecReload=/usr/bin/{{ container_runtime }} kill --signal SIGHUP {{ item.service }}-{{ item.container.name }}
TimeoutStopSec=60
TimeoutStartSec=240
KillMode=mixed
......