diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 19141551aeafa2a03f7a28e056df13b4599dc28f..eaca15b1992f2d2f0c564de2530c579bb41354da 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -56,6 +56,13 @@ full_test:
     CREATE_ENV_VARS: "-e config.float_debian_dist=bullseye -e inventory.group_vars.vagrant.ansible_python_interpreter=/usr/bin/python3"
     TEST_DIR: "test/full.ref"
 
+backup_test:
+  <<: *base_test
+  variables:
+    VM_IMAGE: "bullseye"
+    CREATE_ENV_VARS: "--additional-config test/backup.ref/config-backup.yml --playbook test/backup.ref/site.yml"
+    TEST_DIR: "test/backup.ref"
+
 docker_build_and_release_tests:
   stage: docker_build
   image: docker:latest
diff --git a/docs/reference.md b/docs/reference.md
index 9b6b502eaba61eafd47abee1d024d69e5ccea930..2e470a746c89125188b38cc571c7c3d0936c102e 100644
--- a/docs/reference.md
+++ b/docs/reference.md
@@ -311,23 +311,42 @@ datasets only once (on the service master host).
 ### Backups
 
 If provided with credentials for an external data repository, float
-will automatically make backups of your configured datasets. Float
-runs its own backup management system
-([tabacco](https://git.autistici.org/ai3/tools/tabacco)) on top of
-Restic, which adds additional metadata to Restic snapshots to map
-float datasets.
-
-When a service is scheduled on a new host, for instance as a result of
-a re-scheduling, float will attempt to restore the associated datasets
-from their backups. While this is not a practical failover solution
-for complex services, we've found it works pretty well for a category
-of services with "important, but small - can afford to lose one day of
-changes" datasets that is quite common and useful in itself. For these
-services, running with num_instances=1 and counting on the
-backup/restore data move mechanism might provide sufficient
-availability and reliability.
-
-Restores can of course also be triggered manually whenever necessary.
+will automatically make backups of your configured datasets. These
+aren't just used for disaster recovery, but are an integral part of
+float's service management approach: when a service is scheduled on a
+new host, for instance as a result of a re-scheduling, float will
+attempt to automatically restore the associated datasets from their
+backups. Restores can of course also be triggered manually whenever
+necessary.
+
+Float offers two backup mechanisms for datasets:
+
+* For bulk data, it can use its own backup management system
+  ([tabacco](https://git.autistici.org/ai3/tools/tabacco)) on top of
+  Restic, which adds metadata to Restic snapshots to map them back to
+  float datasets. This can be used as a primitive failover solution
+  for services that aren't "important" enough to warrant their own
+  distributed storage abstraction, and where losing up to one day of
+  changes is tolerable. (An alternative "live" solution, favoring
+  correctness over availability, is also in the works.) This backup
+  mechanism is *extensible*: it can be taught the structure and
+  metadata of specific services' entities and accounts, if necessary.
+
+* Float runs a number of instances of a specific category of service:
+  single-hosted, small API services that run off a simple SQLite
+  database, some of which are critical to float's operation (for
+  example the backup metadata service itself). For this particular
+  use case, float supports backups with
+  [Litestream](https://litestream.io), an asynchronous replication
+  solution for SQLite that offers point-in-time restore capabilities
+  (a data loss window of less than one second) in case of disaster or
+  when the service is rescheduled.
+
+  Litestream requires an S3-compatible backend (MinIO, AWS S3, etc.);
+  see the sketch after this list for how a dataset opts into it.
+
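+As a sketch (the service name and paths below are made up, and the
+field names are those read by the float-base-datasets role), a service
+using both mechanisms might declare its datasets like this:
+
+```yaml
+myservice:
+  scheduling_group: backend
+  num_instances: 1
+  # (containers and other service attributes omitted)
+  datasets:
+    # Bulk data, backed up with tabacco/Restic (the default type).
+    - name: uploads
+      path: /var/lib/myservice/uploads
+      owner: docker-myservice
+    # SQLite database, replicated asynchronously with Litestream.
+    - name: db
+      type: litestream
+      path: /var/lib/myservice
+      filename: myservice.db
+      owner: docker-myservice
+```
+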
+Note that float does not, in its default configuration, provide the
+data storage services used by its backup mechanisms. These are treated
+as third-party (external) resources.
 
 ### Volumes
 
@@ -2548,16 +2567,53 @@ but it will still be active and functional (via *amtool*).
 #### Backups
 
 To configure the backup system, you're going to need credentials for
-an external repository. The backup system
-uses [restic](https://restic.net), so check its documentation for the
-URI syntax.
+the third-party (external) data storage services. While it is possible
+to run a production service *without* backups configured, note that
+the cluster's functionality will be incomplete unless at least a
+Litestream backend is configured.
+
+##### Bulk backup (Restic)
+
+`backup_repository_uri` - URI of the global (shared) restic
+repository. Though Restic supports [numerous
+backends](https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html),
+float works best with Restic's own [REST
+Server](https://github.com/restic/rest-server).
+
+`backup_repository_restic_password` - password used to encrypt the
+restic repository.
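+
+An example of a (fictional) Restic configuration using a REST Server
+backend (check the Restic documentation for the exact URI syntax):
+
+```yaml
+backup_repository_uri: "rest:https://backup.example.org:8000/float"
+backup_repository_restic_password: "use-a-long-random-password"
+```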
 
-`backup_repository_uri` - URI of the global (shared) restic repository
+##### Asynchronous SQLite replication (Litestream)
 
-`backup_repository_restic_password` - the password used to encrypt
-the restic repository.
+Litestream requires an S3-compatible API to store its SQLite WAL
+snapshots.
 
+`backup_litestream_config` is the object that configures the
+Litestream replica target. It corresponds to a "replica" entry in the
+Litestream configuration, so check the [Litestream
+documentation](https://litestream.io/reference/config/#replica-settings)
+for reference. The most important fields to set are `endpoint` (the
+URL of the storage service API) and `bucket` (the name of the bucket
+to use). The `path` attribute is set automatically by float, based on
+the dataset name.
 
+`backup_litestream_credentials` is a dictionary of environment
+variables to configure credentials for access to the backend storage
+service. Keys will depend on which type of API is being used, but for
+the default *s3* type they should be `LITESTREAM_ACCESS_KEY_ID` and
+`LITESTREAM_SECRET_ACCESS_KEY`.
+
+An example of a (fictional) Litestream configuration:
+
+```yaml
+backup_litestream_config:
+  type: s3
+  endpoint: "https://backup.service:9000/"
+  bucket: "mybackups"
+backup_litestream_credentials:
+  LITESTREAM_ACCESS_KEY_ID: "minio"
+  LITESTREAM_SECRET_ACCESS_KEY: "miniopassword"
+```
 
 # Operations
 
diff --git a/docs/reference.pdf b/docs/reference.pdf
index 21e3dd193d87713ffb9c54b2a1827b97e1aa4181..37ae94a87a6bd8325dbcbd9de36b59a83aeb8576 100644
Binary files a/docs/reference.pdf and b/docs/reference.pdf differ
diff --git a/roles/float-base-datasets/files/float-dataset-restore b/roles/float-base-datasets/files/float-dataset-restore
new file mode 100755
index 0000000000000000000000000000000000000000..2af87876b742dea532e57c9b9ab99e55072b84f4
--- /dev/null
+++ b/roles/float-base-datasets/files/float-dataset-restore
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Restore a dataset (tag passed as command-line argument).
+#
+# Uses a guard file to ensure the restore runs only once
+# on a specific machine (or actually, once every time the
+# service is newly scheduled there).
+
+dataset_tag="$1"
+[ -z "${dataset_tag}" ] && exit 2
+
+umask 027
+
+guard_dir=/var/lib/float/datasets
+mkdir -p "${guard_dir}"
+
+guard_file="${guard_dir}/${dataset_tag}.restore_guard"
+restore_script="/usr/lib/float/datasets/restore-${dataset_tag}"
+
+if [ -e "${guard_file}" ]; then
+    echo "restore already ran for this dataset, skipping..." >&2
+    exit 0
+fi
+
+${restore_script} && {
+    echo "restore was successful" >&2
+    touch "${guard_file}"
+}
+
+exit $?
diff --git a/roles/float-base-datasets/tasks/dataset.yml b/roles/float-base-datasets/tasks/dataset.yml
index b71cee22d5365e45db610b8e8c60ea69b2bd4b1f..80e0cee04cd116683aaec3a1d681c7832a3bbbbd 100644
--- a/roles/float-base-datasets/tasks/dataset.yml
+++ b/roles/float-base-datasets/tasks/dataset.yml
@@ -6,19 +6,22 @@
 - set_fact:
     service: "{{ item.0 }}"
     dataset: "{{ item.1 }}"
-    dataset_name: "{{ item.0.name }}/{{ item.1.name }}"
-    dataset_filename: "{{ item.0.name }}_{{ item.1.name }}"
-    dataset_owner: "{{ item.1.get('owner', '') }}"
-    dataset_group: "{{ item.1.get('group', 'root') }}"
-    dataset_mode: "{{ item.1.get('mode', '0700') }}"
-    dataset_path: "{{ item.1.get('path', '') }}"
-    dataset_type: "{% if 'backup_command' in item.1 %}pipe{% else %}file{% endif %}"
-    dataset_is_present: "{{ (item.0.name in float_enabled_services) }}"
-    dataset_should_backup: "{{ (item.0.name in float_enabled_services) and ((not item.1.get('on_master_only', False)) or (item.0.get('master_host') == inventory_hostname)) }}"
 
 - set_fact:
-    dataset_should_restore: "{{ dataset_should_backup and not item.1.get('sharded', False) }}"
-    dataset_restore_unit: "restore-{{ dataset_filename }}.service"
+    dataset_name: "{{ service.name }}/{{ dataset.name }}"
+    dataset_tag: "{{ service.name }}_{{ dataset.name }}"
+    dataset_desired_owner: "{{ dataset.get('owner', '') }}"
+    dataset_owner: "root"
+    dataset_group: "{{ dataset.get('group', 'root') }}"
+    dataset_mode: "{{ dataset.get('mode', '0700') }}"
+    dataset_path: "{{ dataset.get('path', '') }}"
+    dataset_driver: "{{ dataset.get('type', 'tabacco') }}"
+    dataset_is_present: "{{ (service.name in float_enabled_services) }}"
+    dataset_should_backup: "{{ (service.name in float_enabled_services) and ((not dataset.get('on_master_only', False)) or (service.get('master_host') == inventory_hostname)) }}"
+
+- set_fact:
+    dataset_should_restore: "{{ dataset_should_backup and not dataset.get('sharded', False) }}"
+    dataset_restore_unit: "restore-{{ dataset_tag }}.service"
 
 - name: "Create path for dataset {{ dataset_name }}"
   file:
@@ -33,59 +36,46 @@
 - name: Check if the dataset owner exists
   getent:
     database: passwd
-    key: "{{ dataset_owner }}"
+    key: "{{ dataset_desired_owner }}"
     fail_key: false
-  when: "(dataset_is_present) and (dataset_path) and (dataset_owner)"
+  when: "(dataset_is_present) and (dataset_path) and (dataset_desired_owner)"
+
+# Make it so that 'dataset_owner' is always safe to use.
+- set_fact:
+    dataset_owner: "{{ dataset_desired_owner }}"
+  when: "dataset_is_present and dataset_path and dataset_desired_owner and getent_passwd.get(dataset_desired_owner)"
 
 - name: "Set permissions for dataset directory of {{ dataset_name }}"
   file:
     path: "{{ dataset_path }}"
     state: directory
     owner: "{{ dataset_owner }}"
-    group: "{{ dataset_group | default('root') }}"
-  when: "(dataset_is_present) and (dataset_path) and (dataset_owner) and (getent_passwd.get(dataset_owner))"
-
-- name: Set up configuration for dataset {{ dataset_name }} (source)
-  template:
-    src: "sources/source.yml.j2"
-    dest: "/etc/tabacco/sources/{{ dataset_filename }}.yml"
-    mode: 0600
-  when: dataset_should_backup
-  notify:
-    - reload backup agent
-
-- name: Set up configuration for dataset {{ dataset_name }} (handler)
-  template:
-    src: "handlers/{{ dataset_type }}.yml.j2"
-    dest: "/etc/tabacco/handlers/{{ dataset_filename }}.yml"
-    mode: 0600
-  when: dataset_should_backup and dataset_type == 'pipe'
-  notify:
-    - reload backup agent
+    group: "{{ dataset_group }}"
+  when: "dataset_is_present and dataset_path and dataset_desired_owner"
 
-- name: Clear configuration for dataset {{ dataset_name }}
-  file:
-    path: "/etc/tabacco/{{ diritem }}/{{ dataset_filename }}.yml"
-    state: absent
-  when: not dataset_should_backup
-  with_items:
-    - sources
-    - handlers
-  loop_control:
-    loop_var: diritem
+- include_tasks: dataset_tabacco.yml
+  when: "dataset_driver == 'tabacco'"
 
-- name: Create restore script
-  template:
-    src: "restore-script.j2"
-    dest: "/usr/lib/float/datasets/restore-{{ dataset_filename }}"
-    mode: 0755
-  when: dataset_should_restore
+- include_tasks: dataset_litestream.yml
+  when: "dataset_driver == 'litestream'"
 
+# Set up a restore unit that runs before the main service units (via a
+# Before= clause) to restore data from backups, if any. The unit runs a
+# driver-dependent restore script through a wrapper that uses a "guard
+# file" to ensure the restore only runs once, whenever the service is
+# newly scheduled on a host.
 - name: Create restore service unit
   template:
     src: "restore-service.j2"
     dest: "/etc/systemd/system/{{ dataset_restore_unit }}"
-    mode: 0444
+    mode: 0644
+  when: dataset_should_restore
+
+- name: Create restore script
+  template:
+    src: "{{ dataset_driver }}-restore-script.j2"
+    dest: "/usr/lib/float/datasets/restore-{{ dataset_tag }}"
+    mode: 0755
   when: dataset_should_restore
 
 - name: Enable restore service unit
@@ -95,12 +85,11 @@
     daemon_reload: yes
   when: dataset_should_restore
 
-# systemd disable is not idempotent, hence the ignore_errors.
 - name: Disable restore service unit
   systemd:
     name: "{{ dataset_restore_unit }}"
     enabled: no
-  when: "(not dataset_should_restore) and (dataset_restore_unit in loaded_restore_systemd_units.stdout_lines)"
+  when: "(not dataset_should_restore) and (dataset_restore_unit in loaded_backup_systemd_units.stdout_lines)"
 
 - name: Cleanup restore service unit
   file:
@@ -111,6 +100,6 @@
 
 - name: Wipe dataset restore guard file
   file:
-    path: "/var/lib/float/datasets/{{ dataset_filename }}.restore_guard"
+    path: "/var/lib/float/datasets/{{ dataset_tag }}.restore_guard"
     state: absent
   when: not dataset_should_backup
diff --git a/roles/float-base-datasets/tasks/dataset_litestream.yml b/roles/float-base-datasets/tasks/dataset_litestream.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7e312408100365843fdbe187df6e2621b1e85c09
--- /dev/null
+++ b/roles/float-base-datasets/tasks/dataset_litestream.yml
@@ -0,0 +1,83 @@
+---
+
+- set_fact:
+    dataset_filename: "{{ dataset.filename }}"
+    dataset_replica_url: "{{ backup_litestream_url | default('') }}/{{ dataset_tag }}"
+    dataset_replication_unit: "replicate-{{ dataset_tag }}.service"
+    # Just don't back up at all if litestream is not configured.
+    dataset_should_backup: "{{ dataset_should_backup and (backup_litestream_config is defined) }}"
+
+# Automatically set the replication path for s3-type configs. Create a
+# copy of backup_litestream_config that is specific to this dataset.
+- set_fact:
+    dataset_litestream_config: "{{ backup_litestream_config }}"
+  when: dataset_should_backup
+
+- set_fact:
+    dataset_litestream_config: "{{ dataset_litestream_config | combine({'path': dataset_tag}) }}"
+  when: "dataset_should_backup and backup_litestream_config.get('type', 's3') == 's3'"
+
+- set_fact:
+    litestream_config:
+      dbs:
+        - path: "{{ dataset_path }}/{{ dataset_filename }}"
+          replicas: ["{{ dataset_litestream_config }}"]
+  when: dataset_should_backup
+
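+# Illustrative example of the rendered /etc/litestream/<tag>.yml (the
+# replica values come from backup_litestream_config; names are made up):
+#
+#   dbs:
+#     - path: /var/lib/myservice/myservice.db
+#       replicas:
+#         - type: s3
+#           endpoint: https://backup.service:9000/
+#           bucket: mybackups
+#           path: myservice_db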
+- name: Create dataset litestream config
+  copy:
+    dest: "/etc/litestream/{{ dataset_tag }}.yml"
+    content: "{{ litestream_config | to_yaml }}\n"
+    owner: "{{ dataset_owner }}"
+    group: "{{ dataset_group }}"
+    mode: "{{ dataset_mode or '750' }}"
+  when: dataset_should_backup
+  register: ls_config
+
+- name: Create dataset litestream credentials config
+  template:
+    src: "litestream-env.j2"
+    dest: "/etc/litestream/{{ dataset_tag }}.env"
+    owner: "{{ dataset_owner }}"
+    group: "{{ dataset_group }}"
+    mode: "{{ dataset_mode or '750' }}"
+  when: dataset_should_backup
+  register: ls_env
+
+- name: Create dataset litestream replication systemd unit
+  template:
+    src: "litestream-replicate-service.j2"
+    dest: "/etc/systemd/system/{{ dataset_replication_unit }}"
+  when: dataset_should_backup
+  register: ls_unit
+
+# Since we can't parameterize handlers, we're forced to detect
+# needs-restart ourselves using the results from the previous tasks.
+- set_fact:
+    litestream_restart: "{{ ls_config.changed or ls_env.changed or ls_unit.changed }}"
+
+- name: Enable the litestream replication systemd unit
+  systemd:
+    name: "{{ dataset_replication_unit }}"
+    enabled: true
+    state: "{{ 'restarted' if litestream_restart else 'started' }}"
+    daemon_reload: true
+  when: dataset_should_backup
+
+- name: Disable the litestream replication systemd unit
+  systemd:
+    name: "{{ dataset_replication_unit }}"
+    enabled: false
+  when: "(not dataset_should_backup) and (dataset_replication_unit in loaded_backup_systemd_units.stdout_lines)"
+
+- name: Delete dataset litestream replication configs
+  file:
+    path: "{{ diritem }}"
+    state: absent
+  when: not dataset_should_backup
+  loop:
+    - "/etc/litestream/{{ dataset_tag }}.yml"
+    - "/etc/systemd/system/{{ dataset_replication_unit }}"
+  loop_control:
+    loop_var: diritem
+
diff --git a/roles/float-base-datasets/tasks/dataset_tabacco.yml b/roles/float-base-datasets/tasks/dataset_tabacco.yml
new file mode 100644
index 0000000000000000000000000000000000000000..902e524da6a64ae221ecaedacf0ddeb4c5c16c39
--- /dev/null
+++ b/roles/float-base-datasets/tasks/dataset_tabacco.yml
@@ -0,0 +1,47 @@
+---
+
+- set_fact:
+    dataset_type: "{{ 'pipe' if 'backup_command' in dataset else 'file' }}"
+    dataset_should_backup: "{{ dataset_should_backup and (backup_repository_uri is defined) }}"
+
+- name: Set up configuration for dataset {{ dataset_name }} (source)
+  template:
+    src: "sources/source.yml.j2"
+    dest: "/etc/tabacco/sources/{{ dataset_tag }}.yml"
+    mode: 0600
+  when: dataset_should_backup
+  notify:
+    - reload backup agent
+
+- name: Set up configuration for dataset {{ dataset_name }} (handler)
+  template:
+    src: "handlers/{{ dataset_type }}.yml.j2"
+    dest: "/etc/tabacco/handlers/{{ dataset_tag }}.yml"
+    mode: 0600
+  when: dataset_should_backup and dataset_type == 'pipe'
+  notify:
+    - reload backup agent
+
+- name: Clear configuration for dataset {{ dataset_name }}
+  file:
+    path: "/etc/tabacco/{{ diritem }}/{{ dataset_tag }}.yml"
+    state: absent
+  when: not dataset_should_backup
+  with_items:
+    - sources
+    - handlers
+  loop_control:
+    loop_var: diritem
+
+- name: Create restore script
+  template:
+    src: "tabacco-restore-script.j2"
+    dest: "/usr/lib/float/datasets/restore-{{ dataset_tag }}"
+    mode: 0755
+  when: dataset_should_restore
+
+- name: Delete restore script
+  file:
+    path: "/usr/lib/float/datasets/restore-{{ dataset_tag }}"
+    state: absent
+  when: not dataset_should_restore
diff --git a/roles/float-base-datasets/tasks/main.yml b/roles/float-base-datasets/tasks/main.yml
index a7a32a75479cac0ee016193d7285c6b499ca190e..1e1b94e9e01c3605dc947f89f056480053201013 100644
--- a/roles/float-base-datasets/tasks/main.yml
+++ b/roles/float-base-datasets/tasks/main.yml
@@ -35,15 +35,31 @@
     enabled: yes
   when: backup_repository_uri is defined
 
-- file:
-    path: /usr/lib/float/datasets
+- name: Create backup-related directories
+  file:
+    path: "{{ item }}"
     state: directory
+    owner: root
+    group: root
+    mode: 0755
+  loop:
+    - "/usr/lib/float/datasets"
+    - "/etc/litestream"
+
+- name: Create restore wrapper script
+  copy:
+    src: "float-dataset-restore"
+    dest: "/usr/lib/float/float-dataset-restore"
+    mode: 0755
 
-- name: Obtain list of restore service units
-  shell: "systemctl list-units --no-legend --no-pager --full --type service restore-\\* | awk '{print $1}'"
+# The Ansible systemd module fails when trying to disable a unit that
+# does not exist. To avoid such errors, gather the list of known
+# service units and use it later to check for existence.
+- name: Obtain list of backup-related service units
+  shell: "systemctl list-units --no-legend --no-pager --full --type service restore-\\* replicate-\\* | awk '{print $1}'"
   check_mode: no
   changed_when: false
-  register: loaded_restore_systemd_units
+  register: loaded_backup_systemd_units
 
 - include_tasks: dataset.yml
   loop: "{{ services | subelements('datasets', skip_missing=True) }}"
diff --git a/roles/float-base-datasets/templates/litestream-env.j2 b/roles/float-base-datasets/templates/litestream-env.j2
new file mode 100644
index 0000000000000000000000000000000000000000..2fefe3b29a7f513ed3c00942e6a81b065ee08435
--- /dev/null
+++ b/roles/float-base-datasets/templates/litestream-env.j2
@@ -0,0 +1,3 @@
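+{# Renders backup_litestream_credentials as KEY="value" lines; the file
+   is consumed via EnvironmentFile= by the litestream replication unit. #}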
+{% for var, value in backup_litestream_credentials | default({}) | dictsort %}
+{{ var }}="{{ value }}"
+{% endfor %}
diff --git a/roles/float-base-datasets/templates/litestream-replicate-service.j2 b/roles/float-base-datasets/templates/litestream-replicate-service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..053d4bfd5f2c5ae5e4f475d4b70201b70931e213
--- /dev/null
+++ b/roles/float-base-datasets/templates/litestream-replicate-service.j2
@@ -0,0 +1,24 @@
+{% set required_by = service.systemd_services | default([]) %}
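+{#
+  Runs "litestream replicate" for this dataset alongside the service's
+  own units: RequiredBy= makes the service units pull this unit in,
+  PartOf= propagates stop/restart from them, and After= orders startup.
+#}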
+
+[Unit]
+Description=Replicate dataset {{ dataset_name }}
+After={{ required_by | join(' ') }}
+PartOf={{ required_by | join(' ') }}
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=3
+EnvironmentFile=/etc/litestream/{{ dataset_tag }}.env
+ExecStart=/usr/bin/litestream replicate --config=/etc/litestream/{{ dataset_tag }}.yml
+{% if dataset_owner %}
+User={{ dataset_owner }}
+{% endif %}
+Group={{ dataset_group }}
+
+NoNewPrivileges=true
+ReadOnlyDirectories=/
+ReadWriteDirectories={{ dataset_path }}
+
+[Install]
+RequiredBy={{ required_by | join(' ') }}
diff --git a/roles/float-base-datasets/templates/litestream-restore-script.j2 b/roles/float-base-datasets/templates/litestream-restore-script.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4d0d28a91aa0f8eada5134e61cb1fcf1d14e365b
--- /dev/null
+++ b/roles/float-base-datasets/templates/litestream-restore-script.j2
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+{% if backup_litestream_url is defined %}
+# Restore the dataset {{ dataset_name }} using litestream.
+
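+# --if-replica-exists turns "no replica found yet" (e.g. a brand new
+# service with nothing to restore) into a successful no-op.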
+/usr/bin/litestream restore --config=/etc/litestream/{{ dataset_tag }}.yml --if-replica-exists -v "{{ dataset_path }}/{{ dataset_filename }}"
+
+if [ $? -gt 0 ]; then
+    echo "ERROR: restore failed!" >&2
+    exit 1
+fi
+
+{% if dataset_path and dataset_desired_owner %}
+chown -R "{{ dataset_desired_owner }}":"{{ dataset_group }}" "{{ dataset_path }}"
+{% endif %}
+{% endif %}
+
+exit 0
diff --git a/roles/float-base-datasets/templates/restore-service.j2 b/roles/float-base-datasets/templates/restore-service.j2
index 592871a7660513377a4cab487105d9c3c1d80359..07801edba25c17acef7ca94a6de409ee0cf96da5 100644
--- a/roles/float-base-datasets/templates/restore-service.j2
+++ b/roles/float-base-datasets/templates/restore-service.j2
@@ -5,13 +5,9 @@ Description=Restore dataset {{ dataset_name }}
 Before={{ required_by | join(' ') }}
 
 [Service]
-{% if backup_repository_uri is defined %}
-ExecStart=/usr/lib/float/datasets/restore-{{ dataset_filename }}
-{% else %}
-ExecStart=/bin/true
-{% endif %}
 Type=oneshot
 RemainAfterExit=true
+ExecStart=/usr/lib/float/float-dataset-restore {{ dataset_tag }}
 
 [Install]
 RequiredBy={{ required_by | join(' ') }}
diff --git a/roles/float-base-datasets/templates/restore-script.j2 b/roles/float-base-datasets/templates/tabacco-restore-script.j2
similarity index 50%
rename from roles/float-base-datasets/templates/restore-script.j2
rename to roles/float-base-datasets/templates/tabacco-restore-script.j2
index 3d05f23aa3970c2466edc2cc3404b3869005ab65..9158918f5f458795ef614ac184103a5d85ea53e4 100644
--- a/roles/float-base-datasets/templates/restore-script.j2
+++ b/roles/float-base-datasets/templates/tabacco-restore-script.j2
@@ -1,21 +1,7 @@
 #!/bin/sh
 
-# Restore the dataset {{ dataset_name }}.
-
-# Uses a guard file to ensure the restore runs only once
-# on a specific machine (or actually, once every time the
-# service is newly scheduled there).
-
-umask 077
-
-guard_dir=/var/lib/float/datasets
-mkdir -p ${guard_dir}
-
-guard_file="${guard_dir}/{{ dataset_filename }}.restore_guard"
-if [ -e "${guard_file}" ]; then
-    echo "restore already ran for this dataset, skipping..." >&2
-    exit 0
-fi
+{% if backup_repository_uri is defined %}
+# Restore the dataset {{ dataset_name }} using tabacco.
 
 # Use 'tabacco query' to detect if a backup of this dataset exists,
 # otherwise there's nothing to restore (the service might be new
@@ -25,6 +11,7 @@ ds=$(tabacco query "${ds_pattern}" 2>/dev/null)
 if [ "x${ds}" = "x[]" ]; then
     echo "could not find any backups for ${ds_pattern}" >&2
     echo "nothing to restore, skipping..." >&2
+    exit 0
 else
     echo "starting restore of ${ds_pattern}..." >&2
     tabacco restore --target / "${ds_pattern}"
@@ -34,11 +21,9 @@ else
     fi
 fi
 
-{% if dataset_path and dataset_owner %}
-chown -R "{{ dataset_owner }}":"{{ dataset_group }}" "{{ dataset_path }}"
+{% if dataset_path and dataset_desired_owner %}
+chown -R "{{ dataset_desired_owner }}":"{{ dataset_group }}" "{{ dataset_path }}"
+{% endif %}
 {% endif %}
-
-echo "marking restore successful" >&2
-touch "${guard_file}"
 
 exit 0
diff --git a/roles/float-base/tasks/apt.yml b/roles/float-base/tasks/apt.yml
index 41b5ffb680bc10cbf99bae2ac2273d2065e2cb73..688c869f629d5483dfdc13b90efb7a420e634b4b 100644
--- a/roles/float-base/tasks/apt.yml
+++ b/roles/float-base/tasks/apt.yml
@@ -109,6 +109,7 @@
       - logcat
       - tabacco
       - restic
+      - litestream
       - runcron
       - acpid
       - zstd
diff --git a/test/backup.ref/config-backup.yml b/test/backup.ref/config-backup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..380578ba3707719cad3ae55a40533a9a648ea87b
--- /dev/null
+++ b/test/backup.ref/config-backup.yml
@@ -0,0 +1,9 @@
+---
+backup_litestream_config:
+  type: s3
+  endpoint: "http://backup:9000/"
+  bucket: "backuptest"
+backup_litestream_credentials:
+  LITESTREAM_ACCESS_KEY_ID: "minio"
+  LITESTREAM_SECRET_ACCESS_KEY: "miniopassword"
+
diff --git a/test/backup.ref/passwords.yml b/test/backup.ref/passwords.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7bdaf7c10ca8171663b0672fdc61d901650bab2b
--- /dev/null
+++ b/test/backup.ref/passwords.yml
@@ -0,0 +1,2 @@
+---
+- include: ../../passwords.yml.default
diff --git a/test/backup.ref/services.yml b/test/backup.ref/services.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8db1002a3f0a6db315a1f1f2cd6a57c8885046e4
--- /dev/null
+++ b/test/backup.ref/services.yml
@@ -0,0 +1,43 @@
+---
+
+include:
+  - "../../services.yml.no-elasticsearch"
+
+ok:
+  scheduling_group: backend
+  containers:
+    - name: http
+      image: registry.git.autistici.org/ai3/docker/okserver:latest
+      port: 3100
+      env:
+        PORT: 3100
+      resources:
+        ram: 1g
+        cpu: 0.5
+  public_endpoints:
+    - name: ok
+      port: 3100
+      scheme: http
+
+backup:
+  scheduling_group: backend
+  num_instances: 1
+  containers:
+    - name: s3
+      image: quay.io/minio/minio
+      port: 9000
+      env:
+        HOME: /data
+        MINIO_ROOT_USER: minio
+        MINIO_ROOT_PASSWORD: miniopassword
+      args: "server /data --console-address :9001"
+      volumes:
+        - /var/lib/backup: /data
+  ports:
+    - 9000
+  volumes:
+    - name: backup
+      path: /var/lib/backup
+      owner: docker-backup
+      size: 2g
+
diff --git a/test/backup.ref/site.yml b/test/backup.ref/site.yml
new file mode 100644
index 0000000000000000000000000000000000000000..601f945b9ad12cbbbed11a7f58c93575500d8572
--- /dev/null
+++ b/test/backup.ref/site.yml
@@ -0,0 +1,10 @@
+---
+
+- import_playbook: "../../playbooks/all.yml"
+
+- hosts: backup
+  tasks:
+    - name: Create the test bucket
+      run_once: true
+      command: "podman run --env MC_HOST_local=http://minio:miniopassword@backup:9000 --network host --rm quay.io/minio/mc mb local/backuptest"
+