manager.go
package tabacco

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"git.autistici.org/ale/tabacco/jobs"
	"git.autistici.org/ale/tabacco/util"
)

// Manager for backups and restores.
type tabaccoManager struct {
	*jobs.ExclusiveLockManager
	*jobs.QueueManager
	*jobs.StateManager
	*workdirManager

	configMgr *ConfigManager
	ms        MetadataStore
}

// NewManager creates a new Manager.
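//
// A typical call sequence might look like this (sketch only; building
// the ConfigManager and MetadataStore is not shown):
//
//	mgr, err := NewManager(ctx, configMgr, ms)
//	if err != nil {
//		return err
//	}
//	defer mgr.Close()
//	backup, err := mgr.Backup(ctx, sourceSpecs)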
func NewManager(ctx context.Context, configMgr *ConfigManager, ms MetadataStore) (Manager, error) {
	// If we can't create a workdirManager, it probably means we
	// don't have permissions to the WorkDir, which is bad.
	wm, err := newWorkdirManager(configMgr.getWorkDir())
	if err != nil {
		return nil, err
	}

	// Note: the queue configuration won't be reloaded.
	return &tabaccoManager{
		ExclusiveLockManager: jobs.NewExclusiveLockManager(),
		QueueManager:         jobs.NewQueueManager(configMgr.getQueueSpec()),
		StateManager:         jobs.NewStateManager(),
		workdirManager:       wm,

		configMgr: configMgr,
		ms:        ms,
	}, nil
}

// Close the Manager and free all associated resources (those owned by
// this object).
func (m *tabaccoManager) Close() error {
	return nil
}

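// metadataJob wraps a backup job and, when the wrapped job succeeds,
// records the resulting dataset in the MetadataStore. Metadata errors
// are logged but do not fail the backup itself.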
type metadataJob struct {
	jobs.Job
	ms     MetadataStore
	backup Backup
	ds     Dataset
}

func (j *metadataJob) RunContext(ctx context.Context) error {
	err := j.Job.RunContext(ctx)
	if err == nil {
		if merr := j.ms.AddDataset(ctx, j.backup, j.ds); merr != nil {
			log.Printf("%s: error saving metadata: %v", j.ds.Name, merr)
		}
	}
	return err
}

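// withMetadata decorates a job so that its successful completion is
// recorded in the metadata store.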
func (m *tabaccoManager) withMetadata(j jobs.Job, backup Backup, ds Dataset) jobs.Job {
	return &metadataJob{
		Job:    j,
		ms:     m.ms,
		backup: backup,
		ds:     ds,
	}
}

// Prepare the repository for a new backup. This is a synchronous
// operation: we need to wait for it to complete to avoid running the
// backup tasks too soon.
func (m *tabaccoManager) prepareBackupJob(backup Backup) jobs.Job {
	return jobs.JobFunc(func(ctx context.Context) error {
		repo := m.configMgr.getRepository()
		if err := repo.Init(ctx); err != nil {
			log.Printf("repository init failed: %v", err)
			return err
		}
		log.Printf("preparing backup %s", backup.ID)
		return repo.Prepare(ctx, backup)
	})
}

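// backupDatasetJob assembles the backup job for a single dataset:
// the optional pre/post backup commands around the handler's backup
// jobs, the whole thing wrapped with a working directory, per-dataset
// instrumentation, an exclusive lock, a status entry, and the 'backup'
// queue.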
func (m *tabaccoManager) backupDatasetJob(h Handler, backup Backup, ds Dataset) jobs.Job {
	var out []jobs.Job

	// Let Handlers modify the Dataset if necessary, or generate
	// more than one.
	dsl := h.DatasetsForBackup(ds)

	// Run pre_backup_command.
	if cmd := h.Spec().PreBackupCommand; cmd != "" {
		out = append(out, m.datasetCommandJob(cmd, backup, ds))
	}

	// The actual backup operation. Assemble all the backup jobs
	// for the datasets in 'dsl', and add them to an AsyncGroup.
	//
	// TODO: get the timeout from the SourceSpec.
	var backupJobs []jobs.Job
	for _, realDS := range dsl {
		backupJobs = append(backupJobs, m.withMetadata(
			h.BackupJob(m.configMgr.getRepository(), backup, realDS),
			backup,
			realDS,
		))
	}
	out = append(out, jobs.WithTimeout(
		jobs.AsyncGroup(backupJobs),
		24*time.Hour,
	))

	// Run post_backup_command.
	if cmd := h.Spec().PostBackupCommand; cmd != "" {
		out = append(out, m.datasetCommandJob(cmd, backup, ds))
	}

	// Group the jobs (sequentially) if there's more than one of
	// them. Give the final job a status and a user-visible name,
	// for debugging purposes. Set an exclusive lock with a
	// leave-running policy, so no more than one backup per
	// datasource can run at any given time. Finally, the job runs
	// in the 'backup' queue for concurrency limiting.
	//
	// Oh, and here is where we add per-dataset instrumentation.
	id := fmt.Sprintf("backup-dataset-%s", ds.Name)
	return m.WithQueue(
		m.WithStatus(
			m.WithExclusiveLock(
				m.withWorkDir(
					withInstrumentation(
						jobs.SyncGroup(out),
						ds.Name,
					),
				),
				id,
				false),
			id,
		),
		"backup",
	)
}

// BackupJob returns a single Job that backs up one or more sources to
// the configured destination repository.
func (m *tabaccoManager) BackupJob(ctx context.Context, sourceSpecs []SourceSpec) (Backup, jobs.Job, error) {
	// Parse the source specs and obtain Datasets. Errors here are
	// accumulated but *not* fatal, unless every single source fails
	// to parse (see below).

	backup := newBackup("")
	prepJob := m.prepareBackupJob(backup)
	var backupJobs []jobs.Job

	merr := new(util.MultiError)
	for _, spec := range sourceSpecs {
		h, ok := m.configMgr.getHandler(spec.Handler)
		if !ok {
			return Backup{}, nil, fmt.Errorf("inconsistency: no '%s' handler", spec.Handler)
		}

		ds, err := spec.Parse(ctx)
		if err != nil {
			merr.Add(err)
			continue
		}

		// Create the backup job and add it to our list.
		backupJobs = append(backupJobs, m.backupDatasetJob(h, backup, ds))
	}

	// If every source failed to parse, return the accumulated
	// errors instead of a job that would do nothing.
	if len(backupJobs) == 0 && len(sourceSpecs) > 0 {
		return Backup{}, nil, merr
	}

	// Run the job to initialize the repository before anything else.
	j := m.WithStatus(
		jobs.SyncGroup([]jobs.Job{
			prepJob,
			jobs.AsyncGroup(backupJobs),
		}),
		fmt.Sprintf("backup-%s", backup.ID),
	)
	return backup, j, nil
}

// Backup just runs the BackupJob synchronously.
func (m *tabaccoManager) Backup(ctx context.Context, sourceSpecs []SourceSpec) (Backup, error) {
	backup, job, err := m.BackupJob(ctx, sourceSpecs)
	if err != nil {
		return backup, err
	}
	err = job.RunContext(ctx)
	return backup, err
}

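// restoreDatasetJob assembles the restore job for a single dataset,
// mirroring backupDatasetJob: the optional pre/post restore commands
// around the handler's restore jobs, wrapped with an exclusive lock,
// a status entry, and the 'restore' queue.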
func (m *tabaccoManager) restoreDatasetJob(h Handler, backup Backup, ds Dataset, target string) jobs.Job {
	var out []jobs.Job

	dsl := h.DatasetsForRestore(ds)

	// Run pre_restore_command.
	if cmd := h.Spec().PreRestoreCommand; cmd != "" {
		out = append(out, m.datasetCommandJob(cmd, backup, ds))
	}

	// The actual restore operation: ask the handler for one restore
	// job per dataset, and run them concurrently.
	var restoreJobs []jobs.Job
	for _, realDS := range dsl {
		restoreJobs = append(restoreJobs, h.RestoreJob(m.configMgr.getRepository(), backup, realDS, target))
	}
	out = append(out, jobs.AsyncGroup(restoreJobs))

	// Run post_restore_command.
	if cmd := h.Spec().PostRestoreCommand; cmd != "" {
		out = append(out, m.datasetCommandJob(cmd, backup, ds))
	}

	// Group the jobs (sequentially) if there's more than one of
	// them. Give the final job a status and a user-visible name,
	// for debugging purposes. Set an exclusive lock with a
	// leave-running policy, so no more than one restore per
	// datasource can run at any given time. Finally, the job runs
	// in the 'restore' queue for concurrency limiting.
	id := fmt.Sprintf("restore_%s", ds.Name)
	return m.WithQueue(
		m.WithStatus(
			m.WithExclusiveLock(jobs.SyncGroup(out), id, false),
			id,
		),
		"restore",
	)
}

// RestoreJob creates a job that restores the results of the
// FindRequest (with NumVersions=1) onto the given target directory.
func (m *tabaccoManager) RestoreJob(ctx context.Context, req FindRequest, target string) (jobs.Job, error) {
	// Find the atoms relevant to this restore.
	req.NumVersions = 1
	versions, err := m.ms.FindAtoms(ctx, req)
	if err != nil {
		return nil, err
	}

	var restoreJobs []jobs.Job
	for _, vv := range versions {
		ds := vv[0].Dataset
		backup := vv[0].Backup

		h, ok := m.configMgr.getHandler(ds.Handler)
		if !ok {
			log.Printf("%s: unknown handler '%s'", ds.Name, ds.Handler)
			continue
		}

		restoreJobs = append(restoreJobs, m.restoreDatasetJob(h, backup, ds, target))
	}

	return m.WithStatus(jobs.AsyncGroup(restoreJobs), fmt.Sprintf("restore_%s", util.RandomID())), nil
}

// Restore just runs the RestoreJob synchronously.
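//
// For example (sketch only; how the FindRequest matches datasets is up
// to the MetadataStore):
//
//	req := FindRequest{ /* criteria selecting the datasets */ }
//	if err := mgr.Restore(ctx, req, "/var/tmp/restore"); err != nil {
//		log.Printf("restore failed: %v", err)
//	}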
func (m *tabaccoManager) Restore(ctx context.Context, req FindRequest, target string) error {
	job, err := m.RestoreJob(ctx, req, target)
	if err != nil {
		return err
	}
	return job.RunContext(ctx)
}

// Create a new Backup object with its own unique ID (which actually
// consists of 16 random bytes, hex-encoded).
func newBackup(host string) Backup {
	if host == "" {
		host, _ = os.Hostname() // nolint
	}
	return Backup{
		ID:        util.RandomID(),
		Host:      host,
		Timestamp: time.Now(),
	}
}

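// datasetCommandJob returns a job that runs a user-configured shell
// command with BACKUP_ID and DATASET_NAME set in its environment.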
func (m *tabaccoManager) datasetCommandJob(cmd string, backup Backup, ds Dataset) jobs.Job {
	env := map[string]string{
		"BACKUP_ID":    backup.ID,
		"DATASET_NAME": ds.Name,
	}
	return jobs.JobFunc(func(ctx context.Context) error {
		return m.configMgr.getShell().RunWithEnv(ctx, cmd, env)
	})
}