Added syncoid service

Sets up ZFS replication. The syncoid module handles configuration for both
the remote and the target, but I'm thinking I should split this up in the
future.
This commit is contained in:
Alejandro Angulo 2023-08-03 17:21:58 -07:00
parent 30a6e58691
commit ff6fa1c1f3
Signed by: alejandro-angulo
GPG key ID: 75579581C74554B6
4 changed files with 108 additions and 1 deletions

View file

@ -0,0 +1,80 @@
{
  options,
  config,
  pkgs,
  lib,
  ...
}:
with lib; let
  cfg = config.aa.services.syncoid;
  # This host acts as a replication target only when a target user is named.
  isRemoteTarget = cfg.remoteTargetUser != "";
in {
  options.aa.services.syncoid = with types; {
    enable = mkEnableOption "syncoid (ZFS snap replication)";
    commands = mkOption {
      type = attrs;
      default = {};
      description = "Commands to pass directly to syncoid, see `services.syncoid.commands`";
    };
    remoteTargetUser = mkOption {
      type = str;
      default = "";
      description = "The user to use on the target machine.";
    };
    remoteTargetDatasets = mkOption {
      type = listOf str;
      default = [];
      description = "Datasets to be used as a remote target (e.g. a NAS's backups dataset)";
    };
    remoteTargetPublicKeys = mkOption {
      type = listOf str;
      default = [];
      description = "SSH public keys that the syncoid service's user should trust";
    };
  };

  config = mkIf cfg.enable {
    services.syncoid = {
      enable = true;
      # Forward our `commands` option verbatim to the upstream module.
      commands = mkAliasDefinitions options.aa.services.syncoid.commands;
    };

    # Transport helpers syncoid invokes over SSH; only needed on hosts
    # that receive replication streams.
    environment.systemPackages = mkIf isRemoteTarget [
      pkgs.lzop
      pkgs.mbuffer
    ];

    users = mkIf isRemoteTarget {
      users."${cfg.remoteTargetUser}" = {
        # An interactive shell is required so syncoid can run commands
        # on this host over SSH.
        shell = pkgs.bashInteractive;
        group = cfg.remoteTargetUser;
        isSystemUser = true;
        home = "/var/lib/${cfg.remoteTargetUser}";
        createHome = true;
        openssh.authorizedKeys.keys = cfg.remoteTargetPublicKeys;
      };
      groups."${cfg.remoteTargetUser}" = {};
    };

    # Delegate the ZFS permissions the unprivileged target user needs in
    # order to receive snapshots.  Guarded with `isRemoteTarget` so
    # source-only hosts don't get a unit that runs `zfs allow -u ""`.
    systemd.services.setup-syncoid-remote = mkIf isRemoteTarget {
      description = "Permission setup for syncoid remote targets";
      documentation = ["https://github.com/jimsalterjrs/sanoid/wiki/Syncoid#running-without-root"];
      wantedBy = ["multi-user.target"];
      path = [pkgs.zfs];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = "yes";
      };
      # Values interpolated into the shell script are escaped so a stray
      # quote or space in configuration cannot break (or inject into) it.
      script = ''
        DATASETS=(${escapeShellArgs cfg.remoteTargetDatasets})
        for dataset in "''${DATASETS[@]}"; do
          zfs allow \
            -u ${escapeShellArg cfg.remoteTargetUser} \
            compression,mountpoint,create,mount,receive,rollback,destroy \
            "$dataset"
        done
      '';
    };
  };
}

View file

@@ -16,7 +16,12 @@ in {
   config = mkIf cfg.enable {
     services.zfs = {
       autoScrub.enable = true;
-      autoSnapshot.enable = true;
+      # Still need to set `com.sun:auto-snapshot` to `true` on datasets
+      # zfs set com.sun:auto-snapshot=true pool/dataset
+      autoSnapshot = {
+        enable = true;
+        flags = "-k -p --utc";
+      };
     };
   };
 }

View file

@@ -41,6 +41,21 @@
     configureClientRouting = true;
     configureServerRouting = true;
   };
+  services.syncoid = {
+    enable = true;
+    commands = {
+      "bpool" = {
+        target = "backups@192.168.113.13:tank/backups/gospel/bpool";
+        recursive = true;
+        sshKey = "/var/lib/syncoid/.ssh/id_ed25519";
+      };
+      "rpool" = {
+        target = "backups@192.168.113.13:tank/backups/gospel/rpool";
+        recursive = true;
+        sshKey = "/var/lib/syncoid/.ssh/id_ed25519";
+      };
+    };
+  };
   hardware.audio.enable = true;
   hardware.bluetooth.enable = true;
View file

@@ -28,6 +28,13 @@
     enable = true;
     acmeCertName = "kilonull.com";
   };
+  services.syncoid = {
+    # sudo -u backups zfs create -o mountpoint=/tank/backups/gospel tank/backups/gospel
+    enable = true;
+    remoteTargetUser = "backups";
+    remoteTargetDatasets = ["tank/backups"];
+    remoteTargetPublicKeys = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhA+9O2OBMDH1Xnj6isu36df5TOdZG8aEA4JpN2K60e syncoid@gospel"];
+  };
   security.acme = {
     enable = true;