From 86607fb46584735a5c947542657c3bb692f01fce Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 7 Aug 2021 13:45:56 -0500 Subject: [PATCH 01/53] Refactor data to settings (specificity) --- src/cli.rs | 16 ++++++++-------- src/lib.rs | 14 +++++++------- src/{data.rs => settings.rs} | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) rename src/{data.rs => settings.rs} (99%) diff --git a/src/cli.rs b/src/cli.rs index 61890e43..68ac6c97 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,7 +10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{DeployFlake, ParseFlakeError}; +use self::deploy::{DeployFlake, ParseFlakeError, settings}; use futures_util::stream::{StreamExt, TryStreamExt}; use log::{debug, error, info, warn}; use serde::Serialize; @@ -170,7 +170,7 @@ async fn get_deployment_data( supports_flakes: bool, flakes: &[deploy::DeployFlake<'_>], extra_build_args: &[String], -) -> Result, GetDeploymentDataError> { +) -> Result, GetDeploymentDataError> { futures_util::stream::iter(flakes).then(|flake| async move { info!("Evaluating flake in {}", flake.repo); @@ -389,14 +389,14 @@ pub enum RunDeployError { type ToDeploy<'a> = Vec<( &'a deploy::DeployFlake<'a>, - &'a deploy::data::Data, - (&'a str, &'a deploy::data::Node), - (&'a str, &'a deploy::data::Profile), + &'a settings::Root, + (&'a str, &'a settings::Node), + (&'a str, &'a settings::Profile), )>; async fn run_deploy( deploy_flakes: Vec>, - data: Vec, + data: Vec, supports_flakes: bool, check_sigs: bool, interactive: bool, @@ -437,7 +437,7 @@ async fn run_deploy( None => return Err(RunDeployError::NodeNotFound(node_name.clone())), }; - let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new(); + let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); for profile_name in [ node.node_settings.profiles_order.iter().collect(), @@ -466,7 +466,7 @@ async fn run_deploy( let mut l = Vec::new(); for (node_name, node) in &data.nodes { - let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new(); + let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); for profile_name in [ node.node_settings.profiles_order.iter().collect(), diff --git a/src/lib.rs b/src/lib.rs index 981ec1ed..630df179 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -145,7 +145,7 @@ pub fn init_logger( Ok(()) } -pub mod data; +pub mod settings; pub mod deploy; pub mod push; pub mod cli; @@ -312,13 +312,13 @@ fn test_parse_flake() { #[derive(Debug, Clone)] pub struct DeployData<'a> { pub node_name: &'a str, - pub node: &'a data::Node, + pub node: &'a settings::Node, pub profile_name: &'a str, - pub profile: &'a data::Profile, + pub profile: &'a settings::Profile, pub cmd_overrides: &'a CmdOverrides, - pub merged_settings: data::GenericSettings, + pub merged_settings: settings::GenericSettings, pub debug_logs: bool, pub log_dir: Option<&'a str>, @@ -395,10 +395,10 @@ impl<'a> DeployData<'a> { } pub fn make_deploy_data<'a, 's>( - top_settings: &'s data::GenericSettings, - node: &'a data::Node, + top_settings: &'s settings::GenericSettings, + node: &'a settings::Node, node_name: &'a str, - profile: &'a data::Profile, + profile: &'a settings::Profile, profile_name: &'a str, cmd_overrides: &'a CmdOverrides, debug_logs: bool, diff --git a/src/data.rs b/src/settings.rs similarity index 99% rename from src/data.rs rename to src/settings.rs index 6fe7f75f..9ce50a0f 100644 --- a/src/data.rs +++ b/src/settings.rs @@ -66,7 +66,7 @@ pub struct Node { } #[derive(Deserialize, Debug, Clone)] -pub 
struct Data { +pub struct Root { #[serde(flatten)] pub generic_settings: GenericSettings, pub nodes: HashMap, From 18eba4597397c6ceaa93fc70d98625da073b95fd Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Tue, 10 Aug 2021 16:02:49 -0600 Subject: [PATCH 02/53] Expand environment variables in sshOpts --- Cargo.lock | 17 +++++++++++++++++ Cargo.toml | 1 + src/settings.rs | 23 +++++++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08ac3bfc..c1b1060e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -136,6 +136,7 @@ name = "deploy-rs" version = "0.1.0" dependencies = [ "clap", + "envmnt", "flexi_logger", "fork", "futures-util", @@ -154,6 +155,16 @@ dependencies = [ "yn", ] +[[package]] +name = "envmnt" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfac51e9996e41d78a943227b7f313efcebf545b21584a0e213b956a062e11e" +dependencies = [ + "fsio", + "indexmap", +] + [[package]] name = "filetime" version = "0.2.13" @@ -210,6 +221,12 @@ dependencies = [ "libc", ] +[[package]] +name = "fsio" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a50045aa8931ae01afbc5d72439e8f57f326becb8c70d07dfc816778eff3d167" + [[package]] name = "fuchsia-zircon" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index 0ded1259..cdeeb6af 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ tokio = { version = "1.9.0", features = [ "full" ] } toml = "0.5" whoami = "0.9.0" yn = "0.1" +envmnt = "0.9.0" # smol_str is required by rnix, but 0.1.17 doesn't build on rustc # 1.45.2 (shipped in nixos-20.09); it requires rustc 1.46.0. See diff --git a/src/settings.rs b/src/settings.rs index 9ce50a0f..3bba2cac 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -2,8 +2,9 @@ // // SPDX-License-Identifier: MPL-2.0 +use envmnt::{self, ExpandOptions, ExpansionType}; use merge::Merge; -use serde::Deserialize; +use serde::{Deserialize, Deserializer}; use std::collections::HashMap; #[derive(Deserialize, Debug, Clone, Merge)] @@ -14,7 +15,8 @@ pub struct GenericSettings { #[serde( skip_serializing_if = "Vec::is_empty", default, - rename(deserialize = "sshOpts") + rename(deserialize = "sshOpts"), + deserialize_with = "GenericSettings::de_ssh_opts" )] #[merge(strategy = merge::vec::append)] pub ssh_opts: Vec, @@ -30,6 +32,23 @@ pub struct GenericSettings { pub magic_rollback: Option, } +impl GenericSettings { + fn de_ssh_opts<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let buf: Vec = Vec::deserialize(deserializer)?; + + let mut options = ExpandOptions::new(); + options.expansion_type = Some(ExpansionType::UnixBrackets); + + Ok(buf + .into_iter() + .map(|opt| envmnt::expand(&opt, Some(options))) + .collect()) + } +} + #[derive(Deserialize, Debug, Clone)] pub struct NodeSettings { pub hostname: String, From e6c50d912d61faabd779228352de23909f34b78a Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 7 Aug 2021 14:33:52 -0500 Subject: [PATCH 03/53] Refactor data structures into their own module - preparation for a more view based data access --- src/cli.rs | 44 ++++---- src/data.rs | 300 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/deploy.rs | 16 +-- src/lib.rs | 296 +------------------------------------------------ src/push.rs | 6 +- 5 files changed, 335 insertions(+), 327 deletions(-) create mode 100644 src/data.rs diff --git a/src/cli.rs b/src/cli.rs index 68ac6c97..593ed06b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,7
+10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{DeployFlake, ParseFlakeError, settings}; +use self::deploy::{data, settings}; use futures_util::stream::{StreamExt, TryStreamExt}; use log::{debug, error, info, warn}; use serde::Serialize; @@ -168,7 +168,7 @@ pub enum GetDeploymentDataError { /// Evaluates the Nix in the given `repo` and return the processed Data from it async fn get_deployment_data( supports_flakes: bool, - flakes: &[deploy::DeployFlake<'_>], + flakes: &[data::DeployFlake<'_>], extra_build_args: &[String], ) -> Result, GetDeploymentDataError> { futures_util::stream::iter(flakes).then(|flake| async move { @@ -272,9 +272,9 @@ struct PromptPart<'a> { fn print_deployment( parts: &[( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )], ) -> Result<(), toml::ser::Error> { let mut part_map: HashMap> = HashMap::new(); @@ -315,9 +315,9 @@ pub enum PromptDeploymentError { fn prompt_deployment( parts: &[( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )], ) -> Result<(), PromptDeploymentError> { print_deployment(parts)?; @@ -378,7 +378,7 @@ pub enum RunDeployError { #[error("Profile was provided without a node name")] ProfileWithoutNode, #[error("Error processing deployment definitions: {0}")] - DeployDataDefs(#[from] deploy::DeployDataDefsError), + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), #[error("Failed to make printable TOML of deployment: {0}")] TomlFormat(#[from] toml::ser::Error), #[error("{0}")] @@ -388,19 +388,19 @@ pub enum RunDeployError { } type ToDeploy<'a> = Vec<( - &'a deploy::DeployFlake<'a>, + &'a data::DeployFlake<'a>, &'a settings::Root, (&'a str, &'a settings::Node), (&'a str, &'a settings::Profile), )>; async fn run_deploy( - deploy_flakes: Vec>, + deploy_flakes: Vec>, data: Vec, supports_flakes: bool, check_sigs: bool, interactive: bool, - cmd_overrides: &deploy::CmdOverrides, + cmd_overrides: &data::CmdOverrides, keep_result: bool, result_path: Option<&str>, extra_build_args: &[String], @@ -508,13 +508,13 @@ async fn run_deploy( .collect(); let mut parts: Vec<( - &deploy::DeployFlake<'_>, - deploy::DeployData, - deploy::DeployDefs, + &data::DeployFlake<'_>, + data::DeployData, + data::DeployDefs, )> = Vec::new(); for (deploy_flake, data, (node_name, node), (profile_name, profile)) in to_deploy { - let deploy_data = deploy::make_deploy_data( + let deploy_data = data::make_deploy_data( &data.generic_settings, node, node_name, @@ -550,7 +550,7 @@ async fn run_deploy( .await?; } - let mut succeeded: Vec<(&deploy::DeployData, &deploy::DeployDefs)> = vec![]; + let mut succeeded: Vec<(&data::DeployData, &data::DeployDefs)> = vec![]; // Run all deployments // In case of an error rollback any previoulsy made deployment. 
@@ -595,7 +595,7 @@ pub enum RunError { #[error("Failed to evaluate deployment data: {0}")] GetDeploymentData(#[from] GetDeploymentDataError), #[error("Error parsing flake: {0}")] - ParseFlake(#[from] deploy::ParseFlakeError), + ParseFlake(#[from] data::ParseFlakeError), #[error("Error initiating logger: {0}")] Logger(#[from] flexi_logger::FlexiLoggerError), #[error("{0}")] @@ -619,12 +619,12 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { .targets .unwrap_or_else(|| vec![opts.clone().target.unwrap_or_else(|| ".".to_string())]); - let deploy_flakes: Vec = deploys + let deploy_flakes: Vec = deploys .iter() - .map(|f| deploy::parse_flake(f.as_str())) - .collect::, ParseFlakeError>>()?; + .map(|f| data::parse_flake(f.as_str())) + .collect::, data::ParseFlakeError>>()?; - let cmd_overrides = deploy::CmdOverrides { + let cmd_overrides = data::CmdOverrides { ssh_user: opts.ssh_user, profile_user: opts.profile_user, ssh_opts: opts.ssh_opts, diff --git a/src/data.rs b/src/data.rs new file mode 100644 index 00000000..86e1b6cd --- /dev/null +++ b/src/data.rs @@ -0,0 +1,300 @@ +// SPDX-FileCopyrightText: 2020 Serokell +// SPDX-FileCopyrightText: 2021 Yannik Sander +// +// SPDX-License-Identifier: MPL-2.0 + +use rnix::{types::*, SyntaxKind::*}; +use merge::Merge; +use thiserror::Error; + +use crate::settings; + +#[derive(PartialEq, Debug)] +pub struct DeployFlake<'a> { + pub repo: &'a str, + pub node: Option, + pub profile: Option, +} + +#[derive(Error, Debug)] +pub enum ParseFlakeError { + #[error("The given path was too long, did you mean to put something in quotes?")] + PathTooLong, + #[error("Unrecognized node or token encountered")] + Unrecognized, +} + +pub fn parse_flake(flake: &str) -> Result { + let flake_fragment_start = flake.find('#'); + let (repo, maybe_fragment) = match flake_fragment_start { + Some(s) => (&flake[..s], Some(&flake[s + 1..])), + None => (flake, None), + }; + + let mut node: Option = None; + let mut profile: Option = None; + + if let Some(fragment) = maybe_fragment { + let ast = rnix::parse(fragment); + + let first_child = match ast.root().node().first_child() { + Some(x) => x, + None => { + return Ok(DeployFlake { + repo, + node: None, + profile: None, + }) + } + }; + + let mut node_over = false; + + for entry in first_child.children_with_tokens() { + let x: Option = match (entry.kind(), node_over) { + (TOKEN_DOT, false) => { + node_over = true; + None + } + (TOKEN_DOT, true) => { + return Err(ParseFlakeError::PathTooLong); + } + (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), + (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), + (NODE_STRING, _) => { + let c = entry + .into_node() + .unwrap() + .children_with_tokens() + .nth(1) + .unwrap(); + + Some(c.into_token().unwrap().text().to_string()) + } + _ => return Err(ParseFlakeError::Unrecognized), + }; + + if !node_over { + node = x; + } else { + profile = x; + } + } + } + + Ok(DeployFlake { + repo, + node, + profile, + }) +} + +#[test] +fn test_parse_flake() { + assert_eq!( + parse_flake("../deploy/examples/system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: 
Some("computer".to_string()), + profile: Some("something.nix".to_string()), + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example.com".to_string()), + profile: Some("system".to_string()), + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#example").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example".to_string()), + profile: None + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system#example.system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: Some("example".to_string()), + profile: Some("system".to_string()) + } + ); + + assert_eq!( + parse_flake("../deploy/examples/system").unwrap(), + DeployFlake { + repo: "../deploy/examples/system", + node: None, + profile: None, + } + ); +} + +#[derive(Debug)] +pub struct CmdOverrides { + pub ssh_user: Option, + pub profile_user: Option, + pub ssh_opts: Option, + pub fast_connection: Option, + pub auto_rollback: Option, + pub hostname: Option, + pub magic_rollback: Option, + pub temp_path: Option, + pub confirm_timeout: Option, + pub dry_activate: bool, +} + +#[derive(Debug, Clone)] +pub struct DeployData<'a> { + pub node_name: &'a str, + pub node: &'a settings::Node, + pub profile_name: &'a str, + pub profile: &'a settings::Profile, + + pub cmd_overrides: &'a CmdOverrides, + + pub merged_settings: settings::GenericSettings, + + pub debug_logs: bool, + pub log_dir: Option<&'a str>, +} + +#[derive(Debug)] +pub struct DeployDefs { + pub ssh_user: String, + pub profile_user: String, + pub profile_path: String, + pub sudo: Option, +} + +#[derive(Error, Debug)] +pub enum DeployDataDefsError { + #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] + NoProfileUser(String, String), +} + +impl<'a> DeployData<'a> { + pub fn defs(&'a self) -> Result { + let ssh_user = match self.merged_settings.ssh_user { + Some(ref u) => u.clone(), + None => whoami::username(), + }; + + let profile_user = self.get_profile_user()?; + + let profile_path = self.get_profile_path()?; + + let sudo: Option = match self.merged_settings.user { + Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), + _ => None, + }; + + Ok(DeployDefs { + ssh_user, + profile_user, + profile_path, + sudo, + }) + } + + pub fn get_profile_path(&'a self) -> Result { + let profile_user = self.get_profile_user()?; + let profile_path = match self.profile.profile_settings.profile_path { + None => match &profile_user[..] 
{ + "root" => format!("/nix/var/nix/profiles/{}", self.profile_name), + _ => format!( + "/nix/var/nix/profiles/per-user/{}/{}", + profile_user, self.profile_name + ), + }, + Some(ref x) => x.clone(), + }; + Ok(profile_path) + } + + pub fn get_profile_user(&'a self) -> Result { + let profile_user = match self.merged_settings.user { + Some(ref x) => x.clone(), + None => match self.merged_settings.ssh_user { + Some(ref x) => x.clone(), + None => { + return Err(DeployDataDefsError::NoProfileUser( + self.profile_name.to_owned(), + self.node_name.to_owned(), + )) + } + }, + }; + Ok(profile_user) + } +} + +pub fn make_deploy_data<'a, 's>( + top_settings: &'s settings::GenericSettings, + node: &'a settings::Node, + node_name: &'a str, + profile: &'a settings::Profile, + profile_name: &'a str, + cmd_overrides: &'a CmdOverrides, + debug_logs: bool, + log_dir: Option<&'a str>, +) -> DeployData<'a> { + let mut merged_settings = profile.generic_settings.clone(); + merged_settings.merge(node.generic_settings.clone()); + merged_settings.merge(top_settings.clone()); + + if cmd_overrides.ssh_user.is_some() { + merged_settings.ssh_user = cmd_overrides.ssh_user.clone(); + } + if cmd_overrides.profile_user.is_some() { + merged_settings.user = cmd_overrides.profile_user.clone(); + } + if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { + merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); + } + if let Some(fast_connection) = cmd_overrides.fast_connection { + merged_settings.fast_connection = Some(fast_connection); + } + if let Some(auto_rollback) = cmd_overrides.auto_rollback { + merged_settings.auto_rollback = Some(auto_rollback); + } + if let Some(magic_rollback) = cmd_overrides.magic_rollback { + merged_settings.magic_rollback = Some(magic_rollback); + } + + DeployData { + node_name, + node, + profile_name, + profile, + cmd_overrides, + merged_settings, + debug_logs, + log_dir, + } +} diff --git a/src/deploy.rs b/src/deploy.rs index f8fc2f90..7c1048ea 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -9,7 +9,7 @@ use std::borrow::Cow; use thiserror::Error; use tokio::process::Command; -use crate::DeployDataDefsError; +use crate::data; struct ActivateCommandData<'a> { sudo: &'a Option, @@ -207,8 +207,8 @@ pub enum ConfirmProfileError { } pub async fn confirm_profile( - deploy_data: &super::DeployData<'_>, - deploy_defs: &super::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, temp_path: Cow<'_, str>, ssh_addr: &str, ) -> Result<(), ConfirmProfileError> { @@ -267,8 +267,8 @@ pub enum DeployProfileError { } pub async fn deploy_profile( - deploy_data: &super::DeployData<'_>, - deploy_defs: &super::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, dry_activate: bool, ) -> Result<(), DeployProfileError> { if !dry_activate { @@ -415,11 +415,11 @@ pub enum RevokeProfileError { SSHRevokeExit(Option), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] DeployDataDefsError), + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), } pub async fn revoke( - deploy_data: &crate::DeployData<'_>, - deploy_defs: &crate::DeployDefs, + deploy_data: &data::DeployData<'_>, + deploy_defs: &data::DeployDefs, ) -> Result<(), RevokeProfileError> { let self_revoke_command = build_revoke_command(&RevokeCommandData { sudo: &deploy_defs.sudo, diff --git a/src/lib.rs b/src/lib.rs index 630df179..5cd69f81 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,12 +4,6 @@ // // SPDX-License-Identifier: MPL-2.0 -use 
rnix::{types::*, SyntaxKind::*}; - -use merge::Merge; - -use thiserror::Error; - use flexi_logger::*; pub fn make_lock_path(temp_path: &str, closure: &str) -> String { @@ -146,295 +140,7 @@ pub fn init_logger( } pub mod settings; +pub mod data; pub mod deploy; pub mod push; pub mod cli; - -#[derive(Debug)] -pub struct CmdOverrides { - pub ssh_user: Option, - pub profile_user: Option, - pub ssh_opts: Option, - pub fast_connection: Option, - pub auto_rollback: Option, - pub hostname: Option, - pub magic_rollback: Option, - pub temp_path: Option, - pub confirm_timeout: Option, - pub dry_activate: bool, -} - -#[derive(PartialEq, Debug)] -pub struct DeployFlake<'a> { - pub repo: &'a str, - pub node: Option, - pub profile: Option, -} - -#[derive(Error, Debug)] -pub enum ParseFlakeError { - #[error("The given path was too long, did you mean to put something in quotes?")] - PathTooLong, - #[error("Unrecognized node or token encountered")] - Unrecognized, -} -pub fn parse_flake(flake: &str) -> Result { - let flake_fragment_start = flake.find('#'); - let (repo, maybe_fragment) = match flake_fragment_start { - Some(s) => (&flake[..s], Some(&flake[s + 1..])), - None => (flake, None), - }; - - let mut node: Option = None; - let mut profile: Option = None; - - if let Some(fragment) = maybe_fragment { - let ast = rnix::parse(fragment); - - let first_child = match ast.root().node().first_child() { - Some(x) => x, - None => { - return Ok(DeployFlake { - repo, - node: None, - profile: None, - }) - } - }; - - let mut node_over = false; - - for entry in first_child.children_with_tokens() { - let x: Option = match (entry.kind(), node_over) { - (TOKEN_DOT, false) => { - node_over = true; - None - } - (TOKEN_DOT, true) => { - return Err(ParseFlakeError::PathTooLong); - } - (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), - (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), - (NODE_STRING, _) => { - let c = entry - .into_node() - .unwrap() - .children_with_tokens() - .nth(1) - .unwrap(); - - Some(c.into_token().unwrap().text().to_string()) - } - _ => return Err(ParseFlakeError::Unrecognized), - }; - - if !node_over { - node = x; - } else { - profile = x; - } - } - } - - Ok(DeployFlake { - repo, - node, - profile, - }) -} - -#[test] -fn test_parse_flake() { - assert_eq!( - parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("computer".to_string()), - profile: Some("something.nix".to_string()), - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example.com".to_string()), - profile: Some("system".to_string()), - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#example").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example".to_string()), - profile: None - } - ); - - assert_eq!( - parse_flake("../deploy/examples/system#example.system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: Some("example".to_string()), - profile: Some("system".to_string()) - } - ); - - assert_eq!( - 
parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", - node: None, - profile: None, - } - ); -} - -#[derive(Debug, Clone)] -pub struct DeployData<'a> { - pub node_name: &'a str, - pub node: &'a settings::Node, - pub profile_name: &'a str, - pub profile: &'a settings::Profile, - - pub cmd_overrides: &'a CmdOverrides, - - pub merged_settings: settings::GenericSettings, - - pub debug_logs: bool, - pub log_dir: Option<&'a str>, -} - -#[derive(Debug)] -pub struct DeployDefs { - pub ssh_user: String, - pub profile_user: String, - pub profile_path: String, - pub sudo: Option, -} - -#[derive(Error, Debug)] -pub enum DeployDataDefsError { - #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] - NoProfileUser(String, String), -} - -impl<'a> DeployData<'a> { - pub fn defs(&'a self) -> Result { - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u.clone(), - None => whoami::username(), - }; - - let profile_user = self.get_profile_user()?; - - let profile_path = self.get_profile_path()?; - - let sudo: Option = match self.merged_settings.user { - Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), - _ => None, - }; - - Ok(DeployDefs { - ssh_user, - profile_user, - profile_path, - sudo, - }) - } - - fn get_profile_path(&'a self) -> Result { - let profile_user = self.get_profile_user()?; - let profile_path = match self.profile.profile_settings.profile_path { - None => match &profile_user[..] { - "root" => format!("/nix/var/nix/profiles/{}", self.profile_name), - _ => format!( - "/nix/var/nix/profiles/per-user/{}/{}", - profile_user, self.profile_name - ), - }, - Some(ref x) => x.clone(), - }; - Ok(profile_path) - } - - fn get_profile_user(&'a self) -> Result { - let profile_user = match self.merged_settings.user { - Some(ref x) => x.clone(), - None => match self.merged_settings.ssh_user { - Some(ref x) => x.clone(), - None => { - return Err(DeployDataDefsError::NoProfileUser( - self.profile_name.to_owned(), - self.node_name.to_owned(), - )) - } - }, - }; - Ok(profile_user) - } -} - -pub fn make_deploy_data<'a, 's>( - top_settings: &'s settings::GenericSettings, - node: &'a settings::Node, - node_name: &'a str, - profile: &'a settings::Profile, - profile_name: &'a str, - cmd_overrides: &'a CmdOverrides, - debug_logs: bool, - log_dir: Option<&'a str>, -) -> DeployData<'a> { - let mut merged_settings = profile.generic_settings.clone(); - merged_settings.merge(node.generic_settings.clone()); - merged_settings.merge(top_settings.clone()); - - if cmd_overrides.ssh_user.is_some() { - merged_settings.ssh_user = cmd_overrides.ssh_user.clone(); - } - if cmd_overrides.profile_user.is_some() { - merged_settings.user = cmd_overrides.profile_user.clone(); - } - if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { - merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); - } - if let Some(fast_connection) = cmd_overrides.fast_connection { - merged_settings.fast_connection = Some(fast_connection); - } - if let Some(auto_rollback) = cmd_overrides.auto_rollback { - merged_settings.auto_rollback = Some(auto_rollback); - } - if let Some(magic_rollback) = cmd_overrides.magic_rollback { - merged_settings.magic_rollback = Some(magic_rollback); - } - - DeployData { - node_name, - node, - profile_name, - profile, - cmd_overrides, - merged_settings, - debug_logs, - log_dir, - } -} diff --git a/src/push.rs b/src/push.rs index 69eba0db..ee55a123 100644 --- a/src/push.rs +++ b/src/push.rs @@ 
-9,6 +9,8 @@ use std::process::Stdio; use thiserror::Error; use tokio::process::Command; +use crate::data; + #[derive(Error, Debug)] pub enum PushProfileError { #[error("Failed to run Nix show-derivation command: {0}")] @@ -47,8 +49,8 @@ pub struct PushProfileData<'a> { pub supports_flakes: bool, pub check_sigs: bool, pub repo: &'a str, - pub deploy_data: &'a super::DeployData<'a>, - pub deploy_defs: &'a super::DeployDefs, + pub deploy_data: &'a data::DeployData<'a>, + pub deploy_defs: &'a data::DeployDefs, pub keep_result: bool, pub result_path: Option<&'a str>, pub extra_build_args: &'a [String], From e7d950a367b4ba985e038485883c9eb5505e550e Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 7 Aug 2021 15:38:25 -0500 Subject: [PATCH 04/53] Refactor implement from_str trait for `Target` data --- src/cli.rs | 48 ++++++++-------- src/data.rs | 163 ++++++++++++++++++++++++++-------------------------- 2 files changed, 107 insertions(+), 104 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 593ed06b..002f2f84 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -168,7 +168,7 @@ pub enum GetDeploymentDataError { /// Evaluates the Nix in the given `repo` and return the processed Data from it async fn get_deployment_data( supports_flakes: bool, - flakes: &[data::DeployFlake<'_>], + flakes: &[data::Target], extra_build_args: &[String], ) -> Result, GetDeploymentDataError> { futures_util::stream::iter(flakes).then(|flake| async move { @@ -272,7 +272,7 @@ struct PromptPart<'a> { fn print_deployment( parts: &[( - &data::DeployFlake<'_>, + &data::Target, data::DeployData, data::DeployDefs, )], @@ -315,7 +315,7 @@ pub enum PromptDeploymentError { fn prompt_deployment( parts: &[( - &data::DeployFlake<'_>, + &data::Target, data::DeployData, data::DeployDefs, )], @@ -388,14 +388,14 @@ pub enum RunDeployError { } type ToDeploy<'a> = Vec<( - &'a data::DeployFlake<'a>, + &'a data::Target, &'a settings::Root, (&'a str, &'a settings::Node), (&'a str, &'a settings::Profile), )>; async fn run_deploy( - deploy_flakes: Vec>, + deploy_targets: Vec, data: Vec, supports_flakes: bool, check_sigs: bool, @@ -409,11 +409,11 @@ async fn run_deploy( log_dir: &Option, rollback_succeeded: bool, ) -> Result<(), RunDeployError> { - let to_deploy: ToDeploy = deploy_flakes + let to_deploy: ToDeploy = deploy_targets .iter() .zip(&data) - .map(|(deploy_flake, data)| { - let to_deploys: ToDeploy = match (&deploy_flake.node, &deploy_flake.profile) { + .map(|(deploy_target, data)| { + let to_deploys: ToDeploy = match (&deploy_target.node, &deploy_target.profile) { (Some(node_name), Some(profile_name)) => { let node = match data.nodes.get(node_name) { Some(x) => x, @@ -425,7 +425,7 @@ async fn run_deploy( }; vec![( - deploy_flake, + deploy_target, data, (node_name.as_str(), node), (profile_name.as_str(), profile), @@ -459,7 +459,7 @@ async fn run_deploy( profiles_list .into_iter() - .map(|x| (deploy_flake, data, (node_name.as_str(), node), x)) + .map(|x| (deploy_target, data, (node_name.as_str(), node), x)) .collect() } (None, None) => { @@ -490,7 +490,7 @@ async fn run_deploy( let ll: ToDeploy = profiles_list .into_iter() - .map(|x| (deploy_flake, data, (node_name.as_str(), node), x)) + .map(|x| (deploy_target, data, (node_name.as_str(), node), x)) .collect(); l.extend(ll); @@ -508,12 +508,12 @@ async fn run_deploy( .collect(); let mut parts: Vec<( - &data::DeployFlake<'_>, + &data::Target, data::DeployData, data::DeployDefs, )> = Vec::new(); - for (deploy_flake, data, (node_name, node), (profile_name, profile)) in to_deploy { + for 
(deploy_target, data, (node_name, node), (profile_name, profile)) in to_deploy { let deploy_data = data::make_deploy_data( &data.generic_settings, node, @@ -527,7 +527,7 @@ async fn run_deploy( let deploy_defs = deploy_data.defs()?; - parts.push((deploy_flake, deploy_data, deploy_defs)); + parts.push((deploy_target, deploy_data, deploy_defs)); } if interactive { @@ -536,11 +536,11 @@ async fn run_deploy( print_deployment(&parts[..])?; } - for (deploy_flake, deploy_data, deploy_defs) in &parts { + for (deploy_target, deploy_data, deploy_defs) in &parts { deploy::push::push_profile(deploy::push::PushProfileData { supports_flakes, check_sigs, - repo: deploy_flake.repo, + repo: &deploy_target.repo, deploy_data, deploy_defs, keep_result, @@ -595,7 +595,7 @@ pub enum RunError { #[error("Failed to evaluate deployment data: {0}")] GetDeploymentData(#[from] GetDeploymentDataError), #[error("Error parsing flake: {0}")] - ParseFlake(#[from] data::ParseFlakeError), + ParseFlake(#[from] data::ParseTargetError), #[error("Error initiating logger: {0}")] Logger(#[from] flexi_logger::FlexiLoggerError), #[error("{0}")] @@ -619,10 +619,10 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { .targets .unwrap_or_else(|| vec![opts.clone().target.unwrap_or_else(|| ".".to_string())]); - let deploy_flakes: Vec = deploys + let deploy_targets: Vec = deploys .iter() - .map(|f| data::parse_flake(f.as_str())) - .collect::, data::ParseFlakeError>>()?; + .map(|f| f.parse::()) + .collect::, data::ParseTargetError>>()?; let cmd_overrides = data::CmdOverrides { ssh_user: opts.ssh_user, @@ -644,14 +644,14 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { } if !opts.skip_checks { - for deploy_flake in &deploy_flakes { - check_deployment(supports_flakes, deploy_flake.repo, &opts.extra_build_args).await?; + for deploy_target in deploy_targets.iter() { + check_deployment(supports_flakes, &deploy_target.repo, &opts.extra_build_args).await?; } } let result_path = opts.result_path.as_deref(); - let data = get_deployment_data(supports_flakes, &deploy_flakes, &opts.extra_build_args).await?; + let data = get_deployment_data(supports_flakes, &deploy_targets, &opts.extra_build_args).await?; run_deploy( - deploy_flakes, + deploy_targets, data, supports_flakes, opts.checksigs, diff --git a/src/data.rs b/src/data.rs index 86e1b6cd..9de663ef 100644 --- a/src/data.rs +++ b/src/data.rs @@ -10,145 +10,148 @@ use thiserror::Error; use crate::settings; #[derive(PartialEq, Debug)] -pub struct DeployFlake<'a> { - pub repo: &'a str, +pub struct Target { + pub repo: String, pub node: Option, pub profile: Option, } #[derive(Error, Debug)] -pub enum ParseFlakeError { +pub enum ParseTargetError { #[error("The given path was too long, did you mean to put something in quotes?")] PathTooLong, #[error("Unrecognized node or token encountered")] Unrecognized, } - -pub fn parse_flake(flake: &str) -> Result { - let flake_fragment_start = flake.find('#'); - let (repo, maybe_fragment) = match flake_fragment_start { - Some(s) => (&flake[..s], Some(&flake[s + 1..])), - None => (flake, None), - }; - - let mut node: Option = None; - let mut profile: Option = None; - - if let Some(fragment) = maybe_fragment { - let ast = rnix::parse(fragment); - - let first_child = match ast.root().node().first_child() { - Some(x) => x, - None => { - return Ok(DeployFlake { - repo, - node: None, - profile: None, - }) - } +impl std::str::FromStr for Target { + type Err = ParseTargetError; + + fn from_str(s: &str) -> Result { + let 
flake_fragment_start = s.find('#'); + let (repo, maybe_fragment) = match flake_fragment_start { + Some(i) => (s[..i].to_string(), Some(&s[i + 1..])), + None => (s.to_string(), None), }; - let mut node_over = false; + let mut node: Option = None; + let mut profile: Option = None; - for entry in first_child.children_with_tokens() { - let x: Option = match (entry.kind(), node_over) { - (TOKEN_DOT, false) => { - node_over = true; - None - } - (TOKEN_DOT, true) => { - return Err(ParseFlakeError::PathTooLong); - } - (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), - (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), - (NODE_STRING, _) => { - let c = entry - .into_node() - .unwrap() - .children_with_tokens() - .nth(1) - .unwrap(); - - Some(c.into_token().unwrap().text().to_string()) + if let Some(fragment) = maybe_fragment { + let ast = rnix::parse(fragment); + + let first_child = match ast.root().node().first_child() { + Some(x) => x, + None => { + return Ok(Target { + repo, + node: None, + profile: None, + }) } - _ => return Err(ParseFlakeError::Unrecognized), }; - if !node_over { - node = x; - } else { - profile = x; + let mut node_over = false; + + for entry in first_child.children_with_tokens() { + let x: Option = match (entry.kind(), node_over) { + (TOKEN_DOT, false) => { + node_over = true; + None + } + (TOKEN_DOT, true) => { + return Err(ParseTargetError::PathTooLong); + } + (NODE_IDENT, _) => Some(entry.into_node().unwrap().text().to_string()), + (TOKEN_IDENT, _) => Some(entry.into_token().unwrap().text().to_string()), + (NODE_STRING, _) => { + let c = entry + .into_node() + .unwrap() + .children_with_tokens() + .nth(1) + .unwrap(); + + Some(c.into_token().unwrap().text().to_string()) + } + _ => return Err(ParseTargetError::Unrecognized), + }; + + if !node_over { + node = x; + } else { + profile = x; + } } } - } - Ok(DeployFlake { - repo, - node, - profile, - }) + Ok(Target { + repo, + node, + profile, + }) + } } #[test] -fn test_parse_flake() { +fn test_deploy_target_from_str() { assert_eq!( - parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: None, profile: None, } ); assert_eq!( - parse_flake("../deploy/examples/system#").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system#".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: None, profile: None, } ); assert_eq!( - parse_flake("../deploy/examples/system#computer.\"something.nix\"").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system#computer.\"something.nix\"".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: Some("computer".to_string()), profile: Some("something.nix".to_string()), } ); assert_eq!( - parse_flake("../deploy/examples/system#\"example.com\".system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system#\"example.com\".system".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: Some("example.com".to_string()), profile: Some("system".to_string()), } ); assert_eq!( - parse_flake("../deploy/examples/system#example").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system#example".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), 
node: Some("example".to_string()), profile: None } ); assert_eq!( - parse_flake("../deploy/examples/system#example.system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system#example.system".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: Some("example".to_string()), profile: Some("system".to_string()) } ); assert_eq!( - parse_flake("../deploy/examples/system").unwrap(), - DeployFlake { - repo: "../deploy/examples/system", + "../deploy/examples/system".parse::().unwrap(), + Target { + repo: "../deploy/examples/system".to_string(), node: None, profile: None, } From 5fcece6070b0a8147b38eb466b5e933df1c0e774 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sun, 8 Aug 2021 13:09:18 -0500 Subject: [PATCH 05/53] Refactor move flake parsing into adapter file --- src/cli.rs | 165 ++------------------------------------------------ src/flake.rs | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 1 + 3 files changed, 173 insertions(+), 160 deletions(-) create mode 100644 src/flake.rs diff --git a/src/cli.rs b/src/cli.rs index 002f2f84..c7d68a46 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,8 +10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{data, settings}; -use futures_util::stream::{StreamExt, TryStreamExt}; +use self::deploy::{data, settings, flake}; use log::{debug, error, info, warn}; use serde::Serialize; use std::process::Stdio; @@ -107,160 +106,6 @@ async fn test_flake_support() -> Result { .success()) } -#[derive(Error, Debug)] -pub enum CheckDeploymentError { - #[error("Failed to execute Nix checking command: {0}")] - NixCheck(#[from] std::io::Error), - #[error("Nix checking command resulted in a bad exit code: {0:?}")] - NixCheckExit(Option), -} - -async fn check_deployment( - supports_flakes: bool, - repo: &str, - extra_build_args: &[String], -) -> Result<(), CheckDeploymentError> { - info!("Running checks for flake in {}", repo); - - let mut check_command = match supports_flakes { - true => Command::new("nix"), - false => Command::new("nix-build"), - }; - - if supports_flakes { - check_command.arg("flake").arg("check").arg(repo); - } else { - check_command.arg("-E") - .arg("--no-out-link") - .arg(format!("let r = import {}/.; x = (if builtins.isFunction r then (r {{}}) else r); in if x ? 
checks then x.checks.${{builtins.currentSystem}} else {{}}", repo)); - } - - for extra_arg in extra_build_args { - check_command.arg(extra_arg); - } - - let check_status = check_command.status().await?; - - match check_status.code() { - Some(0) => (), - a => return Err(CheckDeploymentError::NixCheckExit(a)), - }; - - Ok(()) -} - -#[derive(Error, Debug)] -pub enum GetDeploymentDataError { - #[error("Failed to execute nix eval command: {0}")] - NixEval(std::io::Error), - #[error("Failed to read output from evaluation: {0}")] - NixEvalOut(std::io::Error), - #[error("Evaluation resulted in a bad exit code: {0:?}")] - NixEvalExit(Option), - #[error("Error converting evaluation output to utf8: {0}")] - DecodeUtf8(#[from] std::string::FromUtf8Error), - #[error("Error decoding the JSON from evaluation: {0}")] - DecodeJson(#[from] serde_json::error::Error), - #[error("Impossible happened: profile is set but node is not")] - ProfileNoNode, -} - -/// Evaluates the Nix in the given `repo` and return the processed Data from it -async fn get_deployment_data( - supports_flakes: bool, - flakes: &[data::Target], - extra_build_args: &[String], -) -> Result, GetDeploymentDataError> { - futures_util::stream::iter(flakes).then(|flake| async move { - - info!("Evaluating flake in {}", flake.repo); - - let mut c = if supports_flakes { - Command::new("nix") - } else { - Command::new("nix-instantiate") - }; - - if supports_flakes { - c.arg("eval") - .arg("--json") - .arg(format!("{}#deploy", flake.repo)) - // We use --apply instead of --expr so that we don't have to deal with builtins.getFlake - .arg("--apply"); - match (&flake.node, &flake.profile) { - (Some(node), Some(profile)) => { - // Ignore all nodes and all profiles but the one we're evaluating - c.arg(format!( - r#" - deploy: - (deploy // {{ - nodes = {{ - "{0}" = deploy.nodes."{0}" // {{ - profiles = {{ - inherit (deploy.nodes."{0}".profiles) "{1}"; - }}; - }}; - }}; - }}) - "#, - node, profile - )) - } - (Some(node), None) => { - // Ignore all nodes but the one we're evaluating - c.arg(format!( - r#" - deploy: - (deploy // {{ - nodes = {{ - inherit (deploy.nodes) "{}"; - }}; - }}) - "#, - node - )) - } - (None, None) => { - // We need to evaluate all profiles of all nodes anyway, so just do it strictly - c.arg("deploy: deploy") - } - (None, Some(_)) => return Err(GetDeploymentDataError::ProfileNoNode), - } - } else { - c - .arg("--strict") - .arg("--read-write-mode") - .arg("--json") - .arg("--eval") - .arg("-E") - .arg(format!("let r = import {}/.; in if builtins.isFunction r then (r {{}}).deploy else r.deploy", flake.repo)) - }; - - for extra_arg in extra_build_args { - c.arg(extra_arg); - } - - let build_child = c - .stdout(Stdio::piped()) - .spawn() - .map_err(GetDeploymentDataError::NixEval)?; - - let build_output = build_child - .wait_with_output() - .await - .map_err(GetDeploymentDataError::NixEvalOut)?; - - match build_output.status.code() { - Some(0) => (), - a => return Err(GetDeploymentDataError::NixEvalExit(a)), - }; - - let data_json = String::from_utf8(build_output.stdout)?; - - Ok(serde_json::from_str(&data_json)?) 
-}).try_collect().await -} - #[derive(Serialize)] struct PromptPart<'a> { user: &'a str, @@ -591,9 +436,9 @@ pub enum RunError { #[error("Failed to test for flake support: {0}")] FlakeTest(std::io::Error), #[error("Failed to check deployment: {0}")] - CheckDeployment(#[from] CheckDeploymentError), + CheckDeployment(#[from] flake::CheckDeploymentError), #[error("Failed to evaluate deployment data: {0}")] - GetDeploymentData(#[from] GetDeploymentDataError), + GetDeploymentData(#[from] flake::GetDeploymentDataError), #[error("Error parsing flake: {0}")] ParseFlake(#[from] data::ParseTargetError), #[error("Error initiating logger: {0}")] @@ -645,11 +490,11 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { if !opts.skip_checks { for deploy_target in deploy_targets.iter() { - check_deployment(supports_flakes, &deploy_target.repo, &opts.extra_build_args).await?; + flake::check_deployment(supports_flakes, &deploy_target.repo, &opts.extra_build_args).await?; } } let result_path = opts.result_path.as_deref(); - let data = get_deployment_data(supports_flakes, &deploy_targets, &opts.extra_build_args).await?; + let data = flake::get_deployment_data(supports_flakes, &deploy_targets, &opts.extra_build_args).await?; run_deploy( deploy_targets, data, diff --git a/src/flake.rs b/src/flake.rs new file mode 100644 index 00000000..22b6de23 --- /dev/null +++ b/src/flake.rs @@ -0,0 +1,167 @@ +// SPDX-FileCopyrightText: 2020 Serokell +// SPDX-FileCopyrightText: 2021 Yannik Sander +// +// SPDX-License-Identifier: MPL-2.0 + +use crate as deploy; + +use self::deploy::{data, settings}; +use log::{error, info}; +use std::process::Stdio; +use futures_util::stream::{StreamExt, TryStreamExt}; +use thiserror::Error; +use tokio::process::Command; + +#[derive(Error, Debug)] +pub enum CheckDeploymentError { + #[error("Failed to execute Nix checking command: {0}")] + NixCheck(#[from] std::io::Error), + #[error("Nix checking command resulted in a bad exit code: {0:?}")] + NixCheckExit(Option), +} + +pub async fn check_deployment( + supports_flakes: bool, + repo: &str, + extra_build_args: &[String], +) -> Result<(), CheckDeploymentError> { + info!("Running checks for flake in {}", repo); + + let mut check_command = match supports_flakes { + true => Command::new("nix"), + false => Command::new("nix-build"), + }; + + if supports_flakes { + check_command.arg("flake").arg("check").arg(repo); + } else { + check_command.arg("-E") + .arg("--no-out-link") + .arg(format!("let r = import {}/.; x = (if builtins.isFunction r then (r {{}}) else r); in if x ? 
checks then x.checks.${{builtins.currentSystem}} else {{}}", repo)); + }; + + for extra_arg in extra_build_args { + check_command.arg(extra_arg); + } + + let check_status = check_command.status().await?; + + match check_status.code() { + Some(0) => (), + a => return Err(CheckDeploymentError::NixCheckExit(a)), + }; + + Ok(()) +} + +#[derive(Error, Debug)] +pub enum GetDeploymentDataError { + #[error("Failed to execute nix eval command: {0}")] + NixEval(std::io::Error), + #[error("Failed to read output from evaluation: {0}")] + NixEvalOut(std::io::Error), + #[error("Evaluation resulted in a bad exit code: {0:?}")] + NixEvalExit(Option), + #[error("Error converting evaluation output to utf8: {0}")] + DecodeUtf8(#[from] std::string::FromUtf8Error), + #[error("Error decoding the JSON from evaluation: {0}")] + DecodeJson(#[from] serde_json::error::Error), + #[error("Impossible happened: profile is set but node is not")] + ProfileNoNode, +} + +/// Evaluates the Nix in the given `repo` and return the processed Data from it +pub async fn get_deployment_data( + supports_flakes: bool, + flakes: &[data::Target], + extra_build_args: &[String], +) -> Result, GetDeploymentDataError> { + futures_util::stream::iter(flakes).then(|flake| async move { + + info!("Evaluating flake in {}", flake.repo); + + let mut c = if supports_flakes { + Command::new("nix") + } else { + Command::new("nix-instantiate") + }; + + if supports_flakes { + c.arg("eval") + .arg("--json") + .arg(format!("{}#deploy", flake.repo)) + // We use --apply instead of --expr so that we don't have to deal with builtins.getFlake + .arg("--apply"); + match (&flake.node, &flake.profile) { + (Some(node), Some(profile)) => { + // Ignore all nodes and all profiles but the one we're evaluating + c.arg(format!( + r#" + deploy: + (deploy // {{ + nodes = {{ + "{0}" = deploy.nodes."{0}" // {{ + profiles = {{ + inherit (deploy.nodes."{0}".profiles) "{1}"; + }}; + }}; + }}; + }}) + "#, + node, profile + )) + } + (Some(node), None) => { + // Ignore all nodes but the one we're evaluating + c.arg(format!( + r#" + deploy: + (deploy // {{ + nodes = {{ + inherit (deploy.nodes) "{}"; + }}; + }}) + "#, + node + )) + } + (None, None) => { + // We need to evaluate all profiles of all nodes anyway, so just do it strictly + c.arg("deploy: deploy") + } + (None, Some(_)) => return Err(GetDeploymentDataError::ProfileNoNode), + } + } else { + c + .arg("--strict") + .arg("--read-write-mode") + .arg("--json") + .arg("--eval") + .arg("-E") + .arg(format!("let r = import {}/.; in if builtins.isFunction r then (r {{}}).deploy else r.deploy", flake.repo)) + }; + + for extra_arg in extra_build_args { + c.arg(extra_arg); + } + + let build_child = c + .stdout(Stdio::piped()) + .spawn() + .map_err(GetDeploymentDataError::NixEval)?; + + let build_output = build_child + .wait_with_output() + .await + .map_err(GetDeploymentDataError::NixEvalOut)?; + + match build_output.status.code() { + Some(0) => (), + a => return Err(GetDeploymentDataError::NixEvalExit(a)), + }; + + let data_json = String::from_utf8(build_output.stdout)?; + + Ok(serde_json::from_str(&data_json)?) 
+}).try_collect().await +} diff --git a/src/lib.rs b/src/lib.rs index 5cd69f81..e530a8b8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -141,6 +141,7 @@ pub fn init_logger( pub mod settings; pub mod data; +pub mod flake; pub mod deploy; pub mod push; pub mod cli; From 70278794484d3645faaf88473d395d84e79e21f5 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sun, 8 Aug 2021 13:18:41 -0500 Subject: [PATCH 06/53] Refactor homologate data structures --- src/cli.rs | 192 ++++++++++++++---------------------------------- src/data.rs | 96 ++++++++++++------------ src/deploy.rs | 24 +++--- src/push.rs | 16 ++-- src/settings.rs | 18 ++++- 5 files changed, 145 insertions(+), 201 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index c7d68a46..51d2f1d1 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -28,66 +28,16 @@ pub struct Opts { /// A list of flakes to deploy alternatively #[clap(long, group = "deploy")] targets: Option>, - /// Check signatures when using `nix copy` - #[clap(short, long)] - checksigs: bool, - /// Use the interactive prompt before deployment - #[clap(short, long)] - interactive: bool, - /// Extra arguments to be passed to nix build - extra_build_args: Vec, - - /// Print debug logs to output - #[clap(short, long)] - debug_logs: bool, - /// Directory to print logs to (including the background activation process) - #[clap(long)] - log_dir: Option, - - /// Keep the build outputs of each built profile - #[clap(short, long)] - keep_result: bool, - /// Location to keep outputs from built profiles in - #[clap(short, long)] - result_path: Option, - /// Skip the automatic pre-build checks - #[clap(short, long)] - skip_checks: bool, - - /// Override the SSH user with the given value - #[clap(long)] - ssh_user: Option, - /// Override the profile user with the given value - #[clap(long)] - profile_user: Option, - /// Override the SSH options used - #[clap(long)] - ssh_opts: Option, - /// Override if the connecting to the target node should be considered fast - #[clap(long)] - fast_connection: Option, - /// Override if a rollback should be attempted if activation fails - #[clap(long)] - auto_rollback: Option, /// Override hostname used for the node #[clap(long)] hostname: Option, - /// Make activation wait for confirmation, or roll back after a period of time - #[clap(long)] - magic_rollback: Option, - /// How long activation should wait for confirmation (if using magic-rollback) - #[clap(long)] - confirm_timeout: Option, - /// Where to store temporary files (only used by magic-rollback) - #[clap(long)] - temp_path: Option, - /// Show what will be activated on the machines - #[clap(long)] - dry_activate: bool, - /// Revoke all previously succeeded deploys when deploying multiple profiles - #[clap(long)] - rollback_succeeded: Option, + + #[clap(flatten)] + flags: data::Flags, + + #[clap(flatten)] + generic_settings: settings::GenericSettings, } /// Returns if the available Nix installation supports flakes @@ -240,27 +190,20 @@ type ToDeploy<'a> = Vec<( )>; async fn run_deploy( - deploy_targets: Vec, - data: Vec, + targets: Vec, + settings: Vec, supports_flakes: bool, - check_sigs: bool, - interactive: bool, - cmd_overrides: &data::CmdOverrides, - keep_result: bool, - result_path: Option<&str>, - extra_build_args: &[String], - debug_logs: bool, - dry_activate: bool, - log_dir: &Option, - rollback_succeeded: bool, + hostname: Option, + cmd_settings: settings::GenericSettings, + cmd_flags: data::Flags, ) -> Result<(), RunDeployError> { - let to_deploy: ToDeploy = deploy_targets + let to_deploy: ToDeploy 
= targets .iter() - .zip(&data) - .map(|(deploy_target, data)| { - let to_deploys: ToDeploy = match (&deploy_target.node, &deploy_target.profile) { + .zip(&settings) + .map(|(target, root)| { + let to_deploys: ToDeploy = match (&target.node, &target.profile) { (Some(node_name), Some(profile_name)) => { - let node = match data.nodes.get(node_name) { + let node = match root.nodes.get(node_name) { Some(x) => x, None => return Err(RunDeployError::NodeNotFound(node_name.clone())), }; @@ -270,14 +213,14 @@ async fn run_deploy( }; vec![( - deploy_target, - data, + &target, + &root, (node_name.as_str(), node), (profile_name.as_str(), profile), )] } (Some(node_name), None) => { - let node = match data.nodes.get(node_name) { + let node = match root.nodes.get(node_name) { Some(x) => x, None => return Err(RunDeployError::NodeNotFound(node_name.clone())), }; @@ -304,13 +247,13 @@ async fn run_deploy( profiles_list .into_iter() - .map(|x| (deploy_target, data, (node_name.as_str(), node), x)) + .map(|x| (target, root, (node_name.as_str(), node), x)) .collect() } (None, None) => { let mut l = Vec::new(); - for (node_name, node) in &data.nodes { + for (node_name, node) in &root.nodes { let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); for profile_name in [ @@ -335,7 +278,7 @@ async fn run_deploy( let ll: ToDeploy = profiles_list .into_iter() - .map(|x| (deploy_target, data, (node_name.as_str(), node), x)) + .map(|x| (target, root, (node_name.as_str(), node), x)) .collect(); l.extend(ll); @@ -358,39 +301,39 @@ async fn run_deploy( data::DeployDefs, )> = Vec::new(); - for (deploy_target, data, (node_name, node), (profile_name, profile)) in to_deploy { + for (target, root, (node_name, node), (profile_name, profile)) in to_deploy { let deploy_data = data::make_deploy_data( - &data.generic_settings, + &root.generic_settings, + &cmd_settings, + &cmd_flags, node, node_name, profile, profile_name, - cmd_overrides, - debug_logs, - log_dir.as_deref(), + hostname.as_deref(), ); let deploy_defs = deploy_data.defs()?; - parts.push((deploy_target, deploy_data, deploy_defs)); + parts.push((target, deploy_data, deploy_defs)); } - if interactive { + if cmd_flags.interactive { prompt_deployment(&parts[..])?; } else { print_deployment(&parts[..])?; } - for (deploy_target, deploy_data, deploy_defs) in &parts { + for (target, deploy_data, deploy_defs) in &parts { deploy::push::push_profile(deploy::push::PushProfileData { - supports_flakes, - check_sigs, - repo: &deploy_target.repo, - deploy_data, - deploy_defs, - keep_result, - result_path, - extra_build_args, + supports_flakes: &supports_flakes, + check_sigs: &cmd_flags.checksigs, + repo: &target.repo, + deploy_data: &deploy_data, + deploy_defs: &deploy_defs, + keep_result: &cmd_flags.keep_result, + result_path: cmd_flags.result_path.as_deref(), + extra_build_args: &cmd_flags.extra_build_args, }) .await?; } @@ -402,14 +345,14 @@ async fn run_deploy( // Rollbacks adhere to the global seeting to auto_rollback and secondary // the profile's configuration for (_, deploy_data, deploy_defs) in &parts { - if let Err(e) = deploy::deploy::deploy_profile(deploy_data, deploy_defs, dry_activate).await + if let Err(e) = deploy::deploy::deploy_profile(deploy_data, deploy_defs, cmd_flags.dry_activate).await { error!("{}", e); - if dry_activate { + if cmd_flags.dry_activate { info!("dry run, not rolling back"); } info!("Revoking previous deploys"); - if rollback_succeeded && cmd_overrides.auto_rollback.unwrap_or(true) { + if cmd_flags.rollback_succeeded && 
cmd_settings.auto_rollback.unwrap_or(true) { // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) @@ -454,8 +397,8 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { }; deploy::init_logger( - opts.debug_logs, - opts.log_dir.as_deref(), + opts.flags.debug_logs, + opts.flags.log_dir.as_deref(), &deploy::LoggerType::Deploy, )?; @@ -464,51 +407,30 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { .targets .unwrap_or_else(|| vec![opts.clone().target.unwrap_or_else(|| ".".to_string())]); - let deploy_targets: Vec = deploys - .iter() - .map(|f| f.parse::()) - .collect::, data::ParseTargetError>>()?; - - let cmd_overrides = data::CmdOverrides { - ssh_user: opts.ssh_user, - profile_user: opts.profile_user, - ssh_opts: opts.ssh_opts, - fast_connection: opts.fast_connection, - auto_rollback: opts.auto_rollback, - hostname: opts.hostname, - magic_rollback: opts.magic_rollback, - temp_path: opts.temp_path, - confirm_timeout: opts.confirm_timeout, - dry_activate: opts.dry_activate, - }; - let supports_flakes = test_flake_support().await.map_err(RunError::FlakeTest)?; if !supports_flakes { warn!("A Nix version without flakes support was detected, support for this is work in progress"); } - if !opts.skip_checks { - for deploy_target in deploy_targets.iter() { - flake::check_deployment(supports_flakes, &deploy_target.repo, &opts.extra_build_args).await?; + let targets: Vec = deploys + .into_iter() + .map(|f| f.parse::()) + .collect::, data::ParseTargetError>>()?; + + if !opts.flags.skip_checks { + for target in targets.iter() { + flake::check_deployment(supports_flakes, &target.repo, &opts.flags.extra_build_args).await?; } } - let result_path = opts.result_path.as_deref(); - let data = flake::get_deployment_data(supports_flakes, &deploy_targets, &opts.extra_build_args).await?; + let settings = flake::get_deployment_data(supports_flakes, &targets, &opts.flags.extra_build_args).await?; run_deploy( - deploy_targets, - data, + targets, + settings, supports_flakes, - opts.checksigs, - opts.interactive, - &cmd_overrides, - opts.keep_result, - result_path, - &opts.extra_build_args, - opts.debug_logs, - opts.dry_activate, - &opts.log_dir, - opts.rollback_succeeded.unwrap_or(true), + opts.hostname, + opts.generic_settings, + opts.flags, ) .await?; diff --git a/src/data.rs b/src/data.rs index 9de663ef..456a9b9a 100644 --- a/src/data.rs +++ b/src/data.rs @@ -6,6 +6,7 @@ use rnix::{types::*, SyntaxKind::*}; use merge::Merge; use thiserror::Error; +use clap::Clap; use crate::settings; @@ -158,33 +159,53 @@ fn test_deploy_target_from_str() { ); } -#[derive(Debug)] -pub struct CmdOverrides { - pub ssh_user: Option, - pub profile_user: Option, - pub ssh_opts: Option, - pub fast_connection: Option, - pub auto_rollback: Option, - pub hostname: Option, - pub magic_rollback: Option, - pub temp_path: Option, - pub confirm_timeout: Option, - pub dry_activate: bool, -} - #[derive(Debug, Clone)] pub struct DeployData<'a> { pub node_name: &'a str, pub node: &'a settings::Node, pub profile_name: &'a str, pub profile: &'a settings::Profile, + pub hostname: Option<&'a str>, - pub cmd_overrides: &'a CmdOverrides, - + pub flags: &'a Flags, pub merged_settings: settings::GenericSettings, +} - pub debug_logs: bool, - pub log_dir: Option<&'a str>, +#[derive(Clap, Debug, Clone)] +pub struct Flags { + /// Check signatures when using `nix copy` + #[clap(short, long)] + pub checksigs: bool, + /// Use the interactive prompt before 
deployment + #[clap(short, long)] + pub interactive: bool, + /// Extra arguments to be passed to nix build + pub extra_build_args: Vec, + + /// Print debug logs to output + #[clap(short, long)] + pub debug_logs: bool, + /// Directory to print logs to (including the background activation process) + #[clap(long)] + pub log_dir: Option, + + /// Keep the build outputs of each built profile + #[clap(short, long)] + pub keep_result: bool, + /// Location to keep outputs from built profiles in + #[clap(short, long)] + pub result_path: Option, + + /// Skip the automatic pre-build checks + #[clap(short, long)] + pub skip_checks: bool, + /// Make activation wait for confirmation, or roll back after a period of time + /// Show what will be activated on the machines + #[clap(long)] + pub dry_activate: bool, + /// Revoke all previously succeeded deploys when deploying multiple profiles + #[clap(long)] + pub rollback_succeeded: bool, } #[derive(Debug)] @@ -257,47 +278,32 @@ impl<'a> DeployData<'a> { } } -pub fn make_deploy_data<'a, 's>( - top_settings: &'s settings::GenericSettings, +pub fn make_deploy_data<'a>( + top_settings: &'a settings::GenericSettings, + cmd_settings: &'a settings::GenericSettings, + flags: &'a Flags, node: &'a settings::Node, node_name: &'a str, profile: &'a settings::Profile, profile_name: &'a str, - cmd_overrides: &'a CmdOverrides, - debug_logs: bool, - log_dir: Option<&'a str>, + hostname: Option<&'a str>, ) -> DeployData<'a> { - let mut merged_settings = profile.generic_settings.clone(); + let mut merged_settings = cmd_settings.clone(); + merged_settings.merge(profile.generic_settings.clone()); merged_settings.merge(node.generic_settings.clone()); merged_settings.merge(top_settings.clone()); - if cmd_overrides.ssh_user.is_some() { - merged_settings.ssh_user = cmd_overrides.ssh_user.clone(); - } - if cmd_overrides.profile_user.is_some() { - merged_settings.user = cmd_overrides.profile_user.clone(); - } - if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { - merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); - } - if let Some(fast_connection) = cmd_overrides.fast_connection { - merged_settings.fast_connection = Some(fast_connection); - } - if let Some(auto_rollback) = cmd_overrides.auto_rollback { - merged_settings.auto_rollback = Some(auto_rollback); - } - if let Some(magic_rollback) = cmd_overrides.magic_rollback { - merged_settings.magic_rollback = Some(magic_rollback); - } + // if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { + // merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); + // } DeployData { node_name, node, profile_name, profile, - cmd_overrides, + hostname, + flags, merged_settings, - debug_logs, - log_dir, } } diff --git a/src/deploy.rs b/src/deploy.rs index 7c1048ea..906ff4e0 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -297,16 +297,16 @@ pub async fn deploy_profile( temp_path: &temp_path, confirm_timeout, magic_rollback, - debug_logs: deploy_data.debug_logs, - log_dir: deploy_data.log_dir, + debug_logs: deploy_data.flags.debug_logs, + log_dir: deploy_data.flags.log_dir.as_deref(), dry_activate, }); debug!("Constructed activation command: {}", self_activate_command); - let hostname = match deploy_data.cmd_overrides.hostname { - Some(ref x) => x, - None => &deploy_data.node.node_settings.hostname, + let hostname = match deploy_data.hostname { + Some(x) => x, + None => deploy_data.node.node_settings.hostname.as_str(), }; let ssh_addr = format!("{}@{}", deploy_defs.ssh_user, hostname); @@ -340,8 
+340,8 @@ pub async fn deploy_profile( sudo: &deploy_defs.sudo, closure: &deploy_data.profile.profile_settings.path, temp_path: &temp_path, - debug_logs: deploy_data.debug_logs, - log_dir: deploy_data.log_dir, + debug_logs: deploy_data.flags.debug_logs, + log_dir: deploy_data.flags.log_dir.as_deref(), }); debug!("Constructed wait command: {}", self_wait_command); @@ -425,15 +425,15 @@ pub async fn revoke( sudo: &deploy_defs.sudo, closure: &deploy_data.profile.profile_settings.path, profile_path: &deploy_data.get_profile_path()?, - debug_logs: deploy_data.debug_logs, - log_dir: deploy_data.log_dir, + debug_logs: deploy_data.flags.debug_logs, + log_dir: deploy_data.flags.log_dir.as_deref(), }); debug!("Constructed revoke command: {}", self_revoke_command); - let hostname = match deploy_data.cmd_overrides.hostname { - Some(ref x) => x, - None => &deploy_data.node.node_settings.hostname, + let hostname = match deploy_data.hostname { + Some(x) => x, + None => deploy_data.node.node_settings.hostname.as_str(), }; let ssh_addr = format!("{}@{}", deploy_defs.ssh_user, hostname); diff --git a/src/push.rs b/src/push.rs index ee55a123..d7966d27 100644 --- a/src/push.rs +++ b/src/push.rs @@ -46,12 +46,12 @@ pub enum PushProfileError { } pub struct PushProfileData<'a> { - pub supports_flakes: bool, - pub check_sigs: bool, + pub supports_flakes: &'a bool, + pub check_sigs: &'a bool, pub repo: &'a str, pub deploy_data: &'a data::DeployData<'a>, pub deploy_defs: &'a data::DeployDefs, - pub keep_result: bool, + pub keep_result: &'a bool, pub result_path: Option<&'a str>, pub extra_build_args: &'a [String], } @@ -95,13 +95,13 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr data.deploy_data.profile_name, data.deploy_data.node_name ); - let mut build_command = if data.supports_flakes { + let mut build_command = if *data.supports_flakes { Command::new("nix") } else { Command::new("nix-build") }; - if data.supports_flakes { + if *data.supports_flakes { build_command.arg("build").arg(derivation_name) } else { build_command.arg(derivation_name) @@ -208,9 +208,9 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr // .collect::>() .join(" "); - let hostname = match data.deploy_data.cmd_overrides.hostname { - Some(ref x) => x, - None => &data.deploy_data.node.node_settings.hostname, + let hostname = match data.deploy_data.hostname { + Some(x) => x, + None => data.deploy_data.node.node_settings.hostname.as_str(), }; let copy_exit_status = copy_command diff --git a/src/settings.rs b/src/settings.rs index 3bba2cac..061a6d24 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -2,16 +2,23 @@ // // SPDX-License-Identifier: MPL-2.0 +use clap::Clap; use envmnt::{self, ExpandOptions, ExpansionType}; use merge::Merge; use serde::{Deserialize, Deserializer}; use std::collections::HashMap; -#[derive(Deserialize, Debug, Clone, Merge)] +#[derive(Clap, Deserialize, Debug, Clone, Merge)] pub struct GenericSettings { + /// Override the SSH user with the given value + #[clap(long)] #[serde(rename(deserialize = "sshUser"))] pub ssh_user: Option, + /// Override the profile user with the given value + #[clap(long = "profile-user")] pub user: Option, + /// Override the SSH options used + #[clap(long, multiple_occurrences(true), multiple_values(true))] #[serde( skip_serializing_if = "Vec::is_empty", default, @@ -20,14 +27,23 @@ pub struct GenericSettings { )] #[merge(strategy = merge::vec::append)] pub ssh_opts: Vec, + /// Override if the connecting to the target node 
should be considered fast + #[clap(long)] #[serde(rename(deserialize = "fastConnection"))] pub fast_connection: Option, + /// Override if a rollback should be attempted if activation fails + #[clap(long)] #[serde(rename(deserialize = "autoRollback"))] pub auto_rollback: Option, + /// How long activation should wait for confirmation (if using magic-rollback) + #[clap(long)] #[serde(rename(deserialize = "confirmTimeout"))] pub confirm_timeout: Option, + /// Where to store temporary files (only used by magic-rollback) + #[clap(long)] #[serde(rename(deserialize = "tempPath"))] pub temp_path: Option, + #[clap(long)] #[serde(rename(deserialize = "magicRollback"))] pub magic_rollback: Option, } From 29a90ae89ac2c7da6bd14d1380870dc410086e90 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Thu, 26 Aug 2021 16:55:55 -0500 Subject: [PATCH 07/53] Refactor accesor for ssh uri from DeployData --- src/data.rs | 35 +++++++++++++++++++++++++++++++++++ src/deploy.rs | 51 +++++++++++++++------------------------------------ src/push.rs | 25 ++++++++----------------- 3 files changed, 58 insertions(+), 53 deletions(-) diff --git a/src/data.rs b/src/data.rs index 456a9b9a..de5393eb 100644 --- a/src/data.rs +++ b/src/data.rs @@ -220,6 +220,8 @@ pub struct DeployDefs { pub enum DeployDataDefsError { #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] NoProfileUser(String, String), + #[error("Value `hostname` is not define for profile {0} of node {1}")] + NoProfileHost(String, String), } impl<'a> DeployData<'a> { @@ -246,6 +248,39 @@ impl<'a> DeployData<'a> { }) } + pub fn ssh_uri(&'a self) -> Result { + + let hostname = match self.hostname { + Some(x) => x, + None => &self.node.node_settings.hostname, + }; + let curr_user = &whoami::username(); + let ssh_user = match self.merged_settings.ssh_user { + Some(ref u) => u, + None => curr_user, + }; + Ok(format!("ssh://{}@{}", ssh_user, hostname)) + } + + // can be dropped once ssh fully supports ipv6 uris + pub fn ssh_non_uri(&'a self) -> Result { + + let hostname = match self.hostname { + Some(x) => x, + None => &self.node.node_settings.hostname, + }; + let curr_user = &whoami::username(); + let ssh_user = match self.merged_settings.ssh_user { + Some(ref u) => u, + None => curr_user, + }; + Ok(format!("{}@{}", ssh_user, hostname)) + } + + pub fn ssh_opts(&'a self) -> impl Iterator { + self.merged_settings.ssh_opts.iter() + } + pub fn get_profile_path(&'a self) -> Result { let profile_user = self.get_profile_user()?; let profile_path = match self.profile.profile_settings.profile_path { diff --git a/src/deploy.rs b/src/deploy.rs index 906ff4e0..386cfbfd 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -204,20 +204,19 @@ pub enum ConfirmProfileError { "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}" )] SSHConfirmExit(Option), + + #[error("Deployment data invalid: {0}")] + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), } pub async fn confirm_profile( deploy_data: &data::DeployData<'_>, deploy_defs: &data::DeployDefs, temp_path: Cow<'_, str>, - ssh_addr: &str, ) -> Result<(), ConfirmProfileError> { let mut ssh_confirm_command = Command::new("ssh"); - ssh_confirm_command.arg(ssh_addr); - - for ssh_opt in &deploy_data.merged_settings.ssh_opts { - ssh_confirm_command.arg(ssh_opt); - } + ssh_confirm_command.arg(deploy_data.ssh_non_uri()?); + ssh_confirm_command.args(deploy_data.ssh_opts()); let lock_path = super::make_lock_path(&temp_path, &deploy_data.profile.profile_settings.path); @@ 
-264,6 +263,9 @@ pub enum DeployProfileError { #[error("Error confirming deployment: {0}")] Confirm(#[from] ConfirmProfileError), + + #[error("Deployment data invalid: {0}")] + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), } pub async fn deploy_profile( @@ -304,19 +306,9 @@ pub async fn deploy_profile( debug!("Constructed activation command: {}", self_activate_command); - let hostname = match deploy_data.hostname { - Some(x) => x, - None => deploy_data.node.node_settings.hostname.as_str(), - }; - - let ssh_addr = format!("{}@{}", deploy_defs.ssh_user, hostname); - let mut ssh_activate_command = Command::new("ssh"); - ssh_activate_command.arg(&ssh_addr); - - for ssh_opt in &deploy_data.merged_settings.ssh_opts { - ssh_activate_command.arg(&ssh_opt); - } + ssh_activate_command.arg(deploy_data.ssh_non_uri()?); + ssh_activate_command.args(deploy_data.ssh_opts()); if !magic_rollback || dry_activate { let ssh_activate_exit_status = ssh_activate_command @@ -354,11 +346,8 @@ pub async fn deploy_profile( info!("Creating activation waiter"); let mut ssh_wait_command = Command::new("ssh"); - ssh_wait_command.arg(&ssh_addr); - - for ssh_opt in &deploy_data.merged_settings.ssh_opts { - ssh_wait_command.arg(ssh_opt); - } + ssh_wait_command.arg(deploy_data.ssh_non_uri()?); + ssh_wait_command.args(deploy_data.ssh_opts()); let (send_activate, recv_activate) = tokio::sync::oneshot::channel(); let (send_activated, recv_activated) = tokio::sync::oneshot::channel(); @@ -396,7 +385,7 @@ pub async fn deploy_profile( info!("Success activating, attempting to confirm activation"); - let c = confirm_profile(deploy_data, deploy_defs, temp_path, &ssh_addr).await; + let c = confirm_profile(deploy_data, deploy_defs, temp_path).await; recv_activated.await.unwrap(); c?; } @@ -431,19 +420,9 @@ pub async fn revoke( debug!("Constructed revoke command: {}", self_revoke_command); - let hostname = match deploy_data.hostname { - Some(x) => x, - None => deploy_data.node.node_settings.hostname.as_str(), - }; - - let ssh_addr = format!("{}@{}", deploy_defs.ssh_user, hostname); - let mut ssh_activate_command = Command::new("ssh"); - ssh_activate_command.arg(&ssh_addr); - - for ssh_opt in &deploy_data.merged_settings.ssh_opts { - ssh_activate_command.arg(&ssh_opt); - } + ssh_activate_command.arg(deploy_data.ssh_non_uri()?); + ssh_activate_command.args(deploy_data.ssh_opts()); let ssh_revoke = ssh_activate_command .arg(self_revoke_command) diff --git a/src/push.rs b/src/push.rs index d7966d27..a5f53528 100644 --- a/src/push.rs +++ b/src/push.rs @@ -43,6 +43,9 @@ pub enum PushProfileError { Copy(std::io::Error), #[error("Nix copy command resulted in a bad exit code: {0:?}")] CopyExit(Option), + + #[error("Deployment data invalid: {0}")] + InvalidDeployDataDefs(#[from] data::DeployDataDefsError), } pub struct PushProfileData<'a> { @@ -198,26 +201,14 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr copy_command.arg("--no-check-sigs"); } - let ssh_opts_str = data - .deploy_data - .merged_settings - .ssh_opts - // This should provide some extra safety, but it also breaks for some reason, oh well - // .iter() - // .map(|x| format!("'{}'", x)) - // .collect::>() - .join(" "); - - let hostname = match data.deploy_data.hostname { - Some(x) => x, - None => data.deploy_data.node.node_settings.hostname.as_str(), - }; - let copy_exit_status = copy_command .arg("--to") - .arg(format!("ssh://{}@{}", data.deploy_defs.ssh_user, hostname)) + .arg(data.deploy_data.ssh_uri()?) 
.arg(&data.deploy_data.profile.profile_settings.path) - .env("NIX_SSHOPTS", ssh_opts_str) + .env( + "NIX_SSHOPTS", + data.deploy_data.ssh_opts().fold("".to_string(), |s, o| format!("{} {}", s, o)) + ) .status() .await .map_err(PushProfileError::Copy)?; From 492b4decc664927dbee77cf713a086ccdecf088e Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sun, 8 Aug 2021 15:09:32 -0500 Subject: [PATCH 08/53] Refactor & simplify the target setting resolver --- Cargo.lock | 18 ++++++ Cargo.toml | 1 + src/cli.rs | 158 ++++++++---------------------------------------- src/data.rs | 171 ++++++++++++++++++++++++++++++++++++++++------------ 4 files changed, 177 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1b1060e..65b9f993 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "aho-corasick" version = "0.7.15" @@ -140,6 +142,7 @@ dependencies = [ "flexi_logger", "fork", "futures-util", + "linked_hash_set", "log", "merge", "notify", @@ -398,6 +401,21 @@ version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lock_api" version = "0.4.2" diff --git a/Cargo.toml b/Cargo.toml index cdeeb6af..c2e34c27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ clap = "3.0.0-beta.2" flexi_logger = "0.16" fork = "0.1" futures-util = "0.3.6" +linked_hash_set = "0.1.4" log = "0.4" merge = "0.1.0" notify = "5.0.0-pre.3" diff --git a/src/cli.rs b/src/cli.rs index 51d2f1d1..37493dda 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -67,14 +67,13 @@ struct PromptPart<'a> { fn print_deployment( parts: &[( - &data::Target, - data::DeployData, + &data::DeployData, data::DeployDefs, )], ) -> Result<(), toml::ser::Error> { let mut part_map: HashMap> = HashMap::new(); - for (_, data, defs) in parts { + for (data, defs) in parts { part_map .entry(data.node_name.to_string()) .or_insert_with(HashMap::new) @@ -110,8 +109,7 @@ pub enum PromptDeploymentError { fn prompt_deployment( parts: &[( - &data::Target, - data::DeployData, + &data::DeployData, data::DeployDefs, )], ) -> Result<(), PromptDeploymentError> { @@ -166,12 +164,9 @@ pub enum RunDeployError { DeployProfile(#[from] deploy::deploy::DeployProfileError), #[error("Failed to push profile: {0}")] PushProfile(#[from] deploy::push::PushProfileError), - #[error("No profile named `{0}` was found")] - ProfileNotFound(String), - #[error("No node named `{0}` was found")] - NodeNotFound(String), - #[error("Profile was provided without a node name")] - ProfileWithoutNode, + #[error("Failed to resolve target: {0}")] + ResolveTarget(#[from] data::ResolveTargetError), + #[error("Error processing deployment definitions: {0}")] InvalidDeployDataDefs(#[from] data::DeployDataDefsError), #[error("Failed to make printable TOML of deployment: {0}")] @@ -182,13 +177,6 @@ pub enum RunDeployError { RevokeProfile(#[from] deploy::deploy::RevokeProfileError), } -type ToDeploy<'a> = 
Vec<( - &'a data::Target, - &'a settings::Root, - (&'a str, &'a settings::Node), - (&'a str, &'a settings::Profile), -)>; - async fn run_deploy( targets: Vec, settings: Vec, @@ -197,125 +185,27 @@ async fn run_deploy( cmd_settings: settings::GenericSettings, cmd_flags: data::Flags, ) -> Result<(), RunDeployError> { - let to_deploy: ToDeploy = targets - .iter() - .zip(&settings) - .map(|(target, root)| { - let to_deploys: ToDeploy = match (&target.node, &target.profile) { - (Some(node_name), Some(profile_name)) => { - let node = match root.nodes.get(node_name) { - Some(x) => x, - None => return Err(RunDeployError::NodeNotFound(node_name.clone())), - }; - let profile = match node.node_settings.profiles.get(profile_name) { - Some(x) => x, - None => return Err(RunDeployError::ProfileNotFound(profile_name.clone())), - }; - - vec![( - &target, - &root, - (node_name.as_str(), node), - (profile_name.as_str(), profile), - )] - } - (Some(node_name), None) => { - let node = match root.nodes.get(node_name) { - Some(x) => x, - None => return Err(RunDeployError::NodeNotFound(node_name.clone())), - }; - - let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); - - for profile_name in [ - node.node_settings.profiles_order.iter().collect(), - node.node_settings.profiles.keys().collect::>(), - ] - .concat() - { - let profile = match node.node_settings.profiles.get(profile_name) { - Some(x) => x, - None => { - return Err(RunDeployError::ProfileNotFound(profile_name.clone())) - } - }; - - if !profiles_list.iter().any(|(n, _)| n == profile_name) { - profiles_list.push((profile_name, profile)); - } - } - - profiles_list - .into_iter() - .map(|x| (target, root, (node_name.as_str(), node), x)) - .collect() - } - (None, None) => { - let mut l = Vec::new(); - - for (node_name, node) in &root.nodes { - let mut profiles_list: Vec<(&str, &settings::Profile)> = Vec::new(); - - for profile_name in [ - node.node_settings.profiles_order.iter().collect(), - node.node_settings.profiles.keys().collect::>(), - ] - .concat() - { - let profile = match node.node_settings.profiles.get(profile_name) { - Some(x) => x, - None => { - return Err(RunDeployError::ProfileNotFound( - profile_name.clone(), - )) - } - }; - - if !profiles_list.iter().any(|(n, _)| n == profile_name) { - profiles_list.push((profile_name, profile)); - } - } - - let ll: ToDeploy = profiles_list - .into_iter() - .map(|x| (target, root, (node_name.as_str(), node), x)) - .collect(); - - l.extend(ll); - } - - l - } - (None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode), - }; - Ok(to_deploys) - }) - .collect::, RunDeployError>>()? 
- .into_iter() - .flatten() - .collect(); + let deploy_datas_ = targets.into_iter().zip(&settings) + .map( + |(target, root)| + target.resolve( + &root, + &cmd_settings, + &cmd_flags, + hostname.as_deref(), + ) + ) + .collect::>>, data::ResolveTargetError>>()?; + let deploy_datas: Vec<&data::DeployData<'_>> = deploy_datas_.iter().flatten().collect(); let mut parts: Vec<( - &data::Target, - data::DeployData, + &data::DeployData, data::DeployDefs, )> = Vec::new(); - for (target, root, (node_name, node), (profile_name, profile)) in to_deploy { - let deploy_data = data::make_deploy_data( - &root.generic_settings, - &cmd_settings, - &cmd_flags, - node, - node_name, - profile, - profile_name, - hostname.as_deref(), - ); - + for deploy_data in deploy_datas { let deploy_defs = deploy_data.defs()?; - - parts.push((target, deploy_data, deploy_defs)); + parts.push((deploy_data, deploy_defs)); } if cmd_flags.interactive { @@ -324,11 +214,11 @@ async fn run_deploy( print_deployment(&parts[..])?; } - for (target, deploy_data, deploy_defs) in &parts { + for (deploy_data, deploy_defs) in &parts { deploy::push::push_profile(deploy::push::PushProfileData { supports_flakes: &supports_flakes, check_sigs: &cmd_flags.checksigs, - repo: &target.repo, + repo: &deploy_data.repo, deploy_data: &deploy_data, deploy_defs: &deploy_defs, keep_result: &cmd_flags.keep_result, @@ -344,7 +234,7 @@ async fn run_deploy( // In case of an error rollback any previoulsy made deployment. // Rollbacks adhere to the global seeting to auto_rollback and secondary // the profile's configuration - for (_, deploy_data, deploy_defs) in &parts { + for (deploy_data, deploy_defs) in &parts { if let Err(e) = deploy::deploy::deploy_profile(deploy_data, deploy_defs, cmd_flags.dry_activate).await { error!("{}", e); diff --git a/src/data.rs b/src/data.rs index de5393eb..82595880 100644 --- a/src/data.rs +++ b/src/data.rs @@ -3,6 +3,7 @@ // // SPDX-License-Identifier: MPL-2.0 +use linked_hash_set::LinkedHashSet; use rnix::{types::*, SyntaxKind::*}; use merge::Merge; use thiserror::Error; @@ -24,6 +25,98 @@ pub enum ParseTargetError { #[error("Unrecognized node or token encountered")] Unrecognized, } + +#[derive(Error, Debug)] +pub enum ResolveTargetError { + #[error("No node named `{0}` was found in repo `{1}`")] + NodeNotFound(String, String), + #[error("No profile named `{0}` was on node `{1}` found in repo `{2}`")] + ProfileNotFound(String, String, String), + #[error("Profile was provided without a node name for repo `{0}`")] + ProfileWithoutNode(String), +} + +impl<'a> Target { + pub fn resolve( + self, + r: &'a settings::Root, + cs: &'a settings::GenericSettings, + cf: &'a Flags, + hostname: Option<&'a str>, + ) -> Result>, ResolveTargetError> { + match self { + Target{repo, node: Some(node), profile} => { + let node_ = match r.nodes.get(&node) { + Some(x) => x, + None => return Err(ResolveTargetError::NodeNotFound( + node.to_owned(), repo.to_owned() + )), + }; + if let Some(profile) = profile { + let profile_ = match node_.node_settings.profiles.get(&profile) { + Some(x) => x, + None => return Err(ResolveTargetError::ProfileNotFound( + profile.to_owned(), node.to_owned(), repo.to_owned() + )), + }; + Ok({ + let d = DeployData::new( + repo.to_owned(), + node.to_owned(), + profile.to_owned(), + &r.generic_settings, + cs, + cf, + node_, + profile_, + hostname, + ); + vec![d] + }) + } else { + let ordered_profile_names: LinkedHashSet:: = node_.node_settings.profiles_order.iter().cloned().collect(); + let profile_names: LinkedHashSet:: = 
node_.node_settings.profiles.keys().cloned().collect(); + let prioritized_profile_names: LinkedHashSet::<&String> = ordered_profile_names.union(&profile_names).collect(); + Ok( + prioritized_profile_names + .iter() + .map( + |p| + Target{repo: repo.to_owned(), node: Some(node.to_owned()), profile: Some(p.to_string())}.resolve( + r, cs, cf, hostname, + ) + ) + .collect::>>, ResolveTargetError>>()? + .into_iter().flatten().collect::>>() + ) + } + }, + Target{repo, node: None, profile: None} => { + if let Some(hostname) = hostname { + todo!() // create issue to discuss: + // if allowed, it would be really awkward + // to override the hostname for a series of nodes at once + } + Ok( + r.nodes + .iter() + .map( + |(n, _)| + Target{repo: repo.to_owned(), node: Some(n.to_string()), profile: None}.resolve( + r, cs, cf, hostname, + ) + ) + .collect::>>, ResolveTargetError>>()? + .into_iter().flatten().collect::>>() + ) + }, + Target{repo, node: None, profile: Some(_)} => return Err(ResolveTargetError::ProfileWithoutNode( + repo.to_owned() + )) + } + } +} + impl std::str::FromStr for Target { type Err = ParseTargetError; @@ -44,7 +137,7 @@ impl std::str::FromStr for Target { Some(x) => x, None => { return Ok(Target { - repo, + repo: repo.to_owned(), node: None, profile: None, }) @@ -54,7 +147,7 @@ impl std::str::FromStr for Target { let mut node_over = false; for entry in first_child.children_with_tokens() { - let x: Option = match (entry.kind(), node_over) { + let x = match (entry.kind(), node_over) { (TOKEN_DOT, false) => { node_over = true; None @@ -86,9 +179,9 @@ impl std::str::FromStr for Target { } Ok(Target { - repo, - node, - profile, + repo: repo.to_owned(), + node: node, + profile: profile, }) } } @@ -161,9 +254,10 @@ fn test_deploy_target_from_str() { #[derive(Debug, Clone)] pub struct DeployData<'a> { - pub node_name: &'a str, + pub repo: String, + pub node_name: String, + pub profile_name: String, pub node: &'a settings::Node, - pub profile_name: &'a str, pub profile: &'a settings::Profile, pub hostname: Option<&'a str>, @@ -225,6 +319,39 @@ pub enum DeployDataDefsError { } impl<'a> DeployData<'a> { + + fn new( + repo: String, + node_name: String, + profile_name: String, + top_settings: &'a settings::GenericSettings, + cmd_settings: &'a settings::GenericSettings, + flags: &'a Flags, + node: &'a settings::Node, + profile: &'a settings::Profile, + hostname: Option<&'a str>, + ) -> DeployData<'a> { + let mut merged_settings = cmd_settings.clone(); + merged_settings.merge(profile.generic_settings.clone()); + merged_settings.merge(node.generic_settings.clone()); + merged_settings.merge(top_settings.clone()); + + // if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { + // merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); + // } + + DeployData { + repo, + node_name, + profile_name, + node, + profile, + hostname, + flags, + merged_settings, + } + } + pub fn defs(&'a self) -> Result { let ssh_user = match self.merged_settings.ssh_user { Some(ref u) => u.clone(), @@ -312,33 +439,3 @@ impl<'a> DeployData<'a> { Ok(profile_user) } } - -pub fn make_deploy_data<'a>( - top_settings: &'a settings::GenericSettings, - cmd_settings: &'a settings::GenericSettings, - flags: &'a Flags, - node: &'a settings::Node, - node_name: &'a str, - profile: &'a settings::Profile, - profile_name: &'a str, - hostname: Option<&'a str>, -) -> DeployData<'a> { - let mut merged_settings = cmd_settings.clone(); - merged_settings.merge(profile.generic_settings.clone()); - 
merged_settings.merge(node.generic_settings.clone()); - merged_settings.merge(top_settings.clone()); - - // if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { - // merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); - // } - - DeployData { - node_name, - node, - profile_name, - profile, - hostname, - flags, - merged_settings, - } -} From 011b3519b7231f2c354baea05740edf28fb2631c Mon Sep 17 00:00:00 2001 From: David Arnold Date: Wed, 11 Aug 2021 14:55:39 -0500 Subject: [PATCH 09/53] Refactor merge DeployDefs into DeployData (single view) --- src/cli.rs | 2 +- src/data.rs | 28 ++++++++++++++-------------- src/deploy.rs | 6 +++--- src/push.rs | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 37493dda..51a0a1a7 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -168,7 +168,7 @@ pub enum RunDeployError { ResolveTarget(#[from] data::ResolveTargetError), #[error("Error processing deployment definitions: {0}")] - InvalidDeployDataDefs(#[from] data::DeployDataDefsError), + DeployData(#[from] data::DeployDataError), #[error("Failed to make printable TOML of deployment: {0}")] TomlFormat(#[from] toml::ser::Error), #[error("{0}")] diff --git a/src/data.rs b/src/data.rs index 82595880..66c9063c 100644 --- a/src/data.rs +++ b/src/data.rs @@ -265,6 +265,14 @@ pub struct DeployData<'a> { pub merged_settings: settings::GenericSettings, } +#[derive(Error, Debug)] +pub enum DeployDataError { + #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] + NoProfileUser(String, String), + #[error("Value `hostname` is not define for profile {0} of node {1}")] + NoProfileHost(String, String), +} + #[derive(Clap, Debug, Clone)] pub struct Flags { /// Check signatures when using `nix copy` @@ -310,14 +318,6 @@ pub struct DeployDefs { pub sudo: Option, } -#[derive(Error, Debug)] -pub enum DeployDataDefsError { - #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] - NoProfileUser(String, String), - #[error("Value `hostname` is not define for profile {0} of node {1}")] - NoProfileHost(String, String), -} - impl<'a> DeployData<'a> { fn new( @@ -352,7 +352,7 @@ impl<'a> DeployData<'a> { } } - pub fn defs(&'a self) -> Result { + pub fn defs(&'a self) -> Result { let ssh_user = match self.merged_settings.ssh_user { Some(ref u) => u.clone(), None => whoami::username(), @@ -375,7 +375,7 @@ impl<'a> DeployData<'a> { }) } - pub fn ssh_uri(&'a self) -> Result { + pub fn ssh_uri(&'a self) -> Result { let hostname = match self.hostname { Some(x) => x, @@ -390,7 +390,7 @@ impl<'a> DeployData<'a> { } // can be dropped once ssh fully supports ipv6 uris - pub fn ssh_non_uri(&'a self) -> Result { + pub fn ssh_non_uri(&'a self) -> Result { let hostname = match self.hostname { Some(x) => x, @@ -408,7 +408,7 @@ impl<'a> DeployData<'a> { self.merged_settings.ssh_opts.iter() } - pub fn get_profile_path(&'a self) -> Result { + pub fn get_profile_path(&'a self) -> Result { let profile_user = self.get_profile_user()?; let profile_path = match self.profile.profile_settings.profile_path { None => match &profile_user[..] 
{ @@ -423,13 +423,13 @@ impl<'a> DeployData<'a> { Ok(profile_path) } - pub fn get_profile_user(&'a self) -> Result { + pub fn get_profile_user(&'a self) -> Result { let profile_user = match self.merged_settings.user { Some(ref x) => x.clone(), None => match self.merged_settings.ssh_user { Some(ref x) => x.clone(), None => { - return Err(DeployDataDefsError::NoProfileUser( + return Err(DeployDataError::NoProfileUser( self.profile_name.to_owned(), self.node_name.to_owned(), )) diff --git a/src/deploy.rs b/src/deploy.rs index 386cfbfd..3feb538c 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -206,7 +206,7 @@ pub enum ConfirmProfileError { SSHConfirmExit(Option), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] data::DeployDataDefsError), + DeployData(#[from] data::DeployDataError), } pub async fn confirm_profile( @@ -265,7 +265,7 @@ pub enum DeployProfileError { Confirm(#[from] ConfirmProfileError), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] data::DeployDataDefsError), + DeployData(#[from] data::DeployDataError), } pub async fn deploy_profile( @@ -404,7 +404,7 @@ pub enum RevokeProfileError { SSHRevokeExit(Option), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] data::DeployDataDefsError), + DeployData(#[from] data::DeployDataError), } pub async fn revoke( deploy_data: &data::DeployData<'_>, diff --git a/src/push.rs b/src/push.rs index a5f53528..6c0f0b92 100644 --- a/src/push.rs +++ b/src/push.rs @@ -45,7 +45,7 @@ pub enum PushProfileError { CopyExit(Option), #[error("Deployment data invalid: {0}")] - InvalidDeployDataDefs(#[from] data::DeployDataDefsError), + DeployData(#[from] data::DeployDataError), } pub struct PushProfileData<'a> { From ce40968409f4ad49e768add1616c04dd9bb4e053 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Thu, 26 Aug 2021 17:08:17 -0500 Subject: [PATCH 10/53] Refactor & simplify the target setting resolver --- src/cli.rs | 18 ++- src/data.rs | 86 +++++++++-- src/deploy.rs | 390 ++++++++++++++++++++++++++++---------------------- src/push.rs | 2 +- 4 files changed, 303 insertions(+), 193 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 51a0a1a7..b2fcb9f8 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -235,7 +235,14 @@ async fn run_deploy( // Rollbacks adhere to the global seeting to auto_rollback and secondary // the profile's configuration for (deploy_data, deploy_defs) in &parts { - if let Err(e) = deploy::deploy::deploy_profile(deploy_data, deploy_defs, cmd_flags.dry_activate).await + if let Err(e) = deploy::deploy::deploy_profile( + &deploy_data.node_name, + &deploy_data.profile_name, + deploy::deploy::SshCommand::from_data(&deploy_data)?, + deploy::deploy::ActivateCommand::from_data(&deploy_data), + deploy::deploy::WaitCommand::from_data(&deploy_data), + deploy::deploy::ConfirmCommand::from_data(&deploy_data), + ).await { error!("{}", e); if cmd_flags.dry_activate { @@ -246,9 +253,14 @@ async fn run_deploy( // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) - for (deploy_data, deploy_defs) in &succeeded { + for (deploy_data, _) in &succeeded { if deploy_data.merged_settings.auto_rollback.unwrap_or(true) { - deploy::deploy::revoke(*deploy_data, *deploy_defs).await?; + deploy::deploy::revoke( + &deploy_data.node_name, + &deploy_data.profile_name, + deploy::deploy::SshCommand::from_data(&deploy_data)?, + deploy::deploy::RevokeCommand::from_data(&deploy_data), + ).await?; } } } diff --git a/src/data.rs b/src/data.rs 
index 66c9063c..3dddc702 100644 --- a/src/data.rs +++ b/src/data.rs @@ -34,6 +34,8 @@ pub enum ResolveTargetError { ProfileNotFound(String, String, String), #[error("Profile was provided without a node name for repo `{0}`")] ProfileWithoutNode(String), + #[error("Deployment data invalid: {0}")] + InvalidDeployDataError(#[from] DeployDataError), } impl<'a> Target { @@ -70,7 +72,7 @@ impl<'a> Target { node_, profile_, hostname, - ); + )?; vec![d] }) } else { @@ -257,12 +259,18 @@ pub struct DeployData<'a> { pub repo: String, pub node_name: String, pub profile_name: String, - pub node: &'a settings::Node, - pub profile: &'a settings::Profile, + pub hostname: Option<&'a str>, pub flags: &'a Flags, + pub node: &'a settings::Node, + pub profile: &'a settings::Profile, pub merged_settings: settings::GenericSettings, + + pub ssh_user: String, + pub temp_path: String, + pub profile_path: String, + pub sudo: Option, } #[derive(Error, Debug)] @@ -330,7 +338,7 @@ impl<'a> DeployData<'a> { node: &'a settings::Node, profile: &'a settings::Profile, hostname: Option<&'a str>, - ) -> DeployData<'a> { + ) -> Result, DeployDataError> { let mut merged_settings = cmd_settings.clone(); merged_settings.merge(profile.generic_settings.clone()); merged_settings.merge(node.generic_settings.clone()); @@ -339,17 +347,49 @@ impl<'a> DeployData<'a> { // if let Some(ref ssh_opts) = cmd_overrides.ssh_opts { // merged_settings.ssh_opts = ssh_opts.split(' ').map(|x| x.to_owned()).collect(); // } + let temp_path = match merged_settings.temp_path { + Some(ref x) => x.to_owned(), + None => "/tmp".to_string(), + }; + let profile_user = match merged_settings.user { + Some(ref x) => x.to_owned(), + None => match merged_settings.ssh_user { + Some(ref x) => x.to_owned(), + None => { + return Err(DeployDataError::NoProfileUser(profile_name, node_name)) + } + }, + }; + let profile_path = match profile.profile_settings.profile_path { + None => format!("/nix/var/nix/profiles/{}", match &profile_user[..] 
{ + "root" => profile_name.to_owned(), + _ => format!("per-user/{}/{}", profile_user, profile_name), + }), + Some(ref x) => x.to_owned(), + }; + let ssh_user = match merged_settings.ssh_user { + Some(ref u) => u.to_owned(), + None => whoami::username(), + }; + let sudo = match merged_settings.user { + Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), + _ => None, + }; - DeployData { + Ok(DeployData { repo, node_name, profile_name, node, profile, hostname, + ssh_user, + temp_path, + profile_path, + sudo, flags, merged_settings, - } + }) } pub fn defs(&'a self) -> Result { @@ -357,15 +397,9 @@ impl<'a> DeployData<'a> { Some(ref u) => u.clone(), None => whoami::username(), }; - let profile_user = self.get_profile_user()?; - let profile_path = self.get_profile_path()?; - - let sudo: Option = match self.merged_settings.user { - Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), - _ => None, - }; + let sudo = self.sudo()?; Ok(DeployDefs { ssh_user, @@ -375,6 +409,28 @@ impl<'a> DeployData<'a> { }) } + pub fn sudo(&'a self) -> Result, DeployDataError> { + let ssh_user = match self.merged_settings.ssh_user { + Some(ref u) => u.clone(), + None => whoami::username(), + }; + Ok( + match self.merged_settings.user { + Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), + _ => None, + } + ) + } + + pub fn temp_path(&'a self) -> Result { + Ok( + match self.merged_settings.temp_path { + Some(ref x) => x.to_owned(), + None => "/tmp".to_string(), + } + ) + } + pub fn ssh_uri(&'a self) -> Result { let hostname = match self.hostname { @@ -404,8 +460,8 @@ impl<'a> DeployData<'a> { Ok(format!("{}@{}", ssh_user, hostname)) } - pub fn ssh_opts(&'a self) -> impl Iterator { - self.merged_settings.ssh_opts.iter() + pub fn ssh_opts(&'a self) -> Result, DeployDataError> { + Ok(self.merged_settings.ssh_opts.iter()) } pub fn get_profile_path(&'a self) -> Result { diff --git a/src/deploy.rs b/src/deploy.rs index 3feb538c..23c57b21 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -5,18 +5,41 @@ // SPDX-License-Identifier: MPL-2.0 use log::{debug, info}; -use std::borrow::Cow; use thiserror::Error; use tokio::process::Command; use crate::data; -struct ActivateCommandData<'a> { - sudo: &'a Option, +pub struct SshCommand<'a> { + hoststring: String, + opts: &'a Vec, +} + +impl<'a> SshCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Result { + let hostname = match d.hostname { + Some(x) => x, + None => &d.node.node_settings.hostname, + }; + let hoststring = format!("{}@{}", &d.ssh_user, hostname); + let opts = d.merged_settings.ssh_opts.as_ref(); + Ok(SshCommand {hoststring, opts}) + } + + fn build(&self) -> Command { + let mut cmd = Command::new("ssh"); + cmd.arg(&self.hoststring); + cmd.args(self.opts.iter()); + cmd + } +} + +pub struct ActivateCommand<'a> { + sudo: Option<&'a str>, profile_path: &'a str, + temp_path: &'a str, closure: &'a str, auto_rollback: bool, - temp_path: &'a str, confirm_timeout: u16, magic_rollback: bool, debug_logs: bool, @@ -24,49 +47,66 @@ struct ActivateCommandData<'a> { dry_activate: bool, } -fn build_activate_command(data: &ActivateCommandData) -> String { - let mut self_activate_command = format!("{}/activate-rs", data.closure); - - if data.debug_logs { - self_activate_command = format!("{} --debug-logs", self_activate_command); +impl<'a> ActivateCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + ActivateCommand { + sudo: d.sudo.as_deref(), + profile_path: &d.profile_path, + temp_path: 
&d.temp_path, + closure: &d.profile.profile_settings.path, + auto_rollback: d.merged_settings.auto_rollback.unwrap_or(true), + confirm_timeout: d.merged_settings.confirm_timeout.unwrap_or(30), + magic_rollback: d.merged_settings.magic_rollback.unwrap_or(true), + debug_logs: d.flags.debug_logs, + log_dir: d.flags.log_dir.as_deref(), + dry_activate: d.flags.dry_activate, + } } - if let Some(log_dir) = data.log_dir { - self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir); - } + fn build(self) -> String { + let mut cmd = format!("{}/activate-rs", self.closure); - self_activate_command = format!( - "{} activate '{}' '{}' --temp-path '{}'", - self_activate_command, data.closure, data.profile_path, data.temp_path - ); + if self.debug_logs { + cmd = format!("{} --debug-logs", cmd); + } - self_activate_command = format!( - "{} --confirm-timeout {}", - self_activate_command, data.confirm_timeout - ); + if let Some(log_dir) = self.log_dir { + cmd = format!("{} --log-dir {}", cmd, log_dir); + } - if data.magic_rollback { - self_activate_command = format!("{} --magic-rollback", self_activate_command); - } + cmd = format!( + "{} activate '{}' '{}' --temp-path '{}'", + cmd, self.closure, self.profile_path, self.temp_path + ); - if data.auto_rollback { - self_activate_command = format!("{} --auto-rollback", self_activate_command); - } + cmd = format!( + "{} --confirm-timeout {}", + cmd, self.confirm_timeout + ); - if data.dry_activate { - self_activate_command = format!("{} --dry-activate", self_activate_command); - } + if self.magic_rollback { + cmd = format!("{} --magic-rollback", cmd); + } - if let Some(sudo_cmd) = &data.sudo { - self_activate_command = format!("{} {}", sudo_cmd, self_activate_command); - } + if self.auto_rollback { + cmd = format!("{} --auto-rollback", cmd); + } + + if self.dry_activate { + cmd = format!("{} --dry-activate", cmd); + } + + if let Some(sudo_cmd) = &self.sudo { + cmd = format!("{} {}", sudo_cmd, cmd); + } - self_activate_command + cmd + } } #[test] fn test_activation_command_builder() { - let sudo = Some("sudo -u test".to_string()); + let sudo = Some("sudo -u test"); let profile_path = "/blah/profiles/test"; let closure = "/nix/store/blah/etc"; let auto_rollback = true; @@ -78,8 +118,8 @@ fn test_activation_command_builder() { let log_dir = Some("/tmp/something.txt"); assert_eq!( - build_activate_command(&ActivateCommandData { - sudo: &sudo, + ActivateCommand { + sudo, profile_path, closure, auto_rollback, @@ -89,113 +129,165 @@ fn test_activation_command_builder() { debug_logs, log_dir, dry_activate - }), + }.build(), "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt activate '/nix/store/blah/etc' '/blah/profiles/test' --temp-path '/tmp' --confirm-timeout 30 --magic-rollback --auto-rollback" .to_string(), ); } -struct WaitCommandData<'a> { - sudo: &'a Option, +pub struct WaitCommand<'a> { + sudo: Option<&'a str>, closure: &'a str, temp_path: &'a str, debug_logs: bool, log_dir: Option<&'a str>, } -fn build_wait_command(data: &WaitCommandData) -> String { - let mut self_activate_command = format!("{}/activate-rs", data.closure); - - if data.debug_logs { - self_activate_command = format!("{} --debug-logs", self_activate_command); +impl<'a> WaitCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + WaitCommand { + sudo: d.sudo.as_deref(), + temp_path: &d.temp_path, + closure: &d.profile.profile_settings.path, + debug_logs: d.flags.debug_logs, + log_dir: d.flags.log_dir.as_deref(), + } } - if 
let Some(log_dir) = data.log_dir { - self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir); - } + fn build(self) -> String { + let mut cmd = format!("{}/activate-rs", self.closure); - self_activate_command = format!( - "{} wait '{}' --temp-path '{}'", - self_activate_command, data.closure, data.temp_path, - ); + if self.debug_logs { + cmd = format!("{} --debug-logs", cmd); + } - if let Some(sudo_cmd) = &data.sudo { - self_activate_command = format!("{} {}", sudo_cmd, self_activate_command); - } + if let Some(log_dir) = self.log_dir { + cmd = format!("{} --log-dir {}", cmd, log_dir); + } + + cmd = format!( + "{} wait '{}' --temp-path '{}'", + cmd, self.closure, self.temp_path, + ); + + if let Some(sudo_cmd) = &self.sudo { + cmd = format!("{} {}", sudo_cmd, cmd); + } - self_activate_command + cmd + } } #[test] fn test_wait_command_builder() { - let sudo = Some("sudo -u test".to_string()); + let sudo = Some("sudo -u test"); let closure = "/nix/store/blah/etc"; let temp_path = "/tmp"; let debug_logs = true; let log_dir = Some("/tmp/something.txt"); assert_eq!( - build_wait_command(&WaitCommandData { - sudo: &sudo, + WaitCommand { + sudo, closure, temp_path, debug_logs, log_dir - }), + }.build(), "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt wait '/nix/store/blah/etc' --temp-path '/tmp'" .to_string(), ); } -struct RevokeCommandData<'a> { - sudo: &'a Option, +pub struct RevokeCommand<'a> { + sudo: Option<&'a str>, closure: &'a str, profile_path: &'a str, debug_logs: bool, log_dir: Option<&'a str>, } -fn build_revoke_command(data: &RevokeCommandData) -> String { - let mut self_activate_command = format!("{}/activate-rs", data.closure); - - if data.debug_logs { - self_activate_command = format!("{} --debug-logs", self_activate_command); +impl<'a> RevokeCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + RevokeCommand { + sudo: d.sudo.as_deref(), + profile_path: &d.profile_path, + closure: &d.profile.profile_settings.path, + debug_logs: d.flags.debug_logs, + log_dir: d.flags.log_dir.as_deref(), + } } - if let Some(log_dir) = data.log_dir { - self_activate_command = format!("{} --log-dir {}", self_activate_command, log_dir); - } - self_activate_command = format!("{} revoke '{}'", self_activate_command, data.profile_path); + fn build(self) -> String { + let mut cmd = format!("{}/activate-rs", self.closure); - if let Some(sudo_cmd) = &data.sudo { - self_activate_command = format!("{} {}", sudo_cmd, self_activate_command); - } + if self.debug_logs { + cmd = format!("{} --debug-logs", cmd); + } + + if let Some(log_dir) = self.log_dir { + cmd = format!("{} --log-dir {}", cmd, log_dir); + } + + cmd = format!("{} revoke '{}'", cmd, self.profile_path); + + if let Some(sudo_cmd) = &self.sudo { + cmd = format!("{} {}", sudo_cmd, cmd); + } - self_activate_command + cmd + } } #[test] fn test_revoke_command_builder() { - let sudo = Some("sudo -u test".to_string()); + let sudo = Some("sudo -u test"); let closure = "/nix/store/blah/etc"; let profile_path = "/nix/var/nix/per-user/user/profile"; let debug_logs = true; let log_dir = Some("/tmp/something.txt"); assert_eq!( - build_revoke_command(&RevokeCommandData { - sudo: &sudo, + RevokeCommand { + sudo, closure, profile_path, debug_logs, log_dir - }), + }.build(), "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt revoke '/nix/var/nix/per-user/user/profile'" .to_string(), ); } +pub struct ConfirmCommand<'a> { + sudo: Option<&'a str>, + 
temp_path: &'a str, + closure: &'a str, +} + +impl<'a> ConfirmCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + ConfirmCommand { + sudo: d.sudo.as_deref(), + temp_path: &d.temp_path, + closure: &d.profile.profile_settings.path, + } + } + + + fn build(self) -> String { + let lock_path = super::make_lock_path(&self.temp_path, &self.closure); + + let mut cmd = format!("rm {}", lock_path); + if let Some(sudo_cmd) = &self.sudo { + cmd = format!("{} {}", sudo_cmd, cmd); + } + cmd + } +} + #[derive(Error, Debug)] pub enum ConfirmProfileError { #[error("Failed to run confirmation command over SSH (the server should roll back): {0}")] @@ -204,34 +296,24 @@ pub enum ConfirmProfileError { "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}" )] SSHConfirmExit(Option), - - #[error("Deployment data invalid: {0}")] - DeployData(#[from] data::DeployDataError), } pub async fn confirm_profile( - deploy_data: &data::DeployData<'_>, - deploy_defs: &data::DeployDefs, - temp_path: Cow<'_, str>, + ssh: SshCommand<'_>, + confirm: ConfirmCommand<'_>, ) -> Result<(), ConfirmProfileError> { - let mut ssh_confirm_command = Command::new("ssh"); - ssh_confirm_command.arg(deploy_data.ssh_non_uri()?); - ssh_confirm_command.args(deploy_data.ssh_opts()); - let lock_path = super::make_lock_path(&temp_path, &deploy_data.profile.profile_settings.path); + let mut ssh_confirm_cmd = ssh.build(); - let mut confirm_command = format!("rm {}", lock_path); - if let Some(sudo_cmd) = &deploy_defs.sudo { - confirm_command = format!("{} {}", sudo_cmd, confirm_command); - } + let confirm_cmd = confirm.build(); debug!( "Attempting to run command to confirm deployment: {}", - confirm_command + confirm_cmd ); - let ssh_confirm_exit_status = ssh_confirm_command - .arg(confirm_command) + let ssh_confirm_exit_status = ssh_confirm_cmd + .arg(confirm_cmd) .status() .await .map_err(ConfirmProfileError::SSHConfirm)?; @@ -263,56 +345,31 @@ pub enum DeployProfileError { #[error("Error confirming deployment: {0}")] Confirm(#[from] ConfirmProfileError), - - #[error("Deployment data invalid: {0}")] - DeployData(#[from] data::DeployDataError), } pub async fn deploy_profile( - deploy_data: &data::DeployData<'_>, - deploy_defs: &data::DeployDefs, - dry_activate: bool, + node_name: &str, + profile_name: &str, + ssh: SshCommand<'_>, + activate: ActivateCommand<'_>, + wait: WaitCommand<'_>, + confirm: ConfirmCommand<'_>, ) -> Result<(), DeployProfileError> { - if !dry_activate { - info!( - "Activating profile `{}` for node `{}`", - deploy_data.profile_name, deploy_data.node_name - ); + if !activate.dry_activate { + info!("Activating profile `{}` for node `{}`", profile_name, node_name); } + let dry_activate = &activate.dry_activate.clone(); + let magic_rollback = &activate.magic_rollback.clone(); - let temp_path: Cow = match &deploy_data.merged_settings.temp_path { - Some(x) => x.into(), - None => "/tmp".into(), - }; - - let confirm_timeout = deploy_data.merged_settings.confirm_timeout.unwrap_or(30); - - let magic_rollback = deploy_data.merged_settings.magic_rollback.unwrap_or(true); - - let auto_rollback = deploy_data.merged_settings.auto_rollback.unwrap_or(true); + let activate_cmd = activate.build(); - let self_activate_command = build_activate_command(&ActivateCommandData { - sudo: &deploy_defs.sudo, - profile_path: &deploy_defs.profile_path, - closure: &deploy_data.profile.profile_settings.path, - auto_rollback, - temp_path: &temp_path, - confirm_timeout, - magic_rollback, - debug_logs: 
deploy_data.flags.debug_logs, - log_dir: deploy_data.flags.log_dir.as_deref(), - dry_activate, - }); + debug!("Constructed activation command: {}", activate_cmd); - debug!("Constructed activation command: {}", self_activate_command); + let mut ssh_activate_cmd = ssh.build(); - let mut ssh_activate_command = Command::new("ssh"); - ssh_activate_command.arg(deploy_data.ssh_non_uri()?); - ssh_activate_command.args(deploy_data.ssh_opts()); - - if !magic_rollback || dry_activate { - let ssh_activate_exit_status = ssh_activate_command - .arg(self_activate_command) + if !*magic_rollback || *dry_activate { + let ssh_activate_exit_status = ssh_activate_cmd + .arg(activate_cmd) .status() .await .map_err(DeployProfileError::SSHActivate)?; @@ -322,32 +379,25 @@ pub async fn deploy_profile( a => return Err(DeployProfileError::SSHActivateExit(a)), }; - if dry_activate { + if *dry_activate { info!("Completed dry-activate!"); } else { info!("Success activating, done!"); } } else { - let self_wait_command = build_wait_command(&WaitCommandData { - sudo: &deploy_defs.sudo, - closure: &deploy_data.profile.profile_settings.path, - temp_path: &temp_path, - debug_logs: deploy_data.flags.debug_logs, - log_dir: deploy_data.flags.log_dir.as_deref(), - }); + let wait_cmd = wait.build(); - debug!("Constructed wait command: {}", self_wait_command); + debug!("Constructed wait command: {}", wait_cmd); - let ssh_activate = ssh_activate_command - .arg(self_activate_command) + let ssh_activate = ssh_activate_cmd + .arg(activate_cmd) .spawn() .map_err(DeployProfileError::SSHSpawnActivate)?; info!("Creating activation waiter"); - let mut ssh_wait_command = Command::new("ssh"); - ssh_wait_command.arg(deploy_data.ssh_non_uri()?); - ssh_wait_command.args(deploy_data.ssh_opts()); + + let mut ssh_wait_cmd = ssh.build(); let (send_activate, recv_activate) = tokio::sync::oneshot::channel(); let (send_activated, recv_activated) = tokio::sync::oneshot::channel(); @@ -370,7 +420,7 @@ pub async fn deploy_profile( send_activated.send(()).unwrap(); }); tokio::select! 
{ - x = ssh_wait_command.arg(self_wait_command).status() => { + x = ssh_wait_cmd.arg(wait_cmd).status() => { debug!("Wait command ended"); match x.map_err(DeployProfileError::SSHWait)?.code() { Some(0) => (), @@ -385,7 +435,7 @@ pub async fn deploy_profile( info!("Success activating, attempting to confirm activation"); - let c = confirm_profile(deploy_data, deploy_defs, temp_path).await; + let c = confirm_profile(ssh, confirm).await; recv_activated.await.unwrap(); c?; } @@ -402,34 +452,26 @@ pub enum RevokeProfileError { SSHRevoke(std::io::Error), #[error("Revoking over SSH resulted in a bad exit code: {0:?}")] SSHRevokeExit(Option), - - #[error("Deployment data invalid: {0}")] - DeployData(#[from] data::DeployDataError), } pub async fn revoke( - deploy_data: &data::DeployData<'_>, - deploy_defs: &data::DeployDefs, + node_name: &str, + profile_name: &str, + ssh: SshCommand<'_>, + revoke: RevokeCommand<'_>, ) -> Result<(), RevokeProfileError> { - let self_revoke_command = build_revoke_command(&RevokeCommandData { - sudo: &deploy_defs.sudo, - closure: &deploy_data.profile.profile_settings.path, - profile_path: &deploy_data.get_profile_path()?, - debug_logs: deploy_data.flags.debug_logs, - log_dir: deploy_data.flags.log_dir.as_deref(), - }); - - debug!("Constructed revoke command: {}", self_revoke_command); - - let mut ssh_activate_command = Command::new("ssh"); - ssh_activate_command.arg(deploy_data.ssh_non_uri()?); - ssh_activate_command.args(deploy_data.ssh_opts()); - - let ssh_revoke = ssh_activate_command - .arg(self_revoke_command) + info!("Revoking profile `{}` for node `{}`", profile_name, node_name); + + let revoke_cmd = revoke.build(); + debug!("Constructed revoke command: {}", revoke_cmd); + + let mut ssh_revoke_cmd = ssh.build(); + + let ssh_revoke_cmd = ssh_revoke_cmd + .arg(revoke_cmd) .spawn() .map_err(RevokeProfileError::SSHSpawnRevoke)?; - let result = ssh_revoke.wait_with_output().await; + let result = ssh_revoke_cmd.wait_with_output().await; match result { Err(x) => Err(RevokeProfileError::SSHRevoke(x)), diff --git a/src/push.rs b/src/push.rs index 6c0f0b92..76d11b92 100644 --- a/src/push.rs +++ b/src/push.rs @@ -207,7 +207,7 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr .arg(&data.deploy_data.profile.profile_settings.path) .env( "NIX_SSHOPTS", - data.deploy_data.ssh_opts().fold("".to_string(), |s, o| format!("{} {}", s, o)) + data.deploy_data.ssh_opts()?.fold("".to_string(), |s, o| format!("{} {}", s, o)) ) .status() .await From 6610bad1208de32c8af95b4be2c6a04515ee7736 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Thu, 12 Aug 2021 09:21:24 -0500 Subject: [PATCH 11/53] Refactor create push.rs views into data.rs owned data & cleanup --- src/cli.rs | 51 ++++------ src/data.rs | 136 +++----------------------- src/deploy.rs | 6 +- src/push.rs | 261 ++++++++++++++++++++++++++++++-------------------- 4 files changed, 193 insertions(+), 261 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index b2fcb9f8..fe8b1fa5 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -66,22 +66,19 @@ struct PromptPart<'a> { } fn print_deployment( - parts: &[( - &data::DeployData, - data::DeployDefs, - )], + parts: &[&data::DeployData], ) -> Result<(), toml::ser::Error> { let mut part_map: HashMap> = HashMap::new(); - for (data, defs) in parts { + for data in parts { part_map .entry(data.node_name.to_string()) .or_insert_with(HashMap::new) .insert( data.profile_name.to_string(), PromptPart { - user: &defs.profile_user, - ssh_user: &defs.ssh_user, + user: 
&data.profile_user, + ssh_user: &data.ssh_user, path: &data.profile.profile_settings.path, hostname: &data.node.node_settings.hostname, ssh_opts: &data.merged_settings.ssh_opts, @@ -108,10 +105,7 @@ pub enum PromptDeploymentError { } fn prompt_deployment( - parts: &[( - &data::DeployData, - data::DeployDefs, - )], + parts: &[&data::DeployData], ) -> Result<(), PromptDeploymentError> { print_deployment(parts)?; @@ -198,14 +192,10 @@ async fn run_deploy( .collect::>>, data::ResolveTargetError>>()?; let deploy_datas: Vec<&data::DeployData<'_>> = deploy_datas_.iter().flatten().collect(); - let mut parts: Vec<( - &data::DeployData, - data::DeployDefs, - )> = Vec::new(); + let mut parts: Vec<&data::DeployData> = Vec::new(); for deploy_data in deploy_datas { - let deploy_defs = deploy_data.defs()?; - parts.push((deploy_data, deploy_defs)); + parts.push(deploy_data); } if cmd_flags.interactive { @@ -214,27 +204,24 @@ async fn run_deploy( print_deployment(&parts[..])?; } - for (deploy_data, deploy_defs) in &parts { - deploy::push::push_profile(deploy::push::PushProfileData { - supports_flakes: &supports_flakes, - check_sigs: &cmd_flags.checksigs, - repo: &deploy_data.repo, - deploy_data: &deploy_data, - deploy_defs: &deploy_defs, - keep_result: &cmd_flags.keep_result, - result_path: cmd_flags.result_path.as_deref(), - extra_build_args: &cmd_flags.extra_build_args, - }) + for deploy_data in &parts { + deploy::push::push_profile( + supports_flakes, + deploy::push::ShowDerivationCommand::from_data(&deploy_data), + deploy::push::BuildCommand::from_data(&deploy_data), + deploy::push::SignCommand::from_data(&deploy_data), + deploy::push::CopyCommand::from_data(&deploy_data), + ) .await?; } - let mut succeeded: Vec<(&data::DeployData, &data::DeployDefs)> = vec![]; + let mut succeeded: Vec<&data::DeployData> = vec![]; // Run all deployments // In case of an error rollback any previoulsy made deployment. 
// Rollbacks adhere to the global seeting to auto_rollback and secondary // the profile's configuration - for (deploy_data, deploy_defs) in &parts { + for deploy_data in &parts { if let Err(e) = deploy::deploy::deploy_profile( &deploy_data.node_name, &deploy_data.profile_name, @@ -253,7 +240,7 @@ async fn run_deploy( // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) - for (deploy_data, _) in &succeeded { + for deploy_data in &succeeded { if deploy_data.merged_settings.auto_rollback.unwrap_or(true) { deploy::deploy::revoke( &deploy_data.node_name, @@ -266,7 +253,7 @@ async fn run_deploy( } break; } - succeeded.push((deploy_data, deploy_defs)) + succeeded.push(deploy_data) } Ok(()) diff --git a/src/data.rs b/src/data.rs index 3dddc702..6fd4473a 100644 --- a/src/data.rs +++ b/src/data.rs @@ -260,16 +260,18 @@ pub struct DeployData<'a> { pub node_name: String, pub profile_name: String, - pub hostname: Option<&'a str>, - pub flags: &'a Flags, pub node: &'a settings::Node, pub profile: &'a settings::Profile, pub merged_settings: settings::GenericSettings, + pub hostname: &'a str, + pub ssh_user: String, + pub ssh_uri: String, pub temp_path: String, pub profile_path: String, + pub profile_user: String, pub sudo: Option, } @@ -318,14 +320,6 @@ pub struct Flags { pub rollback_succeeded: bool, } -#[derive(Debug)] -pub struct DeployDefs { - pub ssh_user: String, - pub profile_user: String, - pub profile_path: String, - pub sudo: Option, -} - impl<'a> DeployData<'a> { fn new( @@ -351,14 +345,10 @@ impl<'a> DeployData<'a> { Some(ref x) => x.to_owned(), None => "/tmp".to_string(), }; - let profile_user = match merged_settings.user { - Some(ref x) => x.to_owned(), - None => match merged_settings.ssh_user { - Some(ref x) => x.to_owned(), - None => { - return Err(DeployDataError::NoProfileUser(profile_name, node_name)) - } - }, + let profile_user = if let Some(ref x) = merged_settings.user { x.to_owned() } else { + if let Some(ref x) = merged_settings.ssh_user { x.to_owned() } else { + return Err(DeployDataError::NoProfileUser(profile_name, node_name)) + } }; let profile_path = match profile.profile_settings.profile_path { None => format!("/nix/var/nix/profiles/{}", match &profile_user[..] 
{ @@ -375,6 +365,11 @@ impl<'a> DeployData<'a> { Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), _ => None, }; + let hostname = match hostname { + Some(x) => x, + None => &node.node_settings.hostname, + }; + let ssh_uri = format!("ssh://{}@{}", &ssh_user, &hostname); Ok(DeployData { repo, @@ -384,114 +379,13 @@ impl<'a> DeployData<'a> { profile, hostname, ssh_user, + ssh_uri, temp_path, profile_path, + profile_user, sudo, flags, merged_settings, }) } - - pub fn defs(&'a self) -> Result { - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u.clone(), - None => whoami::username(), - }; - let profile_user = self.get_profile_user()?; - let profile_path = self.get_profile_path()?; - let sudo = self.sudo()?; - - Ok(DeployDefs { - ssh_user, - profile_user, - profile_path, - sudo, - }) - } - - pub fn sudo(&'a self) -> Result, DeployDataError> { - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u.clone(), - None => whoami::username(), - }; - Ok( - match self.merged_settings.user { - Some(ref user) if user != &ssh_user => Some(format!("sudo -u {}", user)), - _ => None, - } - ) - } - - pub fn temp_path(&'a self) -> Result { - Ok( - match self.merged_settings.temp_path { - Some(ref x) => x.to_owned(), - None => "/tmp".to_string(), - } - ) - } - - pub fn ssh_uri(&'a self) -> Result { - - let hostname = match self.hostname { - Some(x) => x, - None => &self.node.node_settings.hostname, - }; - let curr_user = &whoami::username(); - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u, - None => curr_user, - }; - Ok(format!("ssh://{}@{}", ssh_user, hostname)) - } - - // can be dropped once ssh fully supports ipv6 uris - pub fn ssh_non_uri(&'a self) -> Result { - - let hostname = match self.hostname { - Some(x) => x, - None => &self.node.node_settings.hostname, - }; - let curr_user = &whoami::username(); - let ssh_user = match self.merged_settings.ssh_user { - Some(ref u) => u, - None => curr_user, - }; - Ok(format!("{}@{}", ssh_user, hostname)) - } - - pub fn ssh_opts(&'a self) -> Result, DeployDataError> { - Ok(self.merged_settings.ssh_opts.iter()) - } - - pub fn get_profile_path(&'a self) -> Result { - let profile_user = self.get_profile_user()?; - let profile_path = match self.profile.profile_settings.profile_path { - None => match &profile_user[..] 
{ - "root" => format!("/nix/var/nix/profiles/{}", self.profile_name), - _ => format!( - "/nix/var/nix/profiles/per-user/{}/{}", - profile_user, self.profile_name - ), - }, - Some(ref x) => x.clone(), - }; - Ok(profile_path) - } - - pub fn get_profile_user(&'a self) -> Result { - let profile_user = match self.merged_settings.user { - Some(ref x) => x.clone(), - None => match self.merged_settings.ssh_user { - Some(ref x) => x.clone(), - None => { - return Err(DeployDataError::NoProfileUser( - self.profile_name.to_owned(), - self.node_name.to_owned(), - )) - } - }, - }; - Ok(profile_user) - } } diff --git a/src/deploy.rs b/src/deploy.rs index 23c57b21..d517d6af 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -17,11 +17,7 @@ pub struct SshCommand<'a> { impl<'a> SshCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Result { - let hostname = match d.hostname { - Some(x) => x, - None => &d.node.node_settings.hostname, - }; - let hoststring = format!("{}@{}", &d.ssh_user, hostname); + let hoststring = format!("{}@{}", &d.ssh_user, d.hostname); let opts = d.merged_settings.ssh_opts.as_ref(); Ok(SshCommand {hoststring, opts}) } diff --git a/src/push.rs b/src/push.rs index 76d11b92..e1c0d88a 100644 --- a/src/push.rs +++ b/src/push.rs @@ -48,31 +48,158 @@ pub enum PushProfileError { DeployData(#[from] data::DeployDataError), } -pub struct PushProfileData<'a> { - pub supports_flakes: &'a bool, - pub check_sigs: &'a bool, - pub repo: &'a str, - pub deploy_data: &'a data::DeployData<'a>, - pub deploy_defs: &'a data::DeployDefs, - pub keep_result: &'a bool, - pub result_path: Option<&'a str>, - pub extra_build_args: &'a [String], +pub struct ShowDerivationCommand<'a> { + closure: &'a str, } -pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileError> { - debug!( - "Finding the deriver of store path for {}", - &data.deploy_data.profile.profile_settings.path - ); +impl<'a> ShowDerivationCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + ShowDerivationCommand { + closure: d.profile.profile_settings.path.as_str(), + } + } + + fn build(self) -> Command { + // `nix-store --query --deriver` doesn't work on invalid paths, so we parse output of show-derivation :( + let mut cmd = Command::new("nix"); + + cmd + .arg("show-derivation") + .arg(&self.closure); + //cmd.what_is_this; + cmd + } +} + +pub struct SignCommand<'a> { + closure: &'a str, +} + +impl<'a> SignCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + SignCommand { + closure: d.profile.profile_settings.path.as_str(), + } + } + + fn build(self, local_key: String) -> Command { + let mut cmd = Command::new("nix"); + + cmd + .arg("sign-paths") + .arg("-r") + .arg("-k") + .arg(local_key) + .arg(&self.closure); + //cmd.what_is_this; + cmd + } +} + +pub struct CopyCommand<'a> { + closure: &'a str, + fast_connection: bool, + check_sigs: &'a bool, + ssh_uri: &'a str, + ssh_opts: String, +} + +impl<'a> CopyCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + CopyCommand { + closure: d.profile.profile_settings.path.as_str(), + fast_connection: d.merged_settings.fast_connection.unwrap_or(false), + check_sigs: &d.flags.checksigs, + ssh_uri: d.ssh_uri.as_str(), + ssh_opts: d.merged_settings.ssh_opts.iter().fold("".to_string(), |s, o| format!("{} {}", s, o)), + } + } + + fn build(self) -> Command { + let mut cmd = Command::new("nix"); + + cmd.arg("copy"); + + if self.fast_connection { + cmd.arg("--substitute-on-destination"); + } + + if !self.check_sigs { + 
cmd.arg("--no-check-sigs"); + } + cmd + .arg("--to") + .arg(self.ssh_uri) + .arg(self.closure) + .env("NIX_SSHOPTS", self.ssh_opts); + //cmd.what_is_this; + cmd + } +} + +pub struct BuildCommand<'a> { + node_name: &'a str, + profile_name: &'a str, + keep_result: &'a bool, + result_path: &'a str, + extra_build_args: &'a Vec, +} + +impl<'a> BuildCommand<'a> { + pub fn from_data(d: &'a data::DeployData) -> Self { + BuildCommand { + node_name: d.node_name.as_str(), + profile_name: d.profile_name.as_str(), + keep_result: &d.flags.keep_result, + result_path: &d.flags.result_path.as_deref().unwrap_or("./.deploy-gc"), + extra_build_args: &d.flags.extra_build_args, + } + } + + fn build(self, derivation_name: &str, supports_flakes: bool) -> Command { + let mut cmd = if supports_flakes { + Command::new("nix") + } else { + Command::new("nix-build") + }; - // `nix-store --query --deriver` doesn't work on invalid paths, so we parse output of show-derivation :( - let mut show_derivation_command = Command::new("nix"); + if supports_flakes { + cmd.arg("build").arg(derivation_name) + } else { + cmd.arg(derivation_name) + }; - show_derivation_command - .arg("show-derivation") - .arg(&data.deploy_data.profile.profile_settings.path); + match (self.keep_result, supports_flakes) { + (true, _) => { + cmd.arg("--out-link").arg(format!( + "{}/{}/{}", + self.result_path, self.node_name, self.profile_name + )) + } + (false, false) => cmd.arg("--no-out-link"), + (false, true) => cmd.arg("--no-link"), + }; + cmd.args(self.extra_build_args.iter()); + // cmd.what_is_this; + cmd + } +} - let show_derivation_output = show_derivation_command +pub async fn push_profile( + supports_flakes: bool, + show_derivation: ShowDerivationCommand<'_>, + build: BuildCommand<'_>, + sign: SignCommand<'_>, + copy: CopyCommand<'_>, +) -> Result<(), PushProfileError> { + let node_name = build.node_name; + let profile_name = build.profile_name; + let closure = show_derivation.closure; + + debug!("Finding the deriver of store path for {}", closure); + let mut show_derivation_cmd = show_derivation.build(); + + let show_derivation_output = show_derivation_cmd .output() .await .map_err(PushProfileError::ShowDerivation)?; @@ -93,41 +220,11 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr .next() .ok_or(PushProfileError::ShowDerivationEmpty)?; - info!( - "Building profile `{}` for node `{}`", - data.deploy_data.profile_name, data.deploy_data.node_name - ); - - let mut build_command = if *data.supports_flakes { - Command::new("nix") - } else { - Command::new("nix-build") - }; - - if *data.supports_flakes { - build_command.arg("build").arg(derivation_name) - } else { - build_command.arg(derivation_name) - }; + info!("Building profile `{}` for node `{}`", profile_name, node_name); - match (data.keep_result, data.supports_flakes) { - (true, _) => { - let result_path = data.result_path.unwrap_or("./.deploy-gc"); + let mut build_cmd = build.build(*derivation_name, supports_flakes); - build_command.arg("--out-link").arg(format!( - "{}/{}/{}", - result_path, data.deploy_data.node_name, data.deploy_data.profile_name - )) - } - (false, false) => build_command.arg("--no-out-link"), - (false, true) => build_command.arg("--no-link"), - }; - - for extra_arg in data.extra_build_args { - build_command.arg(extra_arg); - } - - let build_exit_status = build_command + let build_exit_status = build_cmd // Logging should be in stderr, this just stops the store path from printing for no reason .stdout(Stdio::null()) .status() @@ -139,42 
+236,19 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr a => return Err(PushProfileError::BuildExit(a)), }; - if !Path::new( - format!( - "{}/deploy-rs-activate", - data.deploy_data.profile.profile_settings.path - ) - .as_str(), - ) - .exists() - { + if !Path::new(format!("{}/deploy-rs-activate", closure).as_str()).exists() { return Err(PushProfileError::DeployRsActivateDoesntExist); } - if !Path::new( - format!( - "{}/activate-rs", - data.deploy_data.profile.profile_settings.path - ) - .as_str(), - ) - .exists() - { + if !Path::new(format!("{}/activate-rs", closure).as_str()).exists() { return Err(PushProfileError::ActivateRsDoesntExist); } if let Ok(local_key) = std::env::var("LOCAL_KEY") { - info!( - "Signing key present! Signing profile `{}` for node `{}`", - data.deploy_data.profile_name, data.deploy_data.node_name - ); + info!("Signing key present! Signing profile `{}` for node `{}`", profile_name, node_name); - let sign_exit_status = Command::new("nix") - .arg("sign-paths") - .arg("-r") - .arg("-k") - .arg(local_key) - .arg(&data.deploy_data.profile.profile_settings.path) + let mut sign_cmd = sign.build(local_key); + let sign_exit_status = sign_cmd .status() .await .map_err(PushProfileError::Sign)?; @@ -185,30 +259,11 @@ pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileEr }; } - info!( - "Copying profile `{}` to node `{}`", - data.deploy_data.profile_name, data.deploy_data.node_name - ); - - let mut copy_command = Command::new("nix"); - copy_command.arg("copy"); + info!("Copying profile `{}` to node `{}`", profile_name, node_name); - if data.deploy_data.merged_settings.fast_connection != Some(true) { - copy_command.arg("--substitute-on-destination"); - } - - if !data.check_sigs { - copy_command.arg("--no-check-sigs"); - } + let mut copy_cmd = copy.build(); - let copy_exit_status = copy_command - .arg("--to") - .arg(data.deploy_data.ssh_uri()?) 
- .arg(&data.deploy_data.profile.profile_settings.path) - .env( - "NIX_SSHOPTS", - data.deploy_data.ssh_opts()?.fold("".to_string(), |s, o| format!("{} {}", s, o)) - ) + let copy_exit_status = copy_cmd .status() .await .map_err(PushProfileError::Copy)?; From bea6fe002d595c569020a62c20e3d8bc9538c1fb Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 27 Aug 2021 10:43:49 -0600 Subject: [PATCH 12/53] run cargo fmt --- src/cli.rs | 39 ++++++------ src/data.rs | 162 +++++++++++++++++++++++++++++++------------------- src/deploy.rs | 21 ++++--- src/flake.rs | 2 +- src/lib.rs | 6 +- src/push.rs | 46 +++++++------- 6 files changed, 153 insertions(+), 123 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index fe8b1fa5..e088be39 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -10,7 +10,7 @@ use clap::{ArgMatches, Clap, FromArgMatches}; use crate as deploy; -use self::deploy::{data, settings, flake}; +use self::deploy::{data, flake, settings}; use log::{debug, error, info, warn}; use serde::Serialize; use std::process::Stdio; @@ -65,9 +65,7 @@ struct PromptPart<'a> { ssh_opts: &'a [String], } -fn print_deployment( - parts: &[&data::DeployData], -) -> Result<(), toml::ser::Error> { +fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> { let mut part_map: HashMap> = HashMap::new(); for data in parts { @@ -104,9 +102,7 @@ pub enum PromptDeploymentError { Cancelled, } -fn prompt_deployment( - parts: &[&data::DeployData], -) -> Result<(), PromptDeploymentError> { +fn prompt_deployment(parts: &[&data::DeployData]) -> Result<(), PromptDeploymentError> { print_deployment(parts)?; info!("Are you sure you want to deploy these profiles?"); @@ -179,16 +175,10 @@ async fn run_deploy( cmd_settings: settings::GenericSettings, cmd_flags: data::Flags, ) -> Result<(), RunDeployError> { - let deploy_datas_ = targets.into_iter().zip(&settings) - .map( - |(target, root)| - target.resolve( - &root, - &cmd_settings, - &cmd_flags, - hostname.as_deref(), - ) - ) + let deploy_datas_ = targets + .into_iter() + .zip(&settings) + .map(|(target, root)| target.resolve(&root, &cmd_settings, &cmd_flags, hostname.as_deref())) .collect::>>, data::ResolveTargetError>>()?; let deploy_datas: Vec<&data::DeployData<'_>> = deploy_datas_.iter().flatten().collect(); @@ -229,7 +219,8 @@ async fn run_deploy( deploy::deploy::ActivateCommand::from_data(&deploy_data), deploy::deploy::WaitCommand::from_data(&deploy_data), deploy::deploy::ConfirmCommand::from_data(&deploy_data), - ).await + ) + .await { error!("{}", e); if cmd_flags.dry_activate { @@ -247,7 +238,8 @@ async fn run_deploy( &deploy_data.profile_name, deploy::deploy::SshCommand::from_data(&deploy_data)?, deploy::deploy::RevokeCommand::from_data(&deploy_data), - ).await?; + ) + .await?; } } } @@ -305,14 +297,17 @@ pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { let targets: Vec = deploys .into_iter() .map(|f| f.parse::()) - .collect::, data::ParseTargetError>>()?; + .collect::, data::ParseTargetError>>( + )?; if !opts.flags.skip_checks { for target in targets.iter() { - flake::check_deployment(supports_flakes, &target.repo, &opts.flags.extra_build_args).await?; + flake::check_deployment(supports_flakes, &target.repo, &opts.flags.extra_build_args) + .await?; } } - let settings = flake::get_deployment_data(supports_flakes, &targets, &opts.flags.extra_build_args).await?; + let settings = + flake::get_deployment_data(supports_flakes, &targets, &opts.flags.extra_build_args).await?; run_deploy( targets, settings, diff --git a/src/data.rs 
b/src/data.rs index 6fd4473a..4d770b5f 100644 --- a/src/data.rs +++ b/src/data.rs @@ -3,11 +3,11 @@ // // SPDX-License-Identifier: MPL-2.0 +use clap::Clap; use linked_hash_set::LinkedHashSet; -use rnix::{types::*, SyntaxKind::*}; use merge::Merge; +use rnix::{types::*, SyntaxKind::*}; use thiserror::Error; -use clap::Clap; use crate::settings; @@ -47,19 +47,30 @@ impl<'a> Target { hostname: Option<&'a str>, ) -> Result>, ResolveTargetError> { match self { - Target{repo, node: Some(node), profile} => { + Target { + repo, + node: Some(node), + profile, + } => { let node_ = match r.nodes.get(&node) { Some(x) => x, - None => return Err(ResolveTargetError::NodeNotFound( - node.to_owned(), repo.to_owned() - )), + None => { + return Err(ResolveTargetError::NodeNotFound( + node.to_owned(), + repo.to_owned(), + )) + } }; if let Some(profile) = profile { let profile_ = match node_.node_settings.profiles.get(&profile) { Some(x) => x, - None => return Err(ResolveTargetError::ProfileNotFound( - profile.to_owned(), node.to_owned(), repo.to_owned() - )), + None => { + return Err(ResolveTargetError::ProfileNotFound( + profile.to_owned(), + node.to_owned(), + repo.to_owned(), + )) + } }; Ok({ let d = DeployData::new( @@ -76,45 +87,58 @@ impl<'a> Target { vec![d] }) } else { - let ordered_profile_names: LinkedHashSet:: = node_.node_settings.profiles_order.iter().cloned().collect(); - let profile_names: LinkedHashSet:: = node_.node_settings.profiles.keys().cloned().collect(); - let prioritized_profile_names: LinkedHashSet::<&String> = ordered_profile_names.union(&profile_names).collect(); - Ok( - prioritized_profile_names + let ordered_profile_names: LinkedHashSet = + node_.node_settings.profiles_order.iter().cloned().collect(); + let profile_names: LinkedHashSet = + node_.node_settings.profiles.keys().cloned().collect(); + let prioritized_profile_names: LinkedHashSet<&String> = + ordered_profile_names.union(&profile_names).collect(); + Ok(prioritized_profile_names .iter() - .map( - |p| - Target{repo: repo.to_owned(), node: Some(node.to_owned()), profile: Some(p.to_string())}.resolve( - r, cs, cf, hostname, - ) - ) + .map(|p| { + Target { + repo: repo.to_owned(), + node: Some(node.to_owned()), + profile: Some(p.to_string()), + } + .resolve(r, cs, cf, hostname) + }) .collect::>>, ResolveTargetError>>()? - .into_iter().flatten().collect::>>() - ) + .into_iter() + .flatten() + .collect::>>()) } - }, - Target{repo, node: None, profile: None} => { + } + Target { + repo, + node: None, + profile: None, + } => { if let Some(hostname) = hostname { todo!() // create issue to discuss: - // if allowed, it would be really awkward - // to override the hostname for a series of nodes at once + // if allowed, it would be really awkward + // to override the hostname for a series of nodes at once } - Ok( - r.nodes + Ok(r.nodes .iter() - .map( - |(n, _)| - Target{repo: repo.to_owned(), node: Some(n.to_string()), profile: None}.resolve( - r, cs, cf, hostname, - ) - ) + .map(|(n, _)| { + Target { + repo: repo.to_owned(), + node: Some(n.to_string()), + profile: None, + } + .resolve(r, cs, cf, hostname) + }) .collect::>>, ResolveTargetError>>()? 
- .into_iter().flatten().collect::>>() - ) - }, - Target{repo, node: None, profile: Some(_)} => return Err(ResolveTargetError::ProfileWithoutNode( - repo.to_owned() - )) + .into_iter() + .flatten() + .collect::>>()) + } + Target { + repo, + node: None, + profile: Some(_), + } => return Err(ResolveTargetError::ProfileWithoutNode(repo.to_owned())), } } } @@ -209,7 +233,9 @@ fn test_deploy_target_from_str() { ); assert_eq!( - "../deploy/examples/system#computer.\"something.nix\"".parse::().unwrap(), + "../deploy/examples/system#computer.\"something.nix\"" + .parse::() + .unwrap(), Target { repo: "../deploy/examples/system".to_string(), node: Some("computer".to_string()), @@ -218,7 +244,9 @@ fn test_deploy_target_from_str() { ); assert_eq!( - "../deploy/examples/system#\"example.com\".system".parse::().unwrap(), + "../deploy/examples/system#\"example.com\".system" + .parse::() + .unwrap(), Target { repo: "../deploy/examples/system".to_string(), node: Some("example.com".to_string()), @@ -227,7 +255,9 @@ fn test_deploy_target_from_str() { ); assert_eq!( - "../deploy/examples/system#example".parse::().unwrap(), + "../deploy/examples/system#example" + .parse::() + .unwrap(), Target { repo: "../deploy/examples/system".to_string(), node: Some("example".to_string()), @@ -236,7 +266,9 @@ fn test_deploy_target_from_str() { ); assert_eq!( - "../deploy/examples/system#example.system".parse::().unwrap(), + "../deploy/examples/system#example.system" + .parse::() + .unwrap(), Target { repo: "../deploy/examples/system".to_string(), node: Some("example".to_string()), @@ -287,41 +319,40 @@ pub enum DeployDataError { pub struct Flags { /// Check signatures when using `nix copy` #[clap(short, long)] - pub checksigs: bool, + pub checksigs: bool, /// Use the interactive prompt before deployment #[clap(short, long)] - pub interactive: bool, + pub interactive: bool, /// Extra arguments to be passed to nix build - pub extra_build_args: Vec, + pub extra_build_args: Vec, /// Print debug logs to output #[clap(short, long)] - pub debug_logs: bool, + pub debug_logs: bool, /// Directory to print logs to (including the background activation process) #[clap(long)] - pub log_dir: Option, + pub log_dir: Option, /// Keep the build outputs of each built profile #[clap(short, long)] - pub keep_result: bool, + pub keep_result: bool, /// Location to keep outputs from built profiles in #[clap(short, long)] - pub result_path: Option, + pub result_path: Option, /// Skip the automatic pre-build checks #[clap(short, long)] - pub skip_checks: bool, + pub skip_checks: bool, /// Make activation wait for confirmation, or roll back after a period of time /// Show what will be activated on the machines #[clap(long)] - pub dry_activate: bool, + pub dry_activate: bool, /// Revoke all previously succeeded deploys when deploying multiple profiles #[clap(long)] - pub rollback_succeeded: bool, + pub rollback_succeeded: bool, } impl<'a> DeployData<'a> { - fn new( repo: String, node_name: String, @@ -345,16 +376,23 @@ impl<'a> DeployData<'a> { Some(ref x) => x.to_owned(), None => "/tmp".to_string(), }; - let profile_user = if let Some(ref x) = merged_settings.user { x.to_owned() } else { - if let Some(ref x) = merged_settings.ssh_user { x.to_owned() } else { - return Err(DeployDataError::NoProfileUser(profile_name, node_name)) + let profile_user = if let Some(ref x) = merged_settings.user { + x.to_owned() + } else { + if let Some(ref x) = merged_settings.ssh_user { + x.to_owned() + } else { + return Err(DeployDataError::NoProfileUser(profile_name, 
node_name)); } }; let profile_path = match profile.profile_settings.profile_path { - None => format!("/nix/var/nix/profiles/{}", match &profile_user[..] { - "root" => profile_name.to_owned(), - _ => format!("per-user/{}/{}", profile_user, profile_name), - }), + None => format!( + "/nix/var/nix/profiles/{}", + match &profile_user[..] { + "root" => profile_name.to_owned(), + _ => format!("per-user/{}/{}", profile_user, profile_name), + } + ), Some(ref x) => x.to_owned(), }; let ssh_user = match merged_settings.ssh_user { diff --git a/src/deploy.rs b/src/deploy.rs index d517d6af..eb5eca36 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -19,7 +19,7 @@ impl<'a> SshCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Result { let hoststring = format!("{}@{}", &d.ssh_user, d.hostname); let opts = d.merged_settings.ssh_opts.as_ref(); - Ok(SshCommand {hoststring, opts}) + Ok(SshCommand { hoststring, opts }) } fn build(&self) -> Command { @@ -75,10 +75,7 @@ impl<'a> ActivateCommand<'a> { cmd, self.closure, self.profile_path, self.temp_path ); - cmd = format!( - "{} --confirm-timeout {}", - cmd, self.confirm_timeout - ); + cmd = format!("{} --confirm-timeout {}", cmd, self.confirm_timeout); if self.magic_rollback { cmd = format!("{} --magic-rollback", cmd); @@ -214,7 +211,6 @@ impl<'a> RevokeCommand<'a> { } } - fn build(self) -> String { let mut cmd = format!("{}/activate-rs", self.closure); @@ -272,7 +268,6 @@ impl<'a> ConfirmCommand<'a> { } } - fn build(self) -> String { let lock_path = super::make_lock_path(&self.temp_path, &self.closure); @@ -298,7 +293,6 @@ pub async fn confirm_profile( ssh: SshCommand<'_>, confirm: ConfirmCommand<'_>, ) -> Result<(), ConfirmProfileError> { - let mut ssh_confirm_cmd = ssh.build(); let confirm_cmd = confirm.build(); @@ -352,7 +346,10 @@ pub async fn deploy_profile( confirm: ConfirmCommand<'_>, ) -> Result<(), DeployProfileError> { if !activate.dry_activate { - info!("Activating profile `{}` for node `{}`", profile_name, node_name); + info!( + "Activating profile `{}` for node `{}`", + profile_name, node_name + ); } let dry_activate = &activate.dry_activate.clone(); let magic_rollback = &activate.magic_rollback.clone(); @@ -392,7 +389,6 @@ pub async fn deploy_profile( info!("Creating activation waiter"); - let mut ssh_wait_cmd = ssh.build(); let (send_activate, recv_activate) = tokio::sync::oneshot::channel(); @@ -455,7 +451,10 @@ pub async fn revoke( ssh: SshCommand<'_>, revoke: RevokeCommand<'_>, ) -> Result<(), RevokeProfileError> { - info!("Revoking profile `{}` for node `{}`", profile_name, node_name); + info!( + "Revoking profile `{}` for node `{}`", + profile_name, node_name + ); let revoke_cmd = revoke.build(); debug!("Constructed revoke command: {}", revoke_cmd); diff --git a/src/flake.rs b/src/flake.rs index 22b6de23..691e5e4e 100644 --- a/src/flake.rs +++ b/src/flake.rs @@ -6,9 +6,9 @@ use crate as deploy; use self::deploy::{data, settings}; +use futures_util::stream::{StreamExt, TryStreamExt}; use log::{error, info}; use std::process::Stdio; -use futures_util::stream::{StreamExt, TryStreamExt}; use thiserror::Error; use tokio::process::Command; diff --git a/src/lib.rs b/src/lib.rs index e530a8b8..b943546c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -139,9 +139,9 @@ pub fn init_logger( Ok(()) } -pub mod settings; +pub mod cli; pub mod data; -pub mod flake; pub mod deploy; +pub mod flake; pub mod push; -pub mod cli; +pub mod settings; diff --git a/src/push.rs b/src/push.rs index e1c0d88a..2f46273d 100644 --- a/src/push.rs +++ b/src/push.rs @@ 
-63,9 +63,7 @@ impl<'a> ShowDerivationCommand<'a> { // `nix-store --query --deriver` doesn't work on invalid paths, so we parse output of show-derivation :( let mut cmd = Command::new("nix"); - cmd - .arg("show-derivation") - .arg(&self.closure); + cmd.arg("show-derivation").arg(&self.closure); //cmd.what_is_this; cmd } @@ -85,8 +83,7 @@ impl<'a> SignCommand<'a> { fn build(self, local_key: String) -> Command { let mut cmd = Command::new("nix"); - cmd - .arg("sign-paths") + cmd.arg("sign-paths") .arg("-r") .arg("-k") .arg(local_key) @@ -111,7 +108,11 @@ impl<'a> CopyCommand<'a> { fast_connection: d.merged_settings.fast_connection.unwrap_or(false), check_sigs: &d.flags.checksigs, ssh_uri: d.ssh_uri.as_str(), - ssh_opts: d.merged_settings.ssh_opts.iter().fold("".to_string(), |s, o| format!("{} {}", s, o)), + ssh_opts: d + .merged_settings + .ssh_opts + .iter() + .fold("".to_string(), |s, o| format!("{} {}", s, o)), } } @@ -127,8 +128,7 @@ impl<'a> CopyCommand<'a> { if !self.check_sigs { cmd.arg("--no-check-sigs"); } - cmd - .arg("--to") + cmd.arg("--to") .arg(self.ssh_uri) .arg(self.closure) .env("NIX_SSHOPTS", self.ssh_opts); @@ -170,12 +170,10 @@ impl<'a> BuildCommand<'a> { }; match (self.keep_result, supports_flakes) { - (true, _) => { - cmd.arg("--out-link").arg(format!( - "{}/{}/{}", - self.result_path, self.node_name, self.profile_name - )) - } + (true, _) => cmd.arg("--out-link").arg(format!( + "{}/{}/{}", + self.result_path, self.node_name, self.profile_name + )), (false, false) => cmd.arg("--no-out-link"), (false, true) => cmd.arg("--no-link"), }; @@ -220,7 +218,10 @@ pub async fn push_profile( .next() .ok_or(PushProfileError::ShowDerivationEmpty)?; - info!("Building profile `{}` for node `{}`", profile_name, node_name); + info!( + "Building profile `{}` for node `{}`", + profile_name, node_name + ); let mut build_cmd = build.build(*derivation_name, supports_flakes); @@ -245,13 +246,13 @@ pub async fn push_profile( } if let Ok(local_key) = std::env::var("LOCAL_KEY") { - info!("Signing key present! Signing profile `{}` for node `{}`", profile_name, node_name); + info!( + "Signing key present! 
Signing profile `{}` for node `{}`", + profile_name, node_name + ); let mut sign_cmd = sign.build(local_key); - let sign_exit_status = sign_cmd - .status() - .await - .map_err(PushProfileError::Sign)?; + let sign_exit_status = sign_cmd.status().await.map_err(PushProfileError::Sign)?; match sign_exit_status.code() { Some(0) => (), @@ -263,10 +264,7 @@ pub async fn push_profile( let mut copy_cmd = copy.build(); - let copy_exit_status = copy_cmd - .status() - .await - .map_err(PushProfileError::Copy)?; + let copy_exit_status = copy_cmd.status().await.map_err(PushProfileError::Copy)?; match copy_exit_status.code() { Some(0) => (), From dad754206c47dcdd8544e450eb2444a5d5d1cad7 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 27 Aug 2021 11:04:18 -0600 Subject: [PATCH 13/53] fix clippy lints --- src/data.rs | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/src/data.rs b/src/data.rs index 4d770b5f..5cbbb8e3 100644 --- a/src/data.rs +++ b/src/data.rs @@ -54,12 +54,7 @@ impl<'a> Target { } => { let node_ = match r.nodes.get(&node) { Some(x) => x, - None => { - return Err(ResolveTargetError::NodeNotFound( - node.to_owned(), - repo.to_owned(), - )) - } + None => return Err(ResolveTargetError::NodeNotFound(node.to_owned(), repo)), }; if let Some(profile) = profile { let profile_ = match node_.node_settings.profiles.get(&profile) { @@ -68,13 +63,13 @@ impl<'a> Target { return Err(ResolveTargetError::ProfileNotFound( profile.to_owned(), node.to_owned(), - repo.to_owned(), + repo, )) } }; Ok({ let d = DeployData::new( - repo.to_owned(), + repo, node.to_owned(), profile.to_owned(), &r.generic_settings, @@ -114,7 +109,7 @@ impl<'a> Target { node: None, profile: None, } => { - if let Some(hostname) = hostname { + if let Some(_hostname) = hostname { todo!() // create issue to discuss: // if allowed, it would be really awkward // to override the hostname for a series of nodes at once @@ -138,7 +133,7 @@ impl<'a> Target { repo, node: None, profile: Some(_), - } => return Err(ResolveTargetError::ProfileWithoutNode(repo.to_owned())), + } => Err(ResolveTargetError::ProfileWithoutNode(repo)), } } } @@ -163,7 +158,7 @@ impl std::str::FromStr for Target { Some(x) => x, None => { return Ok(Target { - repo: repo.to_owned(), + repo, node: None, profile: None, }) @@ -205,9 +200,9 @@ impl std::str::FromStr for Target { } Ok(Target { - repo: repo.to_owned(), - node: node, - profile: profile, + repo, + node, + profile, }) } } @@ -352,6 +347,7 @@ pub struct Flags { pub rollback_succeeded: bool, } +#[allow(clippy::too_many_arguments)] impl<'a> DeployData<'a> { fn new( repo: String, @@ -378,17 +374,16 @@ impl<'a> DeployData<'a> { }; let profile_user = if let Some(ref x) = merged_settings.user { x.to_owned() + } else if let Some(ref x) = merged_settings.ssh_user { + x.to_owned() } else { - if let Some(ref x) = merged_settings.ssh_user { - x.to_owned() - } else { - return Err(DeployDataError::NoProfileUser(profile_name, node_name)); - } + return Err(DeployDataError::NoProfileUser(profile_name, node_name)); }; let profile_path = match profile.profile_settings.profile_path { None => format!( "/nix/var/nix/profiles/{}", match &profile_user[..] 
{ + #[allow(clippy::redundant_clone)] "root" => profile_name.to_owned(), _ => format!("per-user/{}/{}", profile_user, profile_name), } @@ -413,8 +408,10 @@ impl<'a> DeployData<'a> { repo, node_name, profile_name, + flags, node, profile, + merged_settings, hostname, ssh_user, ssh_uri, @@ -422,8 +419,6 @@ impl<'a> DeployData<'a> { profile_path, profile_user, sudo, - flags, - merged_settings, }) } } From 03d9974fe30b0b24525ffa6bbc73c5a17409eed5 Mon Sep 17 00:00:00 2001 From: Michael Fellinger Date: Mon, 30 Aug 2021 18:40:03 +0200 Subject: [PATCH 14/53] expose base from activate.custom --- flake.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index 03f4a253..172b4a7b 100644 --- a/flake.nix +++ b/flake.nix @@ -49,7 +49,7 @@ custom = { __functor = customSelf: base: activate: - final.buildEnv { + (final.buildEnv { name = ("activatable-" + base.name); paths = [ @@ -80,10 +80,10 @@ destination = "/activate-rs"; }) ]; - }; + } // customSelf); }; - nixos = base: (custom // { dryActivate = "$PROFILE/bin/switch-to-configuration dry-activate"; }) base.config.system.build.toplevel '' + nixos = base: (custom // { inherit base; dryActivate = "$PROFILE/bin/switch-to-configuration dry-activate"; }) base.config.system.build.toplevel '' # work around https://github.com/NixOS/nixpkgs/issues/73404 cd /tmp From 120c31f6bf9d5b86e544029aadd4bc14d05680d8 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Thu, 4 Nov 2021 15:40:50 -0600 Subject: [PATCH 15/53] update dependencies Also requires a newer version of Rust, so pull in fenix to get the latest rust release. Some code fixes were necessary after dependency update as well. --- Cargo.lock | 525 ++++++++++++++++---------------------------- Cargo.toml | 4 +- flake.lock | 56 ++++- flake.nix | 224 +++++++++++-------- src/bin/activate.rs | 22 +- src/cli.rs | 6 +- src/data.rs | 4 +- src/settings.rs | 4 +- 8 files changed, 381 insertions(+), 464 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65b9f993..7f1a93af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,13 @@ version = 3 [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] -[[package]] -name = "anymap" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33954243bd79057c2de7338850b85983a44588021f8a5fee574a8888c6de4344" - [[package]] name = "atty" version = "0.2.14" @@ -25,7 +19,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -36,15 +30,15 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cbitset" @@ 
-55,12 +49,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -77,14 +65,14 @@ dependencies = [ "num-integer", "num-traits", "time", - "winapi 0.3.9", + "winapi", ] [[package]] name = "clap" -version = "3.0.0-beta.2" +version = "3.0.0-beta.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd1061998a501ee7d4b6d449020df3266ca3124b941ec56cf2005c3779ca142" +checksum = "feff3878564edb93745d58cf63e17b63f24142506e7a20c87a5521ed7bfb1d63" dependencies = [ "atty", "bitflags", @@ -95,15 +83,14 @@ dependencies = [ "strsim", "termcolor", "textwrap", - "unicode-width", - "vec_map", + "unicase", ] [[package]] name = "clap_derive" -version = "3.0.0-beta.2" +version = "3.0.0-beta.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370f715b81112975b1b69db93e0b56ea4cd4e5002ac43b2da8474106a54096a1" +checksum = "8b15c6b4f786ffb6192ffe65a36855bc1fc2444bcd0945ae16748dcd6ed7d0d3" dependencies = [ "heck", "proc-macro-error", @@ -114,22 +101,21 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ + "cfg-if", "crossbeam-utils", - "maybe-uninit", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg", - "cfg-if 0.1.10", + "cfg-if", "lazy_static", ] @@ -158,11 +144,17 @@ dependencies = [ "yn", ] +[[package]] +name = "dunce" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "453440c271cf5577fd2a40e4942540cb7d0d2f85e27c8d07dd0023c925a67541" + [[package]] name = "envmnt" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbfac51e9996e41d78a943227b7f313efcebf545b21584a0e213b956a062e11e" +checksum = "0f96dd862f12fac698dec3932dff0e6fb34bffeb5515ae5932d620cfe076571e" dependencies = [ "fsio", "indexmap", @@ -170,21 +162,21 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c122a393ea57648015bf06fbd3d372378992e86b9ff5a7a497b076a28c79efe" +checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", - "winapi 0.3.9", + "winapi", ] [[package]] name = "flexi_logger" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37586928c27a25ff5fce49ff3f8e071b3beeef48b4f004fe7d40d75a26e3db5" +checksum = "291b6ce7b3ed2dda82efa6aee4c6bdb55fd11bc88b06c55b01851e94b96e5322" dependencies = [ "atty", "chrono", @@ -205,59 +197,37 @@ dependencies = [ "libc", ] -[[package]] -name = "fsevent" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"97f347202c95c98805c216f9e1df210e8ebaec9fdb2365700a43c10797a35e63" -dependencies = [ - "bitflags", - "fsevent-sys", -] - [[package]] name = "fsevent-sys" -version = "3.0.2" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a29c77f1ca394c3e73a9a5d24cfcabb734682d9634fc398f2204a63c994120" +checksum = "5c0e564d24da983c053beff1bb7178e237501206840a3e6bf4e267b9e8ae734a" dependencies = [ "libc", ] [[package]] name = "fsio" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50045aa8931ae01afbc5d72439e8f57f326becb8c70d07dfc816778eff3d167" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +checksum = "09e87827efaf94c7a44b562ff57de06930712fe21b530c3797cdede26e6377eb" dependencies = [ - "bitflags", - "fuchsia-zircon-sys", + "dunce", ] -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "futures-core" -version = "0.3.8" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" +checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" [[package]] name = "futures-macro" -version = "0.3.8" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -266,23 +236,21 @@ dependencies = [ [[package]] name = "futures-task" -version = "0.3.8" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" -dependencies = [ - "once_cell", -] +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-util" -version = "0.3.8" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ + "autocfg", "futures-core", "futures-macro", "futures-task", - "pin-project", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -297,33 +265,33 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "heck" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "indexmap" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", "hashbrown", @@ -331,9 +299,9 @@ dependencies = [ [[package]] name = "inotify" -version = "0.8.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46dd0a94b393c730779ccfd2a872b67b1eb67be3fc33082e733bdb38b5fde4d4" +checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" dependencies = [ "bitflags", "inotify-sys", @@ -342,45 +310,46 @@ dependencies = [ [[package]] name = "inotify-sys" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4563555856585ab3180a5bf0b2f9f8d301a728462afffc8195b3f5394229c55" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" dependencies = [ "libc", ] [[package]] name = "instant" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] -name = "iovec" -version = "0.1.4" +name = "itoa" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] -name = "itoa" -version = "0.4.6" +name = "kqueue" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "058a107a784f8be94c7d35c1300f4facced2e93d2fbe5b1452b44e905ddca4a9" +dependencies = [ + "kqueue-sys", + "libc", +] [[package]] -name = "kernel32-sys" -version = "0.2.2" +name = "kqueue-sys" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +checksum = "8367585489f01bc55dd27404dcf56b95e6da061a256a666ab23be9ba96a2e587" dependencies = [ - "winapi 0.2.8", - "winapi-build", + "bitflags", + "libc", ] [[package]] @@ -389,17 +358,11 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.81" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" [[package]] name = "linked-hash-map" @@ -418,33 +381,27 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ "scopeguard", ] [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "merge" @@ -470,99 +427,42 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.23" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33bc887064ef1fd66020c9adfc45bb9f33d75a42096c81e7c56c65b75dd1a8b" -dependencies = [ - "libc", - "log", - "miow 0.3.6", + "miow", "ntapi", - "winapi 0.3.9", -] - -[[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log", - "mio 0.6.23", - "slab", + "winapi", ] [[package]] name = "miow" -version = "0.2.2" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", -] - -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "winapi", ] [[package]] name = "notify" -version = "5.0.0-pre.4" +version = "5.0.0-pre.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b946889dfdad884379cd56367d93b6d0ce8889cc027d26a69a3a31c0a03bb5" +checksum = "245d358380e2352c2d020e8ee62baac09b3420f1f6c012a31326cfced4ad487d" dependencies = [ - "anymap", "bitflags", "crossbeam-channel", "filetime", - "fsevent", "fsevent-sys", "inotify", + "kqueue", "libc", - "mio 0.6.23", - "mio-extras", + "mio", 
"walkdir", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -571,7 +471,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -605,21 +505,24 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.5.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "os_str_bytes" -version = "2.4.0" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb2e1c3ee07430c2cf76151675e583e0f19985fa6efae47d6848a3e2c824f85" +checksum = "addaa943333a514159c80c97ff4a93306530d965d27e139188283cd13e06a799" +dependencies = [ + "memchr", +] [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", @@ -628,43 +531,23 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.1" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", "smallvec", - "winapi 0.3.9", -] - -[[package]] -name = "pin-project" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "winapi", ] [[package]] name = "pin-project-lite" -version = "0.2.0" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -704,57 +587,59 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] [[package]] name = "quote" -version = "1.0.7" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = 
"38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" -version = "0.1.57" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] [[package]] name = "regex" -version = "1.4.2" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "rnix" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbbea4c714e5bbf462fa4316ddf45875d8f0e28e5db81050b5f9ce99746c6863" +checksum = "0a9b645f0edba447dbfc6473dd22999f46a1d00ab39e777a2713a1cf34a1597b" dependencies = [ "cbitset", "rowan", @@ -801,18 +686,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.118" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.118" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -821,9 +706,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.60" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" +checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" dependencies = [ "itoa", "ryu", @@ -832,9 +717,9 @@ dependencies = [ [[package]] name = "signal-hook" -version = "0.3.1" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3799fa361789a685db59e3986fb5f6f949e478728b9913c6759f7b014d0372" +checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" dependencies = [ "libc", "signal-hook-registry", @@ -842,24 +727,24 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = 
"9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "smol_str" @@ -867,18 +752,6 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f7909a1d8bc166a862124d84fdc11bda0ea4ed3157ccca662296919c2972db1" -[[package]] -name = "socket2" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "winapi 0.3.9", -] - [[package]] name = "strsim" version = "0.10.0" @@ -887,9 +760,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.73" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ "proc-macro2", "quote", @@ -913,9 +786,9 @@ checksum = "20431e104bfecc1a40872578dbc390e10290a0e9c35fffe3ce6f73c15a9dbfc2" [[package]] name = "textwrap" -version = "0.12.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "203008d98caf094106cfaba70acfed15e18ed3ddb7d94e49baec153a2b462789" +checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" dependencies = [ "unicode-width", ] @@ -928,33 +801,24 @@ checksum = "db3c46be180f1af9673ebb27bc1235396f61ef6965b3fe0dbb2e624deb604f0e" [[package]] name = "thiserror" -version = "1.0.22" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.22" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - [[package]] name = "time" version = "0.1.44" @@ -963,34 +827,34 @@ checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", "wasi", - "winapi 0.3.9", + "winapi", ] [[package]] name = "tokio" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7b349f11a7047e6d1276853e612d152f5e8a352c61917887cc2169e2366b4c" +checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" dependencies = [ "autocfg", "bytes", "libc", "memchr", - "mio 0.7.6", + "mio", "num_cpus", "once_cell", "parking_lot", "pin-project-lite", "signal-hook-registry", "tokio-macros", - "winapi 0.3.9", + "winapi", ] [[package]] name = "tokio-macros" -version = "1.3.0" +version = 
"1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" dependencies = [ "proc-macro2", "quote", @@ -999,51 +863,54 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] -name = "unicode-segmentation" -version = "1.7.1" +name = "unicase" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] [[package]] -name = "unicode-width" -version = "0.1.8" +name = "unicode-segmentation" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] -name = "unicode-xid" -version = "0.2.1" +name = "unicode-width" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] -name = "vec_map" -version = "0.8.2" +name = "unicode-xid" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "walkdir" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", - "winapi 0.3.9", + "winapi", "winapi-util", ] @@ -1059,12 +926,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7884773ab69074615cb8f8425d0e53f11710786158704fca70f53e71b0e05504" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -1075,12 +936,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -1093,7 +948,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1102,16 +957,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "yansi" version = "0.5.0" diff --git a/Cargo.toml b/Cargo.toml index c2e34c27..4c24781a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,12 +6,12 @@ name = "deploy-rs" version = "0.1.0" authors = ["notgne2 ", "Serokell "] -edition = "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "3.0.0-beta.2" +clap = "3.0.0-beta.5" flexi_logger = "0.16" fork = "0.1" futures-util = "0.3.6" diff --git a/flake.lock b/flake.lock index f54ce57e..559ff0c6 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,26 @@ { "nodes": { + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1636007046, + "narHash": "sha256-iN6wKOPLSc06osaycQeW8uXIsc/0THpVxqk9akr1Trk=", + "owner": "nix-community", + "repo": "fenix", + "rev": "4552d289909807a8083ea32652f0e0adaa658447", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, "flake-compat": { "flake": false, "locked": { @@ -23,27 +44,26 @@ ] }, "locked": { - "lastModified": 1622810282, - "narHash": "sha256-4wmvM3/xfD0hCdNDIXVzRMfL4yB1J+DjH6Zte2xbAxk=", - "owner": "nmattia", + "lastModified": 1635777496, + "narHash": "sha256-8y2gyBTD0CMYbiTlmpLNEtvC7c/Al4qIToHTXN0L4kU=", + "owner": "nix-community", "repo": "naersk", - "rev": "e8061169e1495871b56be97c5c51d310fae01374", + "rev": "5bed2dbf074d95627a9c35bf262eb577ade97bb9", "type": "github" }, "original": { - "owner": "nmattia", - "ref": "master", + "owner": "nix-community", "repo": "naersk", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1622972307, - "narHash": "sha256-ENOu0FPCf95iLLoq2txhJtnA2ZpOFhIVBqQVbKM8ra0=", + "lastModified": 1635934775, + "narHash": "sha256-DUkBfZjgeefgqyvFxnkZiOOWXgHP5Y1oKp/Zm+LT05Y=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "d8eb97e3801bde96491535f40483d550b57605b9", + "rev": "4789953e5c1ef6d10e3ff437e5b7ab8eed526942", "type": "github" }, "original": { @@ -55,12 +75,30 @@ }, "root": { "inputs": { + "fenix": "fenix", "flake-compat": "flake-compat", "naersk": "naersk", "nixpkgs": "nixpkgs", "utils": "utils" } }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1635970994, + "narHash": "sha256-X0q/ZtxTofMGV0shLNiMzgxv44/Rt5x84VEmgjOawZg=", + "owner": "rust-analyzer", + "repo": "rust-analyzer", + "rev": "a8247685cfa09084bd620c0877ea1eb3d605d8a2", + "type": "github" + }, + "original": { + "owner": "rust-analyzer", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, "utils": { "locked": { "lastModified": 1622445595, diff --git a/flake.nix b/flake.nix index 172b4a7b..8d0c05a8 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; naersk = { - url = "github:nmattia/naersk/master"; + url = "github:nix-community/naersk"; inputs.nixpkgs.follows = "nixpkgs"; }; 
utils.url = "github:numtide/flake-utils"; @@ -17,42 +17,41 @@ url = "github:edolstra/flake-compat"; flake = false; }; + fenix.url = "github:nix-community/fenix"; + fenix.inputs.nixpkgs.follows = "nixpkgs"; }; - outputs = { self, nixpkgs, utils, naersk, ... }: - { - overlay = final: prev: - let - naersk-lib = final.callPackage naersk { }; - system = final.system; - isDarwin = final.lib.strings.hasSuffix "-darwin" system; - darwinOptions = final.lib.optionalAttrs isDarwin { - nativeBuildInputs = [ - final.darwin.apple_sdk.frameworks.SystemConfiguration - ]; - }; - in + outputs = { self, nixpkgs, utils, naersk, fenix, ... }: { - deploy-rs = { - - deploy-rs = naersk-lib.buildPackage (darwinOptions // { - root = ./.; - }) // { meta.description = "A Simple multi-profile Nix-flake deploy tool"; }; - - lib = rec { - - setActivate = builtins.trace - "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" - activate.custom; - - activate = rec { - custom = - { - __functor = customSelf: base: activate: - (final.buildEnv { - name = ("activatable-" + base.name); - paths = - [ + overlay = final: prev: + let + system = final.system; + isDarwin = final.lib.strings.hasSuffix "-darwin" system; + darwinOptions = final.lib.optionalAttrs isDarwin { + nativeBuildInputs = + [ final.darwin.apple_sdk.frameworks.SystemConfiguration ]; + }; + in { + deploy-rs = { + + deploy-rs = + final.naersk.buildPackage (darwinOptions // { root = ./.; }) // { + meta.description = + "A Simple multi-profile Nix-flake deploy tool"; + }; + + lib = rec { + + setActivate = builtins.trace + "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" + activate.custom; + + activate = rec { + custom = { + __functor = customSelf: base: activate: + (final.buildEnv { + name = ("activatable-" + base.name); + paths = [ base (final.writeTextFile { name = base.name + "-activate-path"; @@ -62,7 +61,11 @@ if [[ "''${DRY_ACTIVATE:-}" == "1" ]] then - ${customSelf.dryActivate or "echo ${final.writeScript "activate" activate}"} + ${ + customSelf.dryActivate or "echo ${ + final.writeScript "activate" activate + }" + } else ${activate} fi @@ -71,67 +74,97 @@ destination = "/deploy-rs-activate"; }) (final.writeTextFile { - name = base.name + "-activate-rs"; - text = '' + name = base.name + "-activate-rs"; + text = '' #!${final.runtimeShell} - exec ${self.defaultPackage.${system}}/bin/activate "$@" + exec ${ + self.defaultPackage.${system} + }/bin/activate "$@" ''; executable = true; destination = "/activate-rs"; }) ]; - } // customSelf); + } // customSelf); + }; + + nixos = base: + (custom // { + inherit base; + dryActivate = + "$PROFILE/bin/switch-to-configuration dry-activate"; + }) base.config.system.build.toplevel '' + # work around https://github.com/NixOS/nixpkgs/issues/73404 + cd /tmp + + $PROFILE/bin/switch-to-configuration switch + + # https://github.com/serokell/deploy-rs/issues/31 + ${with base.config.boot.loader; + final.lib.optionalString systemd-boot.enable + "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} + ''; + + home-manager = base: + custom base.activationPackage "$PROFILE/activate"; + + noop = base: custom base ":"; }; - nixos = base: (custom // { inherit base; dryActivate = "$PROFILE/bin/switch-to-configuration dry-activate"; }) base.config.system.build.toplevel '' - # work around https://github.com/NixOS/nixpkgs/issues/73404 - cd /tmp - - $PROFILE/bin/switch-to-configuration switch - - # 
https://github.com/serokell/deploy-rs/issues/31 - ${with base.config.boot.loader; - final.lib.optionalString systemd-boot.enable - "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} - ''; - - home-manager = base: custom base.activationPackage "$PROFILE/activate"; - - noop = base: custom base ":"; - }; - - deployChecks = deploy: builtins.mapAttrs (_: check: check deploy) { - schema = deploy: final.runCommandNoCC "jsonschema-deploy-system" { } '' - ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${final.writeText "deploy.json" (builtins.toJSON deploy)} ${./interface.json} && touch $out - ''; - - activate = deploy: - let - profiles = builtins.concatLists (final.lib.mapAttrsToList (nodeName: node: final.lib.mapAttrsToList (profileName: profile: [ (toString profile.path) nodeName profileName ]) node.profiles) deploy.nodes); - in - final.runCommandNoCC "deploy-rs-check-activate" { } '' - for x in ${builtins.concatStringsSep " " (map (p: builtins.concatStringsSep ":" p) profiles)}; do - profile_path=$(echo $x | cut -f1 -d:) - node_name=$(echo $x | cut -f2 -d:) - profile_name=$(echo $x | cut -f3 -d:) - - test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); - - test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); - done - - touch $out - ''; + deployChecks = deploy: + builtins.mapAttrs (_: check: check deploy) { + schema = deploy: + final.runCommandNoCC "jsonschema-deploy-system" { } '' + ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${ + final.writeText "deploy.json" (builtins.toJSON deploy) + } ${./interface.json} && touch $out + ''; + + activate = deploy: + let + profiles = builtins.concatLists (final.lib.mapAttrsToList + (nodeName: node: + final.lib.mapAttrsToList (profileName: profile: [ + (toString profile.path) + nodeName + profileName + ]) node.profiles) deploy.nodes); + in final.runCommandNoCC "deploy-rs-check-activate" { } '' + for x in ${ + builtins.concatStringsSep " " + (map (p: builtins.concatStringsSep ":" p) profiles) + }; do + profile_path=$(echo $x | cut -f1 -d:) + node_name=$(echo $x | cut -f2 -d:) + profile_name=$(echo $x | cut -f3 -d:) + + test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); + + test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); + done + + touch $out + ''; + }; + }; }; }; - }; - }; - } // - utils.lib.eachDefaultSystem (system: + } // utils.lib.eachDefaultSystem (system: let - pkgs = import nixpkgs { inherit system; overlays = [ self.overlay ]; }; - in - { + pkgs = import nixpkgs { + inherit system; + overlays = [ + naersk.overlay + (final: prev: { + naersk = prev.naersk.override { + inherit (fenix.packages.${prev.system}.stable) cargo rustc; + }; + }) + self.overlay + fenix.overlay + ]; + }; + in { defaultPackage = self.packages."${system}".deploy-rs; packages.deploy-rs = pkgs.deploy-rs.deploy-rs; @@ -142,22 +175,23 @@ }; devShell = pkgs.mkShell { - inputsFrom = [ self.packages.${system}.deploy-rs ]; - RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}"; + RUST_SRC_PATH = pkgs.rustPlatform.rustLibSrc; buildInputs = with pkgs; [ - nixUnstable - cargo - rustc - rust-analyzer - rustfmt - clippy + rust-analyzer-nightly reuse - rust.packages.stable.rustPlatform.rustLibSrc + 
(pkgs.fenix.stable.withComponents [ + "cargo" + "clippy" + "rust-src" + "rustc" + "rustfmt" + ]) ]; }; checks = { - deploy-rs = self.defaultPackage.${system}.overrideAttrs (super: { doCheck = true; }); + deploy-rs = self.defaultPackage.${system}.overrideAttrs + (super: { doCheck = true; }); }; lib = pkgs.deploy-rs.lib; diff --git a/src/bin/activate.rs b/src/bin/activate.rs index d0cfbe17..14687bdb 100644 --- a/src/bin/activate.rs +++ b/src/bin/activate.rs @@ -6,7 +6,7 @@ use signal_hook::{consts::signal::SIGHUP, iterator::Signals}; -use clap::Clap; +use clap::Parser; use tokio::fs; use tokio::process::Command; @@ -24,7 +24,7 @@ use thiserror::Error; use log::{debug, error, info, warn}; /// Remote activation utility for deploy-rs -#[derive(Clap, Debug)] +#[derive(Parser, Debug)] #[clap(version = "1.0", author = "Serokell ")] struct Opts { /// Print debug logs to output @@ -38,7 +38,7 @@ struct Opts { subcmd: SubCommand, } -#[derive(Clap, Debug)] +#[derive(Parser, Debug)] enum SubCommand { Activate(ActivateOpts), Wait(WaitOpts), @@ -46,7 +46,7 @@ enum SubCommand { } /// Activate a profile -#[derive(Clap, Debug)] +#[derive(Parser, Debug)] struct ActivateOpts { /// The closure to activate closure: String, @@ -75,7 +75,7 @@ struct ActivateOpts { } /// Activate a profile -#[derive(Clap, Debug)] +#[derive(Parser, Debug)] struct WaitOpts { /// The closure to wait for closure: String, @@ -86,7 +86,7 @@ struct WaitOpts { } /// Activate a profile -#[derive(Clap, Debug)] +#[derive(Parser, Debug)] struct RevokeOpts { /// The profile path to revoke profile_path: String, @@ -253,7 +253,7 @@ pub async fn activation_confirmation( let (deleted, done) = mpsc::channel(1); let mut watcher: RecommendedWatcher = - Watcher::new_immediate(move |res: Result| { + Watcher::new(move |res: Result| { let send_result = match res { Ok(e) if e.kind == notify::EventKind::Remove(notify::event::RemoveKind::File) => { debug!("Got worthy removal event, sending on channel"); @@ -271,7 +271,7 @@ pub async fn activation_confirmation( } })?; - watcher.watch(&lock_path, RecursiveMode::NonRecursive)?; + watcher.watch(Path::new(&lock_path), RecursiveMode::NonRecursive)?; if let Err(err) = danger_zone(done, confirm_timeout).await { error!("Error waiting for confirmation event: {}", err); @@ -303,7 +303,7 @@ pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> { // TODO: fix wasteful clone let lock_path = lock_path.clone(); - Watcher::new_immediate(move |res: Result| { + Watcher::new(move |res: Result| { let send_result = match res { Ok(e) if e.kind == notify::EventKind::Create(notify::event::CreateKind::File) => { match &e.paths[..] { @@ -321,11 +321,11 @@ pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> { })? 
}; - watcher.watch(&temp_path, RecursiveMode::NonRecursive)?; + watcher.watch(Path::new(&temp_path), RecursiveMode::NonRecursive)?; // Avoid a potential race condition by checking for existence after watcher creation if fs::metadata(&lock_path).await.is_ok() { - watcher.unwatch(&temp_path)?; + watcher.unwatch(Path::new(&temp_path))?; return Ok(()); } diff --git a/src/cli.rs b/src/cli.rs index e088be39..29d86757 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; use std::io::{stdin, stdout, Write}; -use clap::{ArgMatches, Clap, FromArgMatches}; +use clap::{ArgMatches, FromArgMatches, Parser}; use crate as deploy; @@ -18,7 +18,7 @@ use thiserror::Error; use tokio::process::Command; /// Simple Rust rewrite of a simple Nix Flake deployment tool -#[derive(Clap, Debug, Clone)] +#[derive(Parser, Debug, Clone, Default)] #[clap(version = "1.0", author = "Serokell ")] pub struct Opts { /// The flake to deploy @@ -273,7 +273,7 @@ pub enum RunError { pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { let opts = match args { - Some(o) => ::from_arg_matches(o), + Some(o) => ::from_arg_matches(o).unwrap_or_default(), None => Opts::parse(), }; diff --git a/src/data.rs b/src/data.rs index 5cbbb8e3..c9b2fd59 100644 --- a/src/data.rs +++ b/src/data.rs @@ -3,7 +3,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use clap::Clap; +use clap::Parser; use linked_hash_set::LinkedHashSet; use merge::Merge; use rnix::{types::*, SyntaxKind::*}; @@ -310,7 +310,7 @@ pub enum DeployDataError { NoProfileHost(String, String), } -#[derive(Clap, Debug, Clone)] +#[derive(Parser, Debug, Clone, Default)] pub struct Flags { /// Check signatures when using `nix copy` #[clap(short, long)] diff --git a/src/settings.rs b/src/settings.rs index 061a6d24..c1d3a5a4 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -2,13 +2,13 @@ // // SPDX-License-Identifier: MPL-2.0 -use clap::Clap; +use clap::Parser; use envmnt::{self, ExpandOptions, ExpansionType}; use merge::Merge; use serde::{Deserialize, Deserializer}; use std::collections::HashMap; -#[derive(Clap, Deserialize, Debug, Clone, Merge)] +#[derive(Parser, Deserialize, Debug, Clone, Merge, Default)] pub struct GenericSettings { /// Override the SSH user with the given value #[clap(long)] From cb5aa4acccb4f348a5bd30b7aa3d00115acc705f Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Date: Wed, 15 Sep 2021 10:03:29 -0700 Subject: [PATCH 16/53] flake: replace naersk with buildRustPackage --- flake.lock | 39 +++-------- flake.nix | 201 ++++++++++++++++++++++------------------------------- 2 files changed, 94 insertions(+), 146 deletions(-) diff --git a/flake.lock b/flake.lock index 559ff0c6..908625da 100644 --- a/flake.lock +++ b/flake.lock @@ -8,11 +8,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1636007046, - "narHash": "sha256-iN6wKOPLSc06osaycQeW8uXIsc/0THpVxqk9akr1Trk=", + "lastModified": 1636093455, + "narHash": "sha256-wfb+drJo8d2n35WYECQ/G0ohORJmydVisnlCGi/Ty7k=", "owner": "nix-community", "repo": "fenix", - "rev": "4552d289909807a8083ea32652f0e0adaa658447", + "rev": "a775d531812a4734b0b0e8277223a8762b35e4cc", "type": "github" }, "original": { @@ -24,11 +24,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1606424373, - "narHash": "sha256-oq8d4//CJOrVj+EcOaSXvMebvuTkmBJuT5tzlfewUnQ=", + "lastModified": 1627913399, + "narHash": "sha256-hY8g6H2KFL8ownSiFeMOjwPC8P0ueXpCVEbxgda3pko=", "owner": "edolstra", "repo": "flake-compat", - "rev": "99f1c2157fba4bfe6211a321fd0ee43199025dbf", + 
"rev": "12c64ca55c1014cdc1b16ed5a804aa8576601ff2", "type": "github" }, "original": { @@ -37,26 +37,6 @@ "type": "github" } }, - "naersk": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1635777496, - "narHash": "sha256-8y2gyBTD0CMYbiTlmpLNEtvC7c/Al4qIToHTXN0L4kU=", - "owner": "nix-community", - "repo": "naersk", - "rev": "5bed2dbf074d95627a9c35bf262eb577ade97bb9", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "naersk", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1635934775, @@ -77,7 +57,6 @@ "inputs": { "fenix": "fenix", "flake-compat": "flake-compat", - "naersk": "naersk", "nixpkgs": "nixpkgs", "utils": "utils" } @@ -101,11 +80,11 @@ }, "utils": { "locked": { - "lastModified": 1622445595, - "narHash": "sha256-m+JRe6Wc5OZ/mKw2bB3+Tl0ZbtyxxxfnAWln8Q5qs+Y=", + "lastModified": 1634851050, + "narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=", "owner": "numtide", "repo": "flake-utils", - "rev": "7d706970d94bc5559077eb1a6600afddcd25a7c8", + "rev": "c91f3de5adaf1de973b797ef7485e441a65b8935", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 8d0c05a8..208c4e12 100644 --- a/flake.nix +++ b/flake.nix @@ -8,10 +8,6 @@ inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - naersk = { - url = "github:nix-community/naersk"; - inputs.nixpkgs.follows = "nixpkgs"; - }; utils.url = "github:numtide/flake-utils"; flake-compat = { url = "github:edolstra/flake-compat"; @@ -21,37 +17,46 @@ fenix.inputs.nixpkgs.follows = "nixpkgs"; }; - outputs = { self, nixpkgs, utils, naersk, fenix, ... }: + outputs = { self, nixpkgs, utils, fenix, ... }: + { + overlay = final: prev: + let + system = final.system; + isDarwin = final.lib.strings.hasSuffix "-darwin" system; + darwinOptions = final.lib.optionalAttrs isDarwin { + nativeBuildInputs = [ + final.darwin.apple_sdk.frameworks.SystemConfiguration + ]; + }; + in { - overlay = final: prev: - let - system = final.system; - isDarwin = final.lib.strings.hasSuffix "-darwin" system; - darwinOptions = final.lib.optionalAttrs isDarwin { - nativeBuildInputs = - [ final.darwin.apple_sdk.frameworks.SystemConfiguration ]; - }; - in { - deploy-rs = { - - deploy-rs = - final.naersk.buildPackage (darwinOptions // { root = ./.; }) // { - meta.description = - "A Simple multi-profile Nix-flake deploy tool"; - }; - - lib = rec { - - setActivate = builtins.trace - "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" - activate.custom; - - activate = rec { - custom = { - __functor = customSelf: base: activate: - (final.buildEnv { - name = ("activatable-" + base.name); - paths = [ + deploy-rs = { + + deploy-rs = (final.makeRustPlatform { + inherit (final.fenix.stable) cargo rustc; + }).buildRustPackage (darwinOptions // { + pname = "deploy-rs"; + version = "0.1.0"; + + src = ./.; + + cargoLock.lockFile = ./Cargo.lock; + }) // { meta.description = "A Simple multi-profile Nix-flake deploy tool"; }; + + lib = rec { + + setActivate = builtins.trace + "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" + activate.custom; + + activate = rec { + custom = + { + __functor = customSelf: base: activate: + (final.buildEnv { + name = ("activatable-" + base.name); + paths = + [ base (final.writeTextFile { name = base.name + "-activate-path"; @@ -61,11 +66,7 @@ if [[ "''${DRY_ACTIVATE:-}" == "1" ]] then - ${ - customSelf.dryActivate or "echo ${ - final.writeScript 
"activate" activate - }" - } + ${customSelf.dryActivate or "echo ${final.writeScript "activate" activate}"} else ${activate} fi @@ -77,9 +78,7 @@ name = base.name + "-activate-rs"; text = '' #!${final.runtimeShell} - exec ${ - self.defaultPackage.${system} - }/bin/activate "$@" + exec ${self.defaultPackage.${system}}/bin/activate "$@" ''; executable = true; destination = "/activate-rs"; @@ -88,82 +87,53 @@ } // customSelf); }; - nixos = base: - (custom // { - inherit base; - dryActivate = - "$PROFILE/bin/switch-to-configuration dry-activate"; - }) base.config.system.build.toplevel '' - # work around https://github.com/NixOS/nixpkgs/issues/73404 - cd /tmp - - $PROFILE/bin/switch-to-configuration switch - - # https://github.com/serokell/deploy-rs/issues/31 - ${with base.config.boot.loader; - final.lib.optionalString systemd-boot.enable - "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} - ''; - - home-manager = base: - custom base.activationPackage "$PROFILE/activate"; - - noop = base: custom base ":"; - }; - - deployChecks = deploy: - builtins.mapAttrs (_: check: check deploy) { - schema = deploy: - final.runCommandNoCC "jsonschema-deploy-system" { } '' - ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${ - final.writeText "deploy.json" (builtins.toJSON deploy) - } ${./interface.json} && touch $out - ''; - - activate = deploy: - let - profiles = builtins.concatLists (final.lib.mapAttrsToList - (nodeName: node: - final.lib.mapAttrsToList (profileName: profile: [ - (toString profile.path) - nodeName - profileName - ]) node.profiles) deploy.nodes); - in final.runCommandNoCC "deploy-rs-check-activate" { } '' - for x in ${ - builtins.concatStringsSep " " - (map (p: builtins.concatStringsSep ":" p) profiles) - }; do - profile_path=$(echo $x | cut -f1 -d:) - node_name=$(echo $x | cut -f2 -d:) - profile_name=$(echo $x | cut -f3 -d:) - - test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); - - test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); - done - - touch $out - ''; - }; + nixos = base: (custom // { inherit base; dryActivate = "$PROFILE/bin/switch-to-configuration dry-activate"; }) base.config.system.build.toplevel '' + # work around https://github.com/NixOS/nixpkgs/issues/73404 + cd /tmp + + $PROFILE/bin/switch-to-configuration switch + + # https://github.com/serokell/deploy-rs/issues/31 + ${with base.config.boot.loader; + final.lib.optionalString systemd-boot.enable + "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} + ''; + + home-manager = base: custom base.activationPackage "$PROFILE/activate"; + + noop = base: custom base ":"; + }; + + deployChecks = deploy: builtins.mapAttrs (_: check: check deploy) { + schema = deploy: final.runCommandNoCC "jsonschema-deploy-system" { } '' + ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${final.writeText "deploy.json" (builtins.toJSON deploy)} ${./interface.json} && touch $out + ''; + + activate = deploy: + let + profiles = builtins.concatLists (final.lib.mapAttrsToList (nodeName: node: final.lib.mapAttrsToList (profileName: profile: [ (toString profile.path) nodeName profileName ]) node.profiles) deploy.nodes); + in + final.runCommandNoCC "deploy-rs-check-activate" { } '' + for x in ${builtins.concatStringsSep " " (map (p: builtins.concatStringsSep ":" p) profiles)}; do + profile_path=$(echo $x | cut -f1 -d:) + node_name=$(echo $x | cut -f2 
-d:) + profile_name=$(echo $x | cut -f3 -d:) + + test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); + + test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); + done + + touch $out + ''; }; }; }; - } // utils.lib.eachDefaultSystem (system: + }; + } // + utils.lib.eachDefaultSystem (system: let - pkgs = import nixpkgs { - inherit system; - overlays = [ - naersk.overlay - (final: prev: { - naersk = prev.naersk.override { - inherit (fenix.packages.${prev.system}.stable) cargo rustc; - }; - }) - self.overlay - fenix.overlay - ]; - }; + pkgs = import nixpkgs { inherit system; overlays = [ self.overlay fenix.overlay ]; }; in { defaultPackage = self.packages."${system}".deploy-rs; packages.deploy-rs = pkgs.deploy-rs.deploy-rs; @@ -190,8 +160,7 @@ }; checks = { - deploy-rs = self.defaultPackage.${system}.overrideAttrs - (super: { doCheck = true; }); + deploy-rs = self.defaultPackage.${system}.overrideAttrs (super: { doCheck = true; }); }; lib = pkgs.deploy-rs.lib; From 40ff56193990f69f729957bf9a2a9cdbacec8775 Mon Sep 17 00:00:00 2001 From: Peter Woodman Date: Fri, 6 Aug 2021 02:08:12 -0400 Subject: [PATCH 17/53] add aarch64-darwin to built systems --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 208c4e12..dc9b2462 100644 --- a/flake.nix +++ b/flake.nix @@ -131,7 +131,7 @@ }; }; } // - utils.lib.eachDefaultSystem (system: + utils.lib.eachSystem (utils.lib.defaultSystems ++ ["aarch64-darwin"]) (system: let pkgs = import nixpkgs { inherit system; overlays = [ self.overlay fenix.overlay ]; }; in { From d630e135698d71cf810544c6d5e696ddfc45c99f Mon Sep 17 00:00:00 2001 From: Alexander Bantyev Date: Tue, 28 Sep 2021 12:48:27 +0300 Subject: [PATCH 18/53] Fix darwin build Frameworks are libraries used at runtime, so should be in buildInputs and not nativeBuildInputs. 
Closes https://github.com/serokell/deploy-rs/issues/132 --- flake.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flake.nix b/flake.nix index dc9b2462..2d8dbf0e 100644 --- a/flake.nix +++ b/flake.nix @@ -22,10 +22,10 @@ overlay = final: prev: let system = final.system; - isDarwin = final.lib.strings.hasSuffix "-darwin" system; - darwinOptions = final.lib.optionalAttrs isDarwin { - nativeBuildInputs = [ - final.darwin.apple_sdk.frameworks.SystemConfiguration + darwinOptions = final.lib.optionalAttrs final.stdenv.isDarwin { + buildInputs = with final.darwin.apple_sdk.frameworks; [ + SystemConfiguration + CoreServices ]; }; in From f508d4bcd5ef2380b662f621ff9645e3dcafa40e Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 5 Nov 2021 12:04:04 -0600 Subject: [PATCH 19/53] change external interface to take parsed Opts --- src/cli.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 29d86757..838b1512 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -271,9 +271,9 @@ pub enum RunError { RunDeploy(#[from] RunDeployError), } -pub async fn run(args: Option<&ArgMatches>) -> Result<(), RunError> { +pub async fn run(args: Option) -> Result<(), RunError> { let opts = match args { - Some(o) => ::from_arg_matches(o).unwrap_or_default(), + Some(o) => o, None => Opts::parse(), }; From c518de49f446d08d3a45d6b85d00380261ee25d0 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 5 Nov 2021 12:12:51 -0600 Subject: [PATCH 20/53] make Opts fields public for external use --- src/cli.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 838b1512..e21363aa 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -23,21 +23,21 @@ use tokio::process::Command; pub struct Opts { /// The flake to deploy #[clap(group = "deploy")] - target: Option, + pub target: Option, /// A list of flakes to deploy alternatively #[clap(long, group = "deploy")] - targets: Option>, + pub targets: Option>, /// Override hostname used for the node #[clap(long)] - hostname: Option, + pub hostname: Option, #[clap(flatten)] - flags: data::Flags, + pub flags: data::Flags, #[clap(flatten)] - generic_settings: settings::GenericSettings, + pub generic_settings: settings::GenericSettings, } /// Returns if the available Nix installation supports flakes From 6dbb524702767b6cf17e9d54687b52de492e807e Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 5 Nov 2021 12:37:52 -0600 Subject: [PATCH 21/53] don't require hostname, since we pass in on the fly --- interface.json | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/interface.json b/interface.json index a9471f4f..6a25fc1b 100644 --- a/interface.json +++ b/interface.json @@ -64,10 +64,7 @@ }, "additionalProperties": false } - }, - "required": [ - "hostname" - ] + } }, "profile_settings": { "type": "object", From efde6ef33207027d78d62a0e657fbe73e163c79e Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 00:14:17 -0500 Subject: [PATCH 22/53] improve logging - Announcing a revocation without doing one is confusing --- src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli.rs b/src/cli.rs index e21363aa..ed4e51f0 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -226,8 +226,8 @@ async fn run_deploy( if cmd_flags.dry_activate { info!("dry run, not rolling back"); } - info!("Revoking previous deploys"); if cmd_flags.rollback_succeeded && cmd_settings.auto_rollback.unwrap_or(true) { + info!("Revoking previous 
deploys"); // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) From 50754605570ae32d3c1604d7b4a73a528c71e9b2 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 00:37:47 -0500 Subject: [PATCH 23/53] imp(flags): make bools true bools - This affords better help rendering (at the very least) --- README.md | 18 ++++++++---------- interface.json | 4 ++-- src/cli.rs | 6 +++--- src/deploy.rs | 24 ++++++++++++------------ src/push.rs | 2 +- src/settings.rs | 18 +++++++++++------- 6 files changed, 37 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index acd2b7f3..276ded97 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ You can try out this tool easily with `nix run`: In you want to deploy multiple flakes or a subset of profiles with one invocation, instead of calling `deploy ` you can issue `deploy --targets [ ...]` where `` is supposed to take the same format as discussed before. -Running in this mode, if any of the deploys fails, the deploy will be aborted and all successful deploys rolled back. `--rollback-succeeded false` can be used to override this behavior, otherwise the `auto-rollback` argument takes precedent. +Running in this mode, if any of the deploys fails, the deploy will be aborted and all successful deploys rolled back. `--rollback-succeeded false` can be used to override this behavior, otherwise the `no-auto-rollback` argument takes precedent. If you require a signing key to push closures to your server, specify the path to it in the `LOCAL_KEY` environment variable. @@ -48,7 +48,7 @@ This type of design (as opposed to more traditional tools like NixOps or morph) ### Magic Rollback -There is a built-in feature to prevent you making changes that might render your machine unconnectable or unusuable, which works by connecting to the machine after profile activation to confirm the machine is still available, and instructing the target node to automatically roll back if it is not confirmed. If you do not disable `magicRollback` in your configuration (see later sections) or with the CLI flag, you will be unable to make changes to the system which will affect you connecting to it (changing SSH port, changing your IP, etc). +There is a built-in feature to prevent you making changes that might render your machine unconnectable or unusuable, which works by connecting to the machine after profile activation to confirm the machine is still available, and instructing the target node to automatically roll back if it is not confirmed. If you do not disable `noMagicRollback` in your configuration (see later sections) or with the CLI flag, you will be unable to make changes to the system which will affect you connecting to it (changing SSH port, changing your IP, etc). ## API @@ -166,17 +166,15 @@ This is a set of options that can be put in any of the above definitions, with t # This defaults to `false` fastConnection = false; - # If the previous profile should be re-activated if activation fails. - # This defaults to `true` - autoRollback = true; + # If the previous profile should NOT be re-activated if activation fails. + noAutoRollback = true; - # See the earlier section about Magic Rollback for more information. - # This defaults to `true` - magicRollback = true; + # See the earlier section about Magic Rollback for more information, disable with this attr. 
+ noMagicRollback = true; - # The path which deploy-rs will use for temporary files, this is currently only used by `magicRollback` to create an inotify watcher in for confirmations + # The path which deploy-rs will use for temporary files, this is currently only used by the magic rollback to create an inotify watcher in for confirmations # If not specified, this will default to `/tmp` - # (if `magicRollback` is in use, this _must_ be writable by `user`) + # (if magic rollback is in use, this _must_ be writable by `user`) tempPath = "/home/someuser/.deploy-rs"; } ``` diff --git a/interface.json b/interface.json index 6a25fc1b..eb47cf48 100644 --- a/interface.json +++ b/interface.json @@ -21,10 +21,10 @@ "fastConnection": { "type": "boolean" }, - "autoRollback": { + "noAutoRollback": { "type": "boolean" }, - "magicRollback": { + "noMagicRollback": { "type": "boolean" }, "confirmTimeout": { diff --git a/src/cli.rs b/src/cli.rs index ed4e51f0..9e21b48d 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -209,7 +209,7 @@ async fn run_deploy( // Run all deployments // In case of an error rollback any previoulsy made deployment. - // Rollbacks adhere to the global seeting to auto_rollback and secondary + // Rollbacks adhere to the global seeting to no_auto_rollback and secondary // the profile's configuration for deploy_data in &parts { if let Err(e) = deploy::deploy::deploy_profile( @@ -226,13 +226,13 @@ async fn run_deploy( if cmd_flags.dry_activate { info!("dry run, not rolling back"); } - if cmd_flags.rollback_succeeded && cmd_settings.auto_rollback.unwrap_or(true) { + if cmd_flags.rollback_succeeded && !cmd_settings.no_auto_rollback { info!("Revoking previous deploys"); // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) for deploy_data in &succeeded { - if deploy_data.merged_settings.auto_rollback.unwrap_or(true) { + if !deploy_data.merged_settings.no_auto_rollback { deploy::deploy::revoke( &deploy_data.node_name, &deploy_data.profile_name, diff --git a/src/deploy.rs b/src/deploy.rs index eb5eca36..09a86be7 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -35,9 +35,9 @@ pub struct ActivateCommand<'a> { profile_path: &'a str, temp_path: &'a str, closure: &'a str, - auto_rollback: bool, + no_auto_rollback: bool, confirm_timeout: u16, - magic_rollback: bool, + no_magic_rollback: bool, debug_logs: bool, log_dir: Option<&'a str>, dry_activate: bool, @@ -50,9 +50,9 @@ impl<'a> ActivateCommand<'a> { profile_path: &d.profile_path, temp_path: &d.temp_path, closure: &d.profile.profile_settings.path, - auto_rollback: d.merged_settings.auto_rollback.unwrap_or(true), + no_auto_rollback: d.merged_settings.no_auto_rollback, confirm_timeout: d.merged_settings.confirm_timeout.unwrap_or(30), - magic_rollback: d.merged_settings.magic_rollback.unwrap_or(true), + no_magic_rollback: d.merged_settings.no_magic_rollback, debug_logs: d.flags.debug_logs, log_dir: d.flags.log_dir.as_deref(), dry_activate: d.flags.dry_activate, @@ -77,11 +77,11 @@ impl<'a> ActivateCommand<'a> { cmd = format!("{} --confirm-timeout {}", cmd, self.confirm_timeout); - if self.magic_rollback { + if !self.no_magic_rollback { cmd = format!("{} --magic-rollback", cmd); } - if self.auto_rollback { + if !self.no_auto_rollback { cmd = format!("{} --auto-rollback", cmd); } @@ -102,11 +102,11 @@ fn test_activation_command_builder() { let sudo = Some("sudo -u test"); let profile_path = "/blah/profiles/test"; let closure = "/nix/store/blah/etc"; - let auto_rollback = true; + let 
no_auto_rollback = false; let dry_activate = false; let temp_path = "/tmp"; let confirm_timeout = 30; - let magic_rollback = true; + let no_magic_rollback = false; let debug_logs = true; let log_dir = Some("/tmp/something.txt"); @@ -115,10 +115,10 @@ fn test_activation_command_builder() { sudo, profile_path, closure, - auto_rollback, + no_auto_rollback, temp_path, confirm_timeout, - magic_rollback, + no_magic_rollback, debug_logs, log_dir, dry_activate @@ -352,7 +352,7 @@ pub async fn deploy_profile( ); } let dry_activate = &activate.dry_activate.clone(); - let magic_rollback = &activate.magic_rollback.clone(); + let no_magic_rollback = &activate.no_magic_rollback.clone(); let activate_cmd = activate.build(); @@ -360,7 +360,7 @@ pub async fn deploy_profile( let mut ssh_activate_cmd = ssh.build(); - if !*magic_rollback || *dry_activate { + if *no_magic_rollback || *dry_activate { let ssh_activate_exit_status = ssh_activate_cmd .arg(activate_cmd) .status() diff --git a/src/push.rs b/src/push.rs index 2f46273d..2462d5b8 100644 --- a/src/push.rs +++ b/src/push.rs @@ -105,7 +105,7 @@ impl<'a> CopyCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Self { CopyCommand { closure: d.profile.profile_settings.path.as_str(), - fast_connection: d.merged_settings.fast_connection.unwrap_or(false), + fast_connection: d.merged_settings.fast_connection, check_sigs: &d.flags.checksigs, ssh_uri: d.ssh_uri.as_str(), ssh_opts: d diff --git a/src/settings.rs b/src/settings.rs index c1d3a5a4..cebab455 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -29,12 +29,14 @@ pub struct GenericSettings { pub ssh_opts: Vec, /// Override if the connecting to the target node should be considered fast #[clap(long)] - #[serde(rename(deserialize = "fastConnection"))] - pub fast_connection: Option, - /// Override if a rollback should be attempted if activation fails + #[serde(rename(deserialize = "fastConnection"), default)] + #[merge(strategy = merge::bool::overwrite_false)] + pub fast_connection: bool, + /// Do not attempt rollback if activation fails #[clap(long)] - #[serde(rename(deserialize = "autoRollback"))] - pub auto_rollback: Option, + #[serde(rename(deserialize = "noAutoRollback"), default)] + #[merge(strategy = merge::bool::overwrite_false)] + pub no_auto_rollback: bool, /// How long activation should wait for confirmation (if using magic-rollback) #[clap(long)] #[serde(rename(deserialize = "confirmTimeout"))] @@ -43,9 +45,11 @@ pub struct GenericSettings { #[clap(long)] #[serde(rename(deserialize = "tempPath"))] pub temp_path: Option, + /// Do not do a magic rollback (see documentation) #[clap(long)] - #[serde(rename(deserialize = "magicRollback"))] - pub magic_rollback: Option, + #[serde(rename(deserialize = "noMagicRollback"), default)] + #[merge(strategy = merge::bool::overwrite_false)] + pub no_magic_rollback: bool, } impl GenericSettings { From df1b3491d566407530d97d96ddb8e28be53292dc Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 00:50:44 -0500 Subject: [PATCH 24/53] imp(hostname): make hostname optional - generic nixosconfig be known that are hydrated with a target-ip during runtime (e.g. 
from a remote source) - Example: auto scaling groups --- src/cli.rs | 9 +++++++-- src/data.rs | 10 +++++++--- src/settings.rs | 2 +- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 9e21b48d..f56caccc 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; use std::io::{stdin, stdout, Write}; -use clap::{ArgMatches, FromArgMatches, Parser}; +use clap::{Parser}; use crate as deploy; @@ -69,6 +69,11 @@ fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> let mut part_map: HashMap> = HashMap::new(); for data in parts { + let hostname = if let Some(x) = &data.node.node_settings.hostname { + x + } else { + panic!() + }; part_map .entry(data.node_name.to_string()) .or_insert_with(HashMap::new) @@ -78,7 +83,7 @@ fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> user: &data.profile_user, ssh_user: &data.ssh_user, path: &data.profile.profile_settings.path, - hostname: &data.node.node_settings.hostname, + hostname, ssh_opts: &data.merged_settings.ssh_opts, }, ); diff --git a/src/data.rs b/src/data.rs index c9b2fd59..7ab4e544 100644 --- a/src/data.rs +++ b/src/data.rs @@ -306,8 +306,8 @@ pub struct DeployData<'a> { pub enum DeployDataError { #[error("Neither `user` nor `sshUser` are set for profile {0} of node {1}")] NoProfileUser(String, String), - #[error("Value `hostname` is not define for profile {0} of node {1}")] - NoProfileHost(String, String), + #[error("Value `hostname` is not define for node {0}")] + NoHost(String), } #[derive(Parser, Debug, Clone, Default)] @@ -400,7 +400,11 @@ impl<'a> DeployData<'a> { }; let hostname = match hostname { Some(x) => x, - None => &node.node_settings.hostname, + None => if let Some(ref x) = node.node_settings.hostname { + x + } else { + return Err(DeployDataError::NoHost(node_name)); + }, }; let ssh_uri = format!("ssh://{}@{}", &ssh_user, &hostname); diff --git a/src/settings.rs b/src/settings.rs index cebab455..ea1479d3 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -71,7 +71,7 @@ impl GenericSettings { #[derive(Deserialize, Debug, Clone)] pub struct NodeSettings { - pub hostname: String, + pub hostname: Option, pub profiles: HashMap, #[serde( skip_serializing_if = "Vec::is_empty", From 9dc922997b52533de827bc5296250579f4c33bff Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 12:30:36 -0500 Subject: [PATCH 25/53] fix(hostname): accessor resolved values, not raw ones - The resolved value already has been homologized --- src/cli.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index f56caccc..3efce0a0 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -69,11 +69,6 @@ fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> let mut part_map: HashMap> = HashMap::new(); for data in parts { - let hostname = if let Some(x) = &data.node.node_settings.hostname { - x - } else { - panic!() - }; part_map .entry(data.node_name.to_string()) .or_insert_with(HashMap::new) @@ -83,7 +78,7 @@ fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> user: &data.profile_user, ssh_user: &data.ssh_user, path: &data.profile.profile_settings.path, - hostname, + hostname: &data.hostname, ssh_opts: &data.merged_settings.ssh_opts, }, ); From fee7e09e9bed1fbfaa7ad872b31d579644ae1cb2 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 15:06:29 -0500 Subject: [PATCH 26/53] imp(ip-suffix): implement target ip suffix - This allows 
to override multiple ip-per-target through a uri-like grammar `.#target@ip-address` --- src/cli.rs | 2 +- src/data.rs | 73 +++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 61 insertions(+), 14 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 3efce0a0..29ccce0d 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -178,7 +178,7 @@ async fn run_deploy( let deploy_datas_ = targets .into_iter() .zip(&settings) - .map(|(target, root)| target.resolve(&root, &cmd_settings, &cmd_flags, hostname.as_deref())) + .map(|(target, root)| target.resolve(&root, &cmd_settings, &cmd_flags, hostname.to_owned())) .collect::>>, data::ResolveTargetError>>()?; let deploy_datas: Vec<&data::DeployData<'_>> = deploy_datas_.iter().flatten().collect(); diff --git a/src/data.rs b/src/data.rs index 7ab4e544..11cc6e57 100644 --- a/src/data.rs +++ b/src/data.rs @@ -8,6 +8,7 @@ use linked_hash_set::LinkedHashSet; use merge::Merge; use rnix::{types::*, SyntaxKind::*}; use thiserror::Error; +use std::net::{AddrParseError}; use crate::settings; @@ -16,12 +17,15 @@ pub struct Target { pub repo: String, pub node: Option, pub profile: Option, + pub ip: Option, } #[derive(Error, Debug)] pub enum ParseTargetError { #[error("The given path was too long, did you mean to put something in quotes?")] PathTooLong, + #[error("Invalid IP suffix for target '{0}': {1}")] + InvalidIp(String, AddrParseError), #[error("Unrecognized node or token encountered")] Unrecognized, } @@ -36,6 +40,8 @@ pub enum ResolveTargetError { ProfileWithoutNode(String), #[error("Deployment data invalid: {0}")] InvalidDeployDataError(#[from] DeployDataError), + #[error("IP suffix on flake root target '{0}'. You can't deploy all the flake's targets to the same node, dude.")] + IpOnFlakeRoot(String), } impl<'a> Target { @@ -44,13 +50,14 @@ impl<'a> Target { r: &'a settings::Root, cs: &'a settings::GenericSettings, cf: &'a Flags, - hostname: Option<&'a str>, + hostname: Option, ) -> Result>, ResolveTargetError> { match self { Target { repo, node: Some(node), profile, + ip, } => { let node_ = match r.nodes.get(&node) { Some(x) => x, @@ -68,6 +75,11 @@ impl<'a> Target { } }; Ok({ + let hostname_: Option = if let Some(_) = ip { + ip + } else { + hostname + }; let d = DeployData::new( repo, node.to_owned(), @@ -77,7 +89,7 @@ impl<'a> Target { cf, node_, profile_, - hostname, + hostname_, )?; vec![d] }) @@ -95,8 +107,9 @@ impl<'a> Target { repo: repo.to_owned(), node: Some(node.to_owned()), profile: Some(p.to_string()), + ip: ip.to_owned(), } - .resolve(r, cs, cf, hostname) + .resolve(r, cs, cf, hostname.to_owned()) }) .collect::>>, ResolveTargetError>>()? .into_iter() @@ -104,10 +117,17 @@ impl<'a> Target { .collect::>>()) } } + Target { + repo, + node: None, + profile: _, + ip: Some(_), + } => Err(ResolveTargetError::IpOnFlakeRoot(repo)), Target { repo, node: None, profile: None, + ip: _, } => { if let Some(_hostname) = hostname { todo!() // create issue to discuss: @@ -121,8 +141,9 @@ impl<'a> Target { repo: repo.to_owned(), node: Some(n.to_string()), profile: None, + ip: self.ip.to_owned(), } - .resolve(r, cs, cf, hostname) + .resolve(r, cs, cf, hostname.to_owned()) }) .collect::>>, ResolveTargetError>>()? 
.into_iter() @@ -133,6 +154,7 @@ impl<'a> Target { repo, node: None, profile: Some(_), + ip: _, } => Err(ResolveTargetError::ProfileWithoutNode(repo)), } } @@ -142,17 +164,41 @@ impl std::str::FromStr for Target { type Err = ParseTargetError; fn from_str(s: &str) -> Result { - let flake_fragment_start = s.find('#'); - let (repo, maybe_fragment) = match flake_fragment_start { + let target_fragment_start = s.find('#'); + let (repo, maybe_target_full) = match target_fragment_start { Some(i) => (s[..i].to_string(), Some(&s[i + 1..])), None => (s.to_string(), None), }; + let mut maybe_target: Option<&str> = None; + + let mut ip: Option = None; + + if let Some(t) = maybe_target_full { + let ip_fragment_start = t.find('@'); + if let Some(i) = ip_fragment_start { + maybe_target = Some(&t[..i]); + ip = Some(t[i + 1..].to_string()); + // match t[i + 1..].parse() { + // Ok(k) => k, + // Err(e) => return Err( + // ParseTargetError::InvalidIp( + // maybe_target.unwrap().to_string(), + // e + // ) + // ), + // }; + } else { + maybe_target = maybe_target_full; + }; + }; + + let mut node: Option = None; let mut profile: Option = None; - if let Some(fragment) = maybe_fragment { - let ast = rnix::parse(fragment); + if let Some(target) = maybe_target { + let ast = rnix::parse(target); let first_child = match ast.root().node().first_child() { Some(x) => x, @@ -161,6 +207,7 @@ impl std::str::FromStr for Target { repo, node: None, profile: None, + ip, // NB: error if not none; catched on target resolve }) } }; @@ -203,6 +250,7 @@ impl std::str::FromStr for Target { repo, node, profile, + ip, }) } } @@ -292,8 +340,7 @@ pub struct DeployData<'a> { pub profile: &'a settings::Profile, pub merged_settings: settings::GenericSettings, - pub hostname: &'a str, - + pub hostname: String, pub ssh_user: String, pub ssh_uri: String, pub temp_path: String, @@ -358,7 +405,7 @@ impl<'a> DeployData<'a> { flags: &'a Flags, node: &'a settings::Node, profile: &'a settings::Profile, - hostname: Option<&'a str>, + hostname: Option, ) -> Result, DeployDataError> { let mut merged_settings = cmd_settings.clone(); merged_settings.merge(profile.generic_settings.clone()); @@ -400,8 +447,8 @@ impl<'a> DeployData<'a> { }; let hostname = match hostname { Some(x) => x, - None => if let Some(ref x) = node.node_settings.hostname { - x + None => if let Some(x) = &node.node_settings.hostname { + x.to_string() } else { return Err(DeployDataError::NoHost(node_name)); }, From 5603808062ce7ba7ef79d1a97511f941a484c9e8 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 16:58:58 -0500 Subject: [PATCH 27/53] imp(socket): resolve sockets at the entrypoint - Allow for different ssh ports - Requires to specify hostnames with port such as: `host:port` or `ip:port` --- src/cli.rs | 6 ++---- src/data.rs | 32 +++++++++++++++++--------------- src/deploy.rs | 7 +++---- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 29ccce0d..ea0037fa 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -59,9 +59,8 @@ async fn test_flake_support() -> Result { #[derive(Serialize)] struct PromptPart<'a> { user: &'a str, - ssh_user: &'a str, path: &'a str, - hostname: &'a str, + uri: &'a str, ssh_opts: &'a [String], } @@ -76,9 +75,8 @@ fn print_deployment(parts: &[&data::DeployData]) -> Result<(), toml::ser::Error> data.profile_name.to_string(), PromptPart { user: &data.profile_user, - ssh_user: &data.ssh_user, path: &data.profile.profile_settings.path, - hostname: &data.hostname, + uri: &data.ssh_uri, ssh_opts: 
&data.merged_settings.ssh_opts, }, ); diff --git a/src/data.rs b/src/data.rs index 11cc6e57..74910881 100644 --- a/src/data.rs +++ b/src/data.rs @@ -8,7 +8,7 @@ use linked_hash_set::LinkedHashSet; use merge::Merge; use rnix::{types::*, SyntaxKind::*}; use thiserror::Error; -use std::net::{AddrParseError}; +use std::net::{SocketAddr, ToSocketAddrs}; use crate::settings; @@ -24,8 +24,6 @@ pub struct Target { pub enum ParseTargetError { #[error("The given path was too long, did you mean to put something in quotes?")] PathTooLong, - #[error("Invalid IP suffix for target '{0}': {1}")] - InvalidIp(String, AddrParseError), #[error("Unrecognized node or token encountered")] Unrecognized, } @@ -179,15 +177,6 @@ impl std::str::FromStr for Target { if let Some(i) = ip_fragment_start { maybe_target = Some(&t[..i]); ip = Some(t[i + 1..].to_string()); - // match t[i + 1..].parse() { - // Ok(k) => k, - // Err(e) => return Err( - // ParseTargetError::InvalidIp( - // maybe_target.unwrap().to_string(), - // e - // ) - // ), - // }; } else { maybe_target = maybe_target_full; }; @@ -340,7 +329,11 @@ pub struct DeployData<'a> { pub profile: &'a settings::Profile, pub merged_settings: settings::GenericSettings, - pub hostname: String, + // TODO: can be used instead of ssh_uri to iterate + // over potentially a series of sockets to deploy + // to + // pub sockets: Vec, + pub ssh_user: String, pub ssh_uri: String, pub temp_path: String, @@ -355,6 +348,8 @@ pub enum DeployDataError { NoProfileUser(String, String), #[error("Value `hostname` is not define for node {0}")] NoHost(String), + #[error("Cannot creato a socket for '{0}' from '{1}': {2}")] + InvalidSockent(String, String, String), } #[derive(Parser, Debug, Clone, Default)] @@ -453,7 +448,14 @@ impl<'a> DeployData<'a> { return Err(DeployDataError::NoHost(node_name)); }, }; - let ssh_uri = format!("ssh://{}@{}", &ssh_user, &hostname); + let maybe_iter = &mut hostname[..].to_socket_addrs(); + let sockets: Vec = match maybe_iter { + Ok(x) => x.into_iter().collect(), + Err(err) => return Err( + DeployDataError::InvalidSockent(repo, hostname, err.to_string()), + ), + }; + let ssh_uri = format!("ssh://{}@{}", &ssh_user, sockets.first().unwrap()); Ok(DeployData { repo, @@ -463,7 +465,7 @@ impl<'a> DeployData<'a> { node, profile, merged_settings, - hostname, + // sockets, ssh_user, ssh_uri, temp_path, diff --git a/src/deploy.rs b/src/deploy.rs index 09a86be7..aac155fb 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -11,20 +11,19 @@ use tokio::process::Command; use crate::data; pub struct SshCommand<'a> { - hoststring: String, + ssh_uri: &'a str, opts: &'a Vec, } impl<'a> SshCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Result { - let hoststring = format!("{}@{}", &d.ssh_user, d.hostname); let opts = d.merged_settings.ssh_opts.as_ref(); - Ok(SshCommand { hoststring, opts }) + Ok(SshCommand { ssh_uri: d.ssh_uri.as_ref(), opts }) } fn build(&self) -> Command { let mut cmd = Command::new("ssh"); - cmd.arg(&self.hoststring); + cmd.arg(self.ssh_uri); cmd.args(self.opts.iter()); cmd } From 55646085ebd5eb11eebad6311f9f89b12079710c Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 17:06:09 -0500 Subject: [PATCH 28/53] imp tests --- examples/system/flake.nix | 4 ++-- src/data.rs | 39 +++++++++++++++++++++++---------------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/examples/system/flake.nix b/examples/system/flake.nix index bcc841c7..bea9a9ce 100644 --- a/examples/system/flake.nix +++ b/examples/system/flake.nix @@ -23,8 
+23,8 @@ defaultPackage.x86_64-linux = import ./hello.nix nixpkgs; deploy.nodes.example = { - sshOpts = [ "-p" "2221" ]; - hostname = "localhost"; + sshOpts = [ "-i" "./path/to/private/key" ]; + hostname = "localhost:2221"; fastConnection = true; profiles = { system = { diff --git a/src/data.rs b/src/data.rs index 74910881..c266784c 100644 --- a/src/data.rs +++ b/src/data.rs @@ -247,73 +247,80 @@ impl std::str::FromStr for Target { #[test] fn test_deploy_target_from_str() { assert_eq!( - "../deploy/examples/system".parse::().unwrap(), + "../examples/system".parse::().unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: None, profile: None, + ip: None, } ); assert_eq!( - "../deploy/examples/system#".parse::().unwrap(), + "../examples/system#".parse::().unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: None, profile: None, + ip: None, } ); assert_eq!( - "../deploy/examples/system#computer.\"something.nix\"" + "../examples/system#computer.\"something.nix\"@localhost:22" .parse::() .unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: Some("computer".to_string()), profile: Some("something.nix".to_string()), + ip: Some("localhost:22".to_string()), } ); assert_eq!( - "../deploy/examples/system#\"example.com\".system" + "../examples/system#\"example.com\".system" .parse::() .unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: Some("example.com".to_string()), profile: Some("system".to_string()), + ip: None, } ); assert_eq!( - "../deploy/examples/system#example" + "../examples/system#example" .parse::() .unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: Some("example".to_string()), - profile: None + profile: None, + ip: None, } ); assert_eq!( - "../deploy/examples/system#example.system" + "../examples/system#example.system" .parse::() .unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: Some("example".to_string()), - profile: Some("system".to_string()) + profile: Some("system".to_string()), + ip: None, } ); assert_eq!( - "../deploy/examples/system".parse::().unwrap(), + "../examples/system".parse::().unwrap(), Target { - repo: "../deploy/examples/system".to_string(), + repo: "../examples/system".to_string(), node: None, profile: None, + ip: None, } ); } From 0c28e0c52d07b63eb88c49bcb116714ad7ba2479 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 18:19:31 -0500 Subject: [PATCH 29/53] fix ssh uri/socket for nix copy - `nix copy --to` does not understans `root@host:port`, only `root@host` + `NIX_SSHOPTS` --- src/push.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/push.rs b/src/push.rs index 2462d5b8..2fbff10b 100644 --- a/src/push.rs +++ b/src/push.rs @@ -97,22 +97,27 @@ pub struct CopyCommand<'a> { closure: &'a str, fast_connection: bool, check_sigs: &'a bool, - ssh_uri: &'a str, - ssh_opts: String, + hostname: &'a str, + nix_ssh_opts: String, } impl<'a> CopyCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Self { + // ssh_uri: ssh://host:port + let (uri, port) = d.ssh_uri.as_str().rsplit_once(":").unwrap(); CopyCommand { closure: d.profile.profile_settings.path.as_str(), fast_connection: d.merged_settings.fast_connection, check_sigs: 
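// Editor's aside (sketch, not part of this patch): per the commit message,
// `nix copy --to` only understands `ssh://user@host`, so the port is peeled off
// the URI here and handed to ssh through NIX_SSHOPTS as `-p <port>`. A
// standalone illustration of that split; the names below are invented.
fn split_port(ssh_uri: &str) -> Option<(&str, &str)> {
    // rsplit_once splits at the *last* ':' so the `ssh://` scheme separator is untouched
    ssh_uri.rsplit_once(':')
}

fn main() {
    let (host, port) = split_port("ssh://root@localhost:2221").unwrap();
    assert_eq!((host, port), ("ssh://root@localhost", "2221"));
    // NIX_SSHOPTS then carries the existing ssh options plus the port
    assert_eq!(format!("{} -p {}", "-i ./key", port), "-i ./key -p 2221");
}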
&d.flags.checksigs, - ssh_uri: d.ssh_uri.as_str(), - ssh_opts: d + hostname: uri, + nix_ssh_opts: format!("{} -p {}", + d .merged_settings .ssh_opts .iter() .fold("".to_string(), |s, o| format!("{} {}", s, o)), + port, + ), } } @@ -129,9 +134,9 @@ impl<'a> CopyCommand<'a> { cmd.arg("--no-check-sigs"); } cmd.arg("--to") - .arg(self.ssh_uri) + .arg(self.hostname) .arg(self.closure) - .env("NIX_SSHOPTS", self.ssh_opts); + .env("NIX_SSHOPTS", self.nix_ssh_opts); //cmd.what_is_this; cmd } From 9553b8c76c1c5ec8c3c007b2877f2c49412efecd Mon Sep 17 00:00:00 2001 From: David Arnold Date: Sat, 6 Nov 2021 21:11:13 -0500 Subject: [PATCH 30/53] feat(ssh_opts): concede multiple values to downstream - `bitte deploy` still needs the multiple values privilige --- src/data.rs | 1 + src/settings.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/data.rs b/src/data.rs index c266784c..807876e4 100644 --- a/src/data.rs +++ b/src/data.rs @@ -368,6 +368,7 @@ pub struct Flags { #[clap(short, long)] pub interactive: bool, /// Extra arguments to be passed to nix build + #[clap(long)] pub extra_build_args: Vec, /// Print debug logs to output diff --git a/src/settings.rs b/src/settings.rs index ea1479d3..e52c6d3b 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -18,7 +18,7 @@ pub struct GenericSettings { #[clap(long = "profile-user")] pub user: Option, /// Override the SSH options used - #[clap(long, multiple_occurrences(true), multiple_values(true))] + #[clap(long, multiple_occurrences(true))] #[serde( skip_serializing_if = "Vec::is_empty", default, From 3a8d4f354dd73796c3e3f098c8ba8c094b5bfb81 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Thu, 18 Nov 2021 10:45:31 -0700 Subject: [PATCH 31/53] ensure spawned thread exits before main --- src/deploy.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/deploy.rs b/src/deploy.rs index aac155fb..5219171e 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -393,7 +393,7 @@ pub async fn deploy_profile( let (send_activate, recv_activate) = tokio::sync::oneshot::channel(); let (send_activated, recv_activated) = tokio::sync::oneshot::channel(); - tokio::spawn(async move { + let thread = tokio::spawn(async move { let o = ssh_activate.wait_with_output().await; let maybe_err = match o { @@ -429,6 +429,10 @@ pub async fn deploy_profile( let c = confirm_profile(ssh, confirm).await; recv_activated.await.unwrap(); c?; + + thread + .await + .map_err(|x| DeployProfileError::SSHActivate(x.into()))?; } Ok(()) From aebc73d3659542333c20b37db19bb9bf9711bccd Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Thu, 18 Nov 2021 13:00:55 -0700 Subject: [PATCH 32/53] optionally skip checks from environment --- src/data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.rs b/src/data.rs index 807876e4..0619c3ad 100644 --- a/src/data.rs +++ b/src/data.rs @@ -386,7 +386,7 @@ pub struct Flags { pub result_path: Option, /// Skip the automatic pre-build checks - #[clap(short, long)] + #[clap(short, long, env = "DEPLOY_SKIP_CHECKS")] pub skip_checks: bool, /// Make activation wait for confirmation, or roll back after a period of time /// Show what will be activated on the machines From 3b9dcd1d1a66f0074236d565ded2022f5ae8a168 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:31:08 -0700 Subject: [PATCH 33/53] flake: update lock and follow fenix nixpkgs Also, ensure we are setting `RUST_SRC_PATH` from the actual rust derivation in use in the shell. 
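An editor's aside on the `tokio::spawn` change above (PATCH 31): keeping the JoinHandle and awaiting it before returning is what ensures the spawned task finishes before main exits. A minimal, self-contained sketch of that pattern, assuming a tokio runtime with the `macros`, `rt` and `time` features; the task body is invented for illustration.

    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // keep the handle instead of dropping it, as the patch now does
        let handle = tokio::spawn(async {
            tokio::time::sleep(Duration::from_millis(100)).await;
            println!("background task finished");
        });

        // ... other work runs here ...

        // awaiting the handle also surfaces panics from the task as a JoinError
        handle.await.expect("spawned task panicked");
    }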
--- flake.lock | 39 ++++++++++++++++++++------------------- flake.nix | 20 ++++++++++---------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/flake.lock b/flake.lock index 908625da..37846f20 100644 --- a/flake.lock +++ b/flake.lock @@ -2,17 +2,15 @@ "nodes": { "fenix": { "inputs": { - "nixpkgs": [ - "nixpkgs" - ], + "nixpkgs": "nixpkgs", "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1636093455, - "narHash": "sha256-wfb+drJo8d2n35WYECQ/G0ohORJmydVisnlCGi/Ty7k=", + "lastModified": 1637303083, + "narHash": "sha256-e2A5JBjxYNpjoGd53K0oVUUaS9ojwOT5rnThyPNS46M=", "owner": "nix-community", "repo": "fenix", - "rev": "a775d531812a4734b0b0e8277223a8762b35e4cc", + "rev": "8294ceadbbbe1a886640bfcc15f5a02a2b471955", "type": "github" }, "original": { @@ -39,16 +37,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1635934775, - "narHash": "sha256-DUkBfZjgeefgqyvFxnkZiOOWXgHP5Y1oKp/Zm+LT05Y=", - "owner": "NixOS", + "lastModified": 1636976544, + "narHash": "sha256-9ZmdyoRz4Qu8bP5BKR1T10YbzcB9nvCeQjOEw2cRKR0=", + "owner": "nixos", "repo": "nixpkgs", - "rev": "4789953e5c1ef6d10e3ff437e5b7ab8eed526942", + "rev": "931ab058daa7e4cd539533963f95e2bb0dbd41e6", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", + "owner": "nixos", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } @@ -57,18 +55,21 @@ "inputs": { "fenix": "fenix", "flake-compat": "flake-compat", - "nixpkgs": "nixpkgs", + "nixpkgs": [ + "fenix", + "nixpkgs" + ], "utils": "utils" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1635970994, - "narHash": "sha256-X0q/ZtxTofMGV0shLNiMzgxv44/Rt5x84VEmgjOawZg=", + "lastModified": 1637268320, + "narHash": "sha256-lxB1r+7cmZisiGLx0tZ2LaC6X/EcQTbRIWZfnLIIgs4=", "owner": "rust-analyzer", "repo": "rust-analyzer", - "rev": "a8247685cfa09084bd620c0877ea1eb3d605d8a2", + "rev": "f0da9406bcbde1bc727242b481d8de825e84f59a", "type": "github" }, "original": { @@ -80,11 +81,11 @@ }, "utils": { "locked": { - "lastModified": 1634851050, - "narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=", + "lastModified": 1637014545, + "narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=", "owner": "numtide", "repo": "flake-utils", - "rev": "c91f3de5adaf1de973b797ef7485e441a65b8935", + "rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 2d8dbf0e..d4b9fb37 100644 --- a/flake.nix +++ b/flake.nix @@ -7,14 +7,13 @@ description = "A Simple multi-profile Nix-flake deploy tool."; inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + nixpkgs.follows = "fenix/nixpkgs"; utils.url = "github:numtide/flake-utils"; flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; fenix.url = "github:nix-community/fenix"; - fenix.inputs.nixpkgs.follows = "nixpkgs"; }; outputs = { self, nixpkgs, utils, fenix, ... 
}: @@ -134,6 +133,13 @@ utils.lib.eachSystem (utils.lib.defaultSystems ++ ["aarch64-darwin"]) (system: let pkgs = import nixpkgs { inherit system; overlays = [ self.overlay fenix.overlay ]; }; + rustPkg = pkgs.fenix.stable.withComponents [ + "cargo" + "clippy" + "rust-src" + "rustc" + "rustfmt" + ]; in { defaultPackage = self.packages."${system}".deploy-rs; packages.deploy-rs = pkgs.deploy-rs.deploy-rs; @@ -145,17 +151,11 @@ }; devShell = pkgs.mkShell { - RUST_SRC_PATH = pkgs.rustPlatform.rustLibSrc; + RUST_SRC_PATH = "${rustPkg}/lib/rustlib/src/rust/library"; buildInputs = with pkgs; [ rust-analyzer-nightly reuse - (pkgs.fenix.stable.withComponents [ - "cargo" - "clippy" - "rust-src" - "rustc" - "rustfmt" - ]) + rustPkg ]; }; From 1d3d7a0dfe63a918b5149d61f9c35e992cac8d70 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:36:06 -0700 Subject: [PATCH 34/53] clippy: resolve lints --- src/cli.rs | 24 ++++++++++++------------ src/data.rs | 2 +- src/deploy.rs | 2 +- src/push.rs | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index ea0037fa..7e72de9b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; use std::io::{stdin, stdout, Write}; -use clap::{Parser}; +use clap::Parser; use crate as deploy; @@ -176,7 +176,7 @@ async fn run_deploy( let deploy_datas_ = targets .into_iter() .zip(&settings) - .map(|(target, root)| target.resolve(&root, &cmd_settings, &cmd_flags, hostname.to_owned())) + .map(|(target, root)| target.resolve(root, &cmd_settings, &cmd_flags, hostname.to_owned())) .collect::>>, data::ResolveTargetError>>()?; let deploy_datas: Vec<&data::DeployData<'_>> = deploy_datas_.iter().flatten().collect(); @@ -195,10 +195,10 @@ async fn run_deploy( for deploy_data in &parts { deploy::push::push_profile( supports_flakes, - deploy::push::ShowDerivationCommand::from_data(&deploy_data), - deploy::push::BuildCommand::from_data(&deploy_data), - deploy::push::SignCommand::from_data(&deploy_data), - deploy::push::CopyCommand::from_data(&deploy_data), + deploy::push::ShowDerivationCommand::from_data(deploy_data), + deploy::push::BuildCommand::from_data(deploy_data), + deploy::push::SignCommand::from_data(deploy_data), + deploy::push::CopyCommand::from_data(deploy_data), ) .await?; } @@ -213,10 +213,10 @@ async fn run_deploy( if let Err(e) = deploy::deploy::deploy_profile( &deploy_data.node_name, &deploy_data.profile_name, - deploy::deploy::SshCommand::from_data(&deploy_data)?, - deploy::deploy::ActivateCommand::from_data(&deploy_data), - deploy::deploy::WaitCommand::from_data(&deploy_data), - deploy::deploy::ConfirmCommand::from_data(&deploy_data), + deploy::deploy::SshCommand::from_data(deploy_data)?, + deploy::deploy::ActivateCommand::from_data(deploy_data), + deploy::deploy::WaitCommand::from_data(deploy_data), + deploy::deploy::ConfirmCommand::from_data(deploy_data), ) .await { @@ -234,8 +234,8 @@ async fn run_deploy( deploy::deploy::revoke( &deploy_data.node_name, &deploy_data.profile_name, - deploy::deploy::SshCommand::from_data(&deploy_data)?, - deploy::deploy::RevokeCommand::from_data(&deploy_data), + deploy::deploy::SshCommand::from_data(deploy_data)?, + deploy::deploy::RevokeCommand::from_data(deploy_data), ) .await?; } diff --git a/src/data.rs b/src/data.rs index 0619c3ad..a545d113 100644 --- a/src/data.rs +++ b/src/data.rs @@ -73,7 +73,7 @@ impl<'a> Target { } }; Ok({ - let hostname_: Option = if let Some(_) = ip { + let hostname_: Option = if ip.is_some() { ip } else { hostname diff 
--git a/src/deploy.rs b/src/deploy.rs index 5219171e..d4dacd7a 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -268,7 +268,7 @@ impl<'a> ConfirmCommand<'a> { } fn build(self) -> String { - let lock_path = super::make_lock_path(&self.temp_path, &self.closure); + let lock_path = super::make_lock_path(self.temp_path, self.closure); let mut cmd = format!("rm {}", lock_path); if let Some(sudo_cmd) = &self.sudo { diff --git a/src/push.rs b/src/push.rs index 2fbff10b..5bcc6c51 100644 --- a/src/push.rs +++ b/src/push.rs @@ -156,7 +156,7 @@ impl<'a> BuildCommand<'a> { node_name: d.node_name.as_str(), profile_name: d.profile_name.as_str(), keep_result: &d.flags.keep_result, - result_path: &d.flags.result_path.as_deref().unwrap_or("./.deploy-gc"), + result_path: d.flags.result_path.as_deref().unwrap_or("./.deploy-gc"), extra_build_args: &d.flags.extra_build_args, } } From ef7e1f855809d9a9bdab09577cc7cc03db3f44d6 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:40:38 -0700 Subject: [PATCH 35/53] Cargo.lock: remove unused dependency --- Cargo.lock | 10 ---------- Cargo.toml | 1 - 2 files changed, 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f1a93af..2b44493b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -126,7 +126,6 @@ dependencies = [ "clap", "envmnt", "flexi_logger", - "fork", "futures-util", "linked_hash_set", "log", @@ -188,15 +187,6 @@ dependencies = [ "yansi", ] -[[package]] -name = "fork" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c5b9b0bce249a456f83ac4404e8baad0d2ba81cf651949719a4f74eb7323bb" -dependencies = [ - "libc", -] - [[package]] name = "fsevent-sys" version = "4.0.0" diff --git a/Cargo.toml b/Cargo.toml index 4c24781a..7769c994 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,6 @@ edition = "2021" [dependencies] clap = "3.0.0-beta.5" flexi_logger = "0.16" -fork = "0.1" futures-util = "0.3.6" linked_hash_set = "0.1.4" log = "0.4" From bef8614046fcdea5a0258f3a7db39dd77e0cf73d Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:51:28 -0700 Subject: [PATCH 36/53] nix: ensure package and shell use same toolchain --- flake.nix | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index d4b9fb37..929e4762 100644 --- a/flake.nix +++ b/flake.nix @@ -16,7 +16,9 @@ fenix.url = "github:nix-community/fenix"; }; - outputs = { self, nixpkgs, utils, fenix, ... }: + outputs = { self, nixpkgs, utils, fenix, ... 
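A small aside on the clippy cleanup above (PATCH 34), which drops the extra borrow around `d.flags.result_path.as_deref().unwrap_or("./.deploy-gc")`: `as_deref()` turns an `Option<String>` into an `Option<&str>`, so a string-literal default needs no allocation. A sketch with invented values:

    fn result_path(flag: &Option<String>) -> &str {
        // Option<String> -> Option<&str> -> &str with a static default
        flag.as_deref().unwrap_or("./.deploy-gc")
    }

    fn main() {
        assert_eq!(result_path(&None), "./.deploy-gc");
        assert_eq!(result_path(&Some("/tmp/gc-root".into())), "/tmp/gc-root");
    }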
}: let + toolchain = "stable"; + in { overlay = final: prev: let @@ -32,7 +34,7 @@ deploy-rs = { deploy-rs = (final.makeRustPlatform { - inherit (final.fenix.stable) cargo rustc; + inherit (final.fenix.${toolchain}) cargo rustc; }).buildRustPackage (darwinOptions // { pname = "deploy-rs"; version = "0.1.0"; @@ -133,7 +135,7 @@ utils.lib.eachSystem (utils.lib.defaultSystems ++ ["aarch64-darwin"]) (system: let pkgs = import nixpkgs { inherit system; overlays = [ self.overlay fenix.overlay ]; }; - rustPkg = pkgs.fenix.stable.withComponents [ + rustPkg = pkgs.fenix.${toolchain}.withComponents [ "cargo" "clippy" "rust-src" From 633b9bca06b6b8519143a0bbdc7dc6e34d000329 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:52:28 -0700 Subject: [PATCH 37/53] cargo: use thin lto --- Cargo.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 7769c994..35fbd150 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,3 +38,7 @@ smol_str = "=0.1.16" [lib] name = "deploy" path = "src/lib.rs" + +[profile.release] +lto = "thin" +opt-level = 3 From e41fd9c6ff16dd2033076c50223fc04719972917 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 13:54:18 -0700 Subject: [PATCH 38/53] cargo: update deps --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b44493b..92a1af08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -350,9 +350,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.106" +version = "0.2.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" +checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" [[package]] name = "linked-hash-map" @@ -696,9 +696,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" dependencies = [ "itoa", "ryu", @@ -822,9 +822,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" +checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144" dependencies = [ "autocfg", "bytes", @@ -842,9 +842,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" +checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e" dependencies = [ "proc-macro2", "quote", From 5a6db26726ec8c7904aea5bcdf13589342386f9d Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 19 Nov 2021 14:31:22 -0700 Subject: [PATCH 39/53] flake: use self instead of ./. 
--- flake.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index 929e4762..d087b13c 100644 --- a/flake.nix +++ b/flake.nix @@ -39,9 +39,9 @@ pname = "deploy-rs"; version = "0.1.0"; - src = ./.; + src = self; - cargoLock.lockFile = ./Cargo.lock; + cargoLock.lockFile = "${self}/Cargo.lock"; }) // { meta.description = "A Simple multi-profile Nix-flake deploy tool"; }; lib = rec { @@ -107,7 +107,7 @@ deployChecks = deploy: builtins.mapAttrs (_: check: check deploy) { schema = deploy: final.runCommandNoCC "jsonschema-deploy-system" { } '' - ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${final.writeText "deploy.json" (builtins.toJSON deploy)} ${./interface.json} && touch $out + ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${final.writeText "deploy.json" (builtins.toJSON deploy)} ${self}/interface.json && touch $out ''; activate = deploy: From 1d3a4f4681a98479219c628165bb6b3a12eae843 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Tue, 30 Nov 2021 19:30:24 -0500 Subject: [PATCH 40/53] imp debugging - the previous debbugging messages where a bit inconsistent and not enough --- src/deploy.rs | 45 +++++++++++++++++++++++++-------------------- src/push.rs | 29 ++++++++++++++++++----------- 2 files changed, 43 insertions(+), 31 deletions(-) diff --git a/src/deploy.rs b/src/deploy.rs index d4dacd7a..cef64458 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -25,6 +25,8 @@ impl<'a> SshCommand<'a> { let mut cmd = Command::new("ssh"); cmd.arg(self.ssh_uri); cmd.args(self.opts.iter()); + + debug!("Built command: SshCommand -> {:?}", cmd); cmd } } @@ -92,6 +94,7 @@ impl<'a> ActivateCommand<'a> { cmd = format!("{} {}", sudo_cmd, cmd); } + debug!("Built command: ActivateCommand -> {}", cmd); cmd } } @@ -166,6 +169,7 @@ impl<'a> WaitCommand<'a> { cmd = format!("{} {}", sudo_cmd, cmd); } + debug!("Built command: WaitCommand -> {}", cmd); cmd } } @@ -227,6 +231,7 @@ impl<'a> RevokeCommand<'a> { cmd = format!("{} {}", sudo_cmd, cmd); } + debug!("Built command: RevokeCommand -> {}", cmd); cmd } } @@ -274,6 +279,8 @@ impl<'a> ConfirmCommand<'a> { if let Some(sudo_cmd) = &self.sudo { cmd = format!("{} {}", sudo_cmd, cmd); } + + debug!("Built command: ConfirmCommand -> {}", cmd); cmd } } @@ -292,22 +299,20 @@ pub async fn confirm_profile( ssh: SshCommand<'_>, confirm: ConfirmCommand<'_>, ) -> Result<(), ConfirmProfileError> { + + debug!("Entering confirm_profile function ..."); + let mut ssh_confirm_cmd = ssh.build(); let confirm_cmd = confirm.build(); - debug!( - "Attempting to run command to confirm deployment: {}", - confirm_cmd - ); - - let ssh_confirm_exit_status = ssh_confirm_cmd + let ssh_confirm_cmd_handle = ssh_confirm_cmd .arg(confirm_cmd) - .status() + .output() .await .map_err(ConfirmProfileError::SSHConfirm)?; - match ssh_confirm_exit_status.code() { + match ssh_confirm_cmd_handle.status.code() { Some(0) => (), a => return Err(ConfirmProfileError::SSHConfirmExit(a)), }; @@ -344,6 +349,9 @@ pub async fn deploy_profile( wait: WaitCommand<'_>, confirm: ConfirmCommand<'_>, ) -> Result<(), DeployProfileError> { + + debug!("Entering deploy_profile function ..."); + if !activate.dry_activate { info!( "Activating profile `{}` for node `{}`", @@ -355,18 +363,16 @@ pub async fn deploy_profile( let activate_cmd = activate.build(); - debug!("Constructed activation command: {}", activate_cmd); - let mut ssh_activate_cmd = ssh.build(); if *no_magic_rollback || *dry_activate { - let ssh_activate_exit_status = ssh_activate_cmd + let ssh_activate_cmd_handle = 
ssh_activate_cmd .arg(activate_cmd) - .status() + .output() .await .map_err(DeployProfileError::SSHActivate)?; - match ssh_activate_exit_status.code() { + match ssh_activate_cmd_handle.status.code() { Some(0) => (), a => return Err(DeployProfileError::SSHActivateExit(a)), }; @@ -377,9 +383,6 @@ pub async fn deploy_profile( info!("Success activating, done!"); } } else { - let wait_cmd = wait.build(); - - debug!("Constructed wait command: {}", wait_cmd); let ssh_activate = ssh_activate_cmd .arg(activate_cmd) @@ -387,7 +390,7 @@ pub async fn deploy_profile( .map_err(DeployProfileError::SSHSpawnActivate)?; info!("Creating activation waiter"); - + let wait_cmd = wait.build(); let mut ssh_wait_cmd = ssh.build(); let (send_activate, recv_activate) = tokio::sync::oneshot::channel(); @@ -411,9 +414,9 @@ pub async fn deploy_profile( send_activated.send(()).unwrap(); }); tokio::select! { - x = ssh_wait_cmd.arg(wait_cmd).status() => { + x = ssh_wait_cmd.arg(wait_cmd).output() => { debug!("Wait command ended"); - match x.map_err(DeployProfileError::SSHWait)?.code() { + match x.map_err(DeployProfileError::SSHWait)?.status.code() { Some(0) => (), a => return Err(DeployProfileError::SSHWaitExit(a)), }; @@ -454,13 +457,15 @@ pub async fn revoke( ssh: SshCommand<'_>, revoke: RevokeCommand<'_>, ) -> Result<(), RevokeProfileError> { + + debug!("Entering revoke function ..."); + info!( "Revoking profile `{}` for node `{}`", profile_name, node_name ); let revoke_cmd = revoke.build(); - debug!("Constructed revoke command: {}", revoke_cmd); let mut ssh_revoke_cmd = ssh.build(); diff --git a/src/push.rs b/src/push.rs index 5bcc6c51..4af760ff 100644 --- a/src/push.rs +++ b/src/push.rs @@ -5,7 +5,6 @@ use log::{debug, info}; use std::collections::HashMap; use std::path::Path; -use std::process::Stdio; use thiserror::Error; use tokio::process::Command; @@ -65,6 +64,8 @@ impl<'a> ShowDerivationCommand<'a> { cmd.arg("show-derivation").arg(&self.closure); //cmd.what_is_this; + + debug!("Built command: ShowDerivationCommand -> {:?}", cmd); cmd } } @@ -89,6 +90,8 @@ impl<'a> SignCommand<'a> { .arg(local_key) .arg(&self.closure); //cmd.what_is_this; + + debug!("Built command: SignCommand -> {:?}", cmd); cmd } } @@ -138,6 +141,8 @@ impl<'a> CopyCommand<'a> { .arg(self.closure) .env("NIX_SSHOPTS", self.nix_ssh_opts); //cmd.what_is_this; + + debug!("Built command: CopyCommand -> {:?}", cmd); cmd } } @@ -184,6 +189,8 @@ impl<'a> BuildCommand<'a> { }; cmd.args(self.extra_build_args.iter()); // cmd.what_is_this; + + debug!("Built command: BuildCommand -> {:?}", cmd); cmd } } @@ -195,11 +202,13 @@ pub async fn push_profile( sign: SignCommand<'_>, copy: CopyCommand<'_>, ) -> Result<(), PushProfileError> { + + debug!("Entering push_profil function ..."); + let node_name = build.node_name; let profile_name = build.profile_name; let closure = show_derivation.closure; - debug!("Finding the deriver of store path for {}", closure); let mut show_derivation_cmd = show_derivation.build(); let show_derivation_output = show_derivation_cmd @@ -230,14 +239,12 @@ pub async fn push_profile( let mut build_cmd = build.build(*derivation_name, supports_flakes); - let build_exit_status = build_cmd - // Logging should be in stderr, this just stops the store path from printing for no reason - .stdout(Stdio::null()) - .status() + let build_cmd_handle = build_cmd + .output() .await .map_err(PushProfileError::Build)?; - match build_exit_status.code() { + match build_cmd_handle.status.code() { Some(0) => (), a => return Err(PushProfileError::BuildExit(a)), 
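// Editor's aside (sketch, not from this patch): switching `.status()` to
// `.output()`, as this patch does for the ssh and build invocations, captures
// the child's stdout/stderr instead of inheriting them, and the exit code is
// then read from `output.status`. Illustrated with a plain `echo`; assumes
// tokio's `process` and `macros` features.
use tokio::process::Command;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let output = Command::new("echo").arg("hello").output().await?;
    match output.status.code() {
        Some(0) => print!("captured: {}", String::from_utf8_lossy(&output.stdout)),
        code => return Err(format!("command failed with exit code {:?}", code).into()),
    }
    Ok(())
}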
}; @@ -257,9 +264,9 @@ pub async fn push_profile( ); let mut sign_cmd = sign.build(local_key); - let sign_exit_status = sign_cmd.status().await.map_err(PushProfileError::Sign)?; + let sign_cmd_handle = sign_cmd.output().await.map_err(PushProfileError::Sign)?; - match sign_exit_status.code() { + match sign_cmd_handle.status.code() { Some(0) => (), a => return Err(PushProfileError::SignExit(a)), }; @@ -269,9 +276,9 @@ pub async fn push_profile( let mut copy_cmd = copy.build(); - let copy_exit_status = copy_cmd.status().await.map_err(PushProfileError::Copy)?; + let copy_exit_cmd_handle = copy_cmd.output().await.map_err(PushProfileError::Copy)?; - match copy_exit_status.code() { + match copy_exit_cmd_handle.status.code() { Some(0) => (), a => return Err(PushProfileError::CopyExit(a)), }; From f70cb4fa894cae354cd76e1e5d83777d15f10d73 Mon Sep 17 00:00:00 2001 From: David Arnold Date: Thu, 2 Dec 2021 19:41:05 -0500 Subject: [PATCH 41/53] fix: fast connection - if the connection is fast, DO NOT substitute on target - if the connection is slow, DO substitute on target --- src/push.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/push.rs b/src/push.rs index 4af760ff..6beef2b5 100644 --- a/src/push.rs +++ b/src/push.rs @@ -129,7 +129,7 @@ impl<'a> CopyCommand<'a> { cmd.arg("copy"); - if self.fast_connection { + if !self.fast_connection { cmd.arg("--substitute-on-destination"); } From fe35ea786d6c189a98e41820f0815201a362b22a Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 10 Dec 2021 11:30:22 -0700 Subject: [PATCH 42/53] fmt: cargo fmt --- src/data.rs | 38 ++++++++++++++++++-------------------- src/deploy.rs | 9 ++++----- src/push.rs | 18 +++++++----------- 3 files changed, 29 insertions(+), 36 deletions(-) diff --git a/src/data.rs b/src/data.rs index a545d113..272eccd5 100644 --- a/src/data.rs +++ b/src/data.rs @@ -7,8 +7,8 @@ use clap::Parser; use linked_hash_set::LinkedHashSet; use merge::Merge; use rnix::{types::*, SyntaxKind::*}; -use thiserror::Error; use std::net::{SocketAddr, ToSocketAddrs}; +use thiserror::Error; use crate::settings; @@ -73,11 +73,7 @@ impl<'a> Target { } }; Ok({ - let hostname_: Option = if ip.is_some() { - ip - } else { - hostname - }; + let hostname_: Option = if ip.is_some() { ip } else { hostname }; let d = DeployData::new( repo, node.to_owned(), @@ -182,7 +178,6 @@ impl std::str::FromStr for Target { }; }; - let mut node: Option = None; let mut profile: Option = None; @@ -197,7 +192,7 @@ impl std::str::FromStr for Target { node: None, profile: None, ip, // NB: error if not none; catched on target resolve - }) + }); } }; @@ -291,9 +286,7 @@ fn test_deploy_target_from_str() { ); assert_eq!( - "../examples/system#example" - .parse::() - .unwrap(), + "../examples/system#example".parse::().unwrap(), Target { repo: "../examples/system".to_string(), node: Some("example".to_string()), @@ -340,7 +333,6 @@ pub struct DeployData<'a> { // over potentially a series of sockets to deploy // to // pub sockets: Vec, - pub ssh_user: String, pub ssh_uri: String, pub temp_path: String, @@ -450,18 +442,24 @@ impl<'a> DeployData<'a> { }; let hostname = match hostname { Some(x) => x, - None => if let Some(x) = &node.node_settings.hostname { - x.to_string() - } else { - return Err(DeployDataError::NoHost(node_name)); - }, + None => { + if let Some(x) = &node.node_settings.hostname { + x.to_string() + } else { + return Err(DeployDataError::NoHost(node_name)); + } + } }; let maybe_iter = &mut hostname[..].to_socket_addrs(); let sockets: Vec = match maybe_iter 
{ Ok(x) => x.into_iter().collect(), - Err(err) => return Err( - DeployDataError::InvalidSockent(repo, hostname, err.to_string()), - ), + Err(err) => { + return Err(DeployDataError::InvalidSockent( + repo, + hostname, + err.to_string(), + )) + } }; let ssh_uri = format!("ssh://{}@{}", &ssh_user, sockets.first().unwrap()); diff --git a/src/deploy.rs b/src/deploy.rs index cef64458..1a25e0fc 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -18,7 +18,10 @@ pub struct SshCommand<'a> { impl<'a> SshCommand<'a> { pub fn from_data(d: &'a data::DeployData) -> Result { let opts = d.merged_settings.ssh_opts.as_ref(); - Ok(SshCommand { ssh_uri: d.ssh_uri.as_ref(), opts }) + Ok(SshCommand { + ssh_uri: d.ssh_uri.as_ref(), + opts, + }) } fn build(&self) -> Command { @@ -299,7 +302,6 @@ pub async fn confirm_profile( ssh: SshCommand<'_>, confirm: ConfirmCommand<'_>, ) -> Result<(), ConfirmProfileError> { - debug!("Entering confirm_profile function ..."); let mut ssh_confirm_cmd = ssh.build(); @@ -349,7 +351,6 @@ pub async fn deploy_profile( wait: WaitCommand<'_>, confirm: ConfirmCommand<'_>, ) -> Result<(), DeployProfileError> { - debug!("Entering deploy_profile function ..."); if !activate.dry_activate { @@ -383,7 +384,6 @@ pub async fn deploy_profile( info!("Success activating, done!"); } } else { - let ssh_activate = ssh_activate_cmd .arg(activate_cmd) .spawn() @@ -457,7 +457,6 @@ pub async fn revoke( ssh: SshCommand<'_>, revoke: RevokeCommand<'_>, ) -> Result<(), RevokeProfileError> { - debug!("Entering revoke function ..."); info!( diff --git a/src/push.rs b/src/push.rs index 6beef2b5..e1949a10 100644 --- a/src/push.rs +++ b/src/push.rs @@ -113,12 +113,12 @@ impl<'a> CopyCommand<'a> { fast_connection: d.merged_settings.fast_connection, check_sigs: &d.flags.checksigs, hostname: uri, - nix_ssh_opts: format!("{} -p {}", - d - .merged_settings - .ssh_opts - .iter() - .fold("".to_string(), |s, o| format!("{} {}", s, o)), + nix_ssh_opts: format!( + "{} -p {}", + d.merged_settings + .ssh_opts + .iter() + .fold("".to_string(), |s, o| format!("{} {}", s, o)), port, ), } @@ -202,7 +202,6 @@ pub async fn push_profile( sign: SignCommand<'_>, copy: CopyCommand<'_>, ) -> Result<(), PushProfileError> { - debug!("Entering push_profil function ..."); let node_name = build.node_name; @@ -239,10 +238,7 @@ pub async fn push_profile( let mut build_cmd = build.build(*derivation_name, supports_flakes); - let build_cmd_handle = build_cmd - .output() - .await - .map_err(PushProfileError::Build)?; + let build_cmd_handle = build_cmd.output().await.map_err(PushProfileError::Build)?; match build_cmd_handle.status.code() { Some(0) => (), From 64ca7a87ad6c28612d4bf4d52aabaf353f436321 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 10 Dec 2021 11:30:40 -0700 Subject: [PATCH 43/53] flake.lock: update to rust 1.57.0 --- flake.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 37846f20..1160afe2 100644 --- a/flake.lock +++ b/flake.lock @@ -6,11 +6,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1637303083, - "narHash": "sha256-e2A5JBjxYNpjoGd53K0oVUUaS9ojwOT5rnThyPNS46M=", + "lastModified": 1639117493, + "narHash": "sha256-67H9uXUdauaqMfkcKzpgHR3GeOKPAwOs6G3C1VpT67o=", "owner": "nix-community", "repo": "fenix", - "rev": "8294ceadbbbe1a886640bfcc15f5a02a2b471955", + "rev": "94b5686cad2ed210da106b0b7e1e212dab43fbf2", "type": "github" }, "original": { @@ -37,11 +37,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1636976544, 
- "narHash": "sha256-9ZmdyoRz4Qu8bP5BKR1T10YbzcB9nvCeQjOEw2cRKR0=", + "lastModified": 1638986258, + "narHash": "sha256-OceRdctKZRSgqQxVRvvNB0MaEnFMzQqjUffecoDE9eI=", "owner": "nixos", "repo": "nixpkgs", - "rev": "931ab058daa7e4cd539533963f95e2bb0dbd41e6", + "rev": "581d2d6c9cd5c289002203581d8aa0861963a933", "type": "github" }, "original": { @@ -65,11 +65,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1637268320, - "narHash": "sha256-lxB1r+7cmZisiGLx0tZ2LaC6X/EcQTbRIWZfnLIIgs4=", + "lastModified": 1639071661, + "narHash": "sha256-4YySLORuK0qGGIEJj78S7CZ4jy4GIHJ5ks17k5AWblo=", "owner": "rust-analyzer", "repo": "rust-analyzer", - "rev": "f0da9406bcbde1bc727242b481d8de825e84f59a", + "rev": "2534b7db1a093543d5bd759b3a1ca9e34418fa31", "type": "github" }, "original": { From 55b397d5d8a53a4ed6dca68177f1493cd70eb6d3 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 10 Dec 2021 11:51:20 -0700 Subject: [PATCH 44/53] show copy progress --- src/push.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/push.rs b/src/push.rs index e1949a10..ddefda11 100644 --- a/src/push.rs +++ b/src/push.rs @@ -272,9 +272,9 @@ pub async fn push_profile( let mut copy_cmd = copy.build(); - let copy_exit_cmd_handle = copy_cmd.output().await.map_err(PushProfileError::Copy)?; + let copy_exit_cmd_handle = copy_cmd.status().await.map_err(PushProfileError::Copy)?; - match copy_exit_cmd_handle.status.code() { + match copy_exit_cmd_handle.code() { Some(0) => (), a => return Err(PushProfileError::CopyExit(a)), }; From 0c02577acd42015b9575594dea31737ec033dc06 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Fri, 10 Dec 2021 12:06:05 -0700 Subject: [PATCH 45/53] cargo: update deps --- Cargo.lock | 96 +++++++++++++++++------------------------------------- Cargo.toml | 2 +- 2 files changed, 31 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92a1af08..5f3155ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,9 +70,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.0.0-beta.5" +version = "3.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feff3878564edb93745d58cf63e17b63f24142506e7a20c87a5521ed7bfb1d63" +checksum = "098d281b47bf725a0bddd829e0070ee76560faab8af123050a86c440d7f0a1fd" dependencies = [ "atty", "bitflags", @@ -83,14 +83,13 @@ dependencies = [ "strsim", "termcolor", "textwrap", - "unicase", ] [[package]] name = "clap_derive" -version = "3.0.0-beta.5" +version = "3.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b15c6b4f786ffb6192ffe65a36855bc1fc2444bcd0945ae16748dcd6ed7d0d3" +checksum = "26de8102ffb96701066cea36f9a104285b67fbcc302a520640289d476c15ed8a" dependencies = [ "heck", "proc-macro-error", @@ -207,18 +206,16 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "629316e42fe7c2a0b9a65b47d159ceaa5453ab14e8f0a3c5eedbb8cd55b4a445" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "a89f17b21645bc4ed773c69af9c9a0effd4a3f1a3876eadd453469f8854e7fdd" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -226,24 +223,21 @@ dependencies = [ 
[[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "dabf1872aaab32c886832f2276d2f5399887e2bd613698a02359e4ea83f8de12" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "41d22213122356472061ac0f1ab2cee28d2bac8491410fd68c2af53d1cedb83e" dependencies = [ - "autocfg", "futures-core", "futures-macro", "futures-task", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -350,9 +344,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.107" +version = "0.2.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" +checksum = "f98a04dce437184842841303488f70d0188c5f51437d2a834dc097eafa909a01" [[package]] name = "linked-hash-map" @@ -501,9 +495,9 @@ checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "os_str_bytes" -version = "4.2.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addaa943333a514159c80c97ff4a93306530d965d27e139188283cd13e06a799" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" dependencies = [ "memchr", ] @@ -569,23 +563,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a" dependencies = [ "unicode-xid", ] @@ -655,9 +637,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" [[package]] name = "same-file" @@ -676,18 +658,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "b4ad69dfbd3e45369132cc64e6748c2d65cdfb001a2b1c232d128b4ad60561c1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "b710a83c4e0dff6a3d511946b95274ad9ca9e5d3ae497b63fda866ac955358d2" 
dependencies = [ "proc-macro2", "quote", @@ -696,9 +678,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" +checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" dependencies = [ "itoa", "ryu", @@ -707,9 +689,9 @@ dependencies = [ [[package]] name = "signal-hook" -version = "0.3.10" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" +checksum = "c35dfd12afb7828318348b8c408383cf5071a086c1d4ab1c0f9840ec92dbb922" dependencies = [ "libc", "signal-hook-registry", @@ -750,9 +732,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" dependencies = [ "proc-macro2", "quote", @@ -779,9 +761,6 @@ name = "textwrap" version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" -dependencies = [ - "unicode-width", -] [[package]] name = "thin-dst" @@ -860,27 +839,12 @@ dependencies = [ "serde", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-segmentation" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" -[[package]] -name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - [[package]] name = "unicode-xid" version = "0.2.2" diff --git a/Cargo.toml b/Cargo.toml index 35fbd150..8477a6c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "3.0.0-beta.5" +clap = { version = "3.0.0-rc.3", features = [ "derive", "env" ] } flexi_logger = "0.16" futures-util = "0.3.6" linked_hash_set = "0.1.4" From 7267eab9139ca4476fd8ed49e2e8652bba4c037f Mon Sep 17 00:00:00 2001 From: David Arnold Date: Fri, 17 Dec 2021 14:50:54 -0500 Subject: [PATCH 46/53] imp: apply source filter - this will safe some rebuild cycles and half a megaton of CO2 over 6monts (ok, I made that last one up) --- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index d087b13c..d1f28beb 100644 --- a/flake.nix +++ b/flake.nix @@ -39,9 +39,9 @@ pname = "deploy-rs"; version = "0.1.0"; - src = self; + src = nixpkgs.lib.cleanSource ./.; - cargoLock.lockFile = "${self}/Cargo.lock"; + cargoLock.lockFile = ./Cargo.lock; }) // { meta.description = "A Simple multi-profile Nix-flake deploy tool"; }; lib = rec { From 1c60c23e226348ec20c478cc888495c103249a33 Mon Sep 17 00:00:00 2001 From: Craige McWhirter Date: Wed, 23 Feb 2022 17:30:11 +1000 Subject: [PATCH 47/53] nix: ran nixfmt across 
flake.nix --- flake.nix | 208 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 115 insertions(+), 93 deletions(-) diff --git a/flake.nix b/flake.nix index d1f28beb..148506ff 100644 --- a/flake.nix +++ b/flake.nix @@ -16,48 +16,46 @@ fenix.url = "github:nix-community/fenix"; }; - outputs = { self, nixpkgs, utils, fenix, ... }: let - toolchain = "stable"; - in - { - overlay = final: prev: - let - system = final.system; - darwinOptions = final.lib.optionalAttrs final.stdenv.isDarwin { - buildInputs = with final.darwin.apple_sdk.frameworks; [ - SystemConfiguration - CoreServices - ]; - }; - in - { - deploy-rs = { - - deploy-rs = (final.makeRustPlatform { - inherit (final.fenix.${toolchain}) cargo rustc; - }).buildRustPackage (darwinOptions // { - pname = "deploy-rs"; - version = "0.1.0"; - - src = nixpkgs.lib.cleanSource ./.; - - cargoLock.lockFile = ./Cargo.lock; - }) // { meta.description = "A Simple multi-profile Nix-flake deploy tool"; }; - - lib = rec { - - setActivate = builtins.trace - "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" - activate.custom; - - activate = rec { - custom = - { - __functor = customSelf: base: activate: - (final.buildEnv { - name = ("activatable-" + base.name); - paths = - [ + outputs = { self, nixpkgs, utils, fenix, ... }: + let toolchain = "stable"; + in { + overlay = final: prev: + let + system = final.system; + darwinOptions = final.lib.optionalAttrs final.stdenv.isDarwin { + buildInputs = with final.darwin.apple_sdk.frameworks; [ + SystemConfiguration + CoreServices + ]; + }; + in { + deploy-rs = { + + deploy-rs = (final.makeRustPlatform { + inherit (final.fenix.${toolchain}) cargo rustc; + }).buildRustPackage (darwinOptions // { + pname = "deploy-rs"; + version = "0.1.0"; + + src = nixpkgs.lib.cleanSource ./.; + + cargoLock.lockFile = ./Cargo.lock; + }) // { + meta.description = "A Simple multi-profile Nix-flake deploy tool"; + }; + + lib = rec { + + setActivate = builtins.trace + "deploy-rs#lib.setActivate is deprecated, use activate.noop, activate.nixos or activate.custom instead" + activate.custom; + + activate = rec { + custom = { + __functor = customSelf: base: activate: + (final.buildEnv { + name = ("activatable-" + base.name); + paths = [ base (final.writeTextFile { name = base.name + "-activate-path"; @@ -67,7 +65,11 @@ if [[ "''${DRY_ACTIVATE:-}" == "1" ]] then - ${customSelf.dryActivate or "echo ${final.writeScript "activate" activate}"} + ${ + customSelf.dryActivate or "echo ${ + final.writeScript "activate" activate + }" + } else ${activate} fi @@ -79,7 +81,9 @@ name = base.name + "-activate-rs"; text = '' #!${final.runtimeShell} - exec ${self.defaultPackage.${system}}/bin/activate "$@" + exec ${ + self.defaultPackage.${system} + }/bin/activate "$@" ''; executable = true; destination = "/activate-rs"; @@ -88,53 +92,74 @@ } // customSelf); }; - nixos = base: (custom // { inherit base; dryActivate = "$PROFILE/bin/switch-to-configuration dry-activate"; }) base.config.system.build.toplevel '' - # work around https://github.com/NixOS/nixpkgs/issues/73404 - cd /tmp - - $PROFILE/bin/switch-to-configuration switch - - # https://github.com/serokell/deploy-rs/issues/31 - ${with base.config.boot.loader; - final.lib.optionalString systemd-boot.enable - "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} - ''; - - home-manager = base: custom base.activationPackage "$PROFILE/activate"; - - noop = base: custom base ":"; - }; - - deployChecks = deploy: 
builtins.mapAttrs (_: check: check deploy) { - schema = deploy: final.runCommandNoCC "jsonschema-deploy-system" { } '' - ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${final.writeText "deploy.json" (builtins.toJSON deploy)} ${self}/interface.json && touch $out - ''; - - activate = deploy: - let - profiles = builtins.concatLists (final.lib.mapAttrsToList (nodeName: node: final.lib.mapAttrsToList (profileName: profile: [ (toString profile.path) nodeName profileName ]) node.profiles) deploy.nodes); - in - final.runCommandNoCC "deploy-rs-check-activate" { } '' - for x in ${builtins.concatStringsSep " " (map (p: builtins.concatStringsSep ":" p) profiles)}; do - profile_path=$(echo $x | cut -f1 -d:) - node_name=$(echo $x | cut -f2 -d:) - profile_name=$(echo $x | cut -f3 -d:) - - test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); - - test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); - done - - touch $out - ''; + nixos = base: + (custom // { + inherit base; + dryActivate = + "$PROFILE/bin/switch-to-configuration dry-activate"; + }) base.config.system.build.toplevel '' + # work around https://github.com/NixOS/nixpkgs/issues/73404 + cd /tmp + + $PROFILE/bin/switch-to-configuration switch + + # https://github.com/serokell/deploy-rs/issues/31 + ${with base.config.boot.loader; + final.lib.optionalString systemd-boot.enable + "sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"} + ''; + + home-manager = base: + custom base.activationPackage "$PROFILE/activate"; + + noop = base: custom base ":"; + }; + + deployChecks = deploy: + builtins.mapAttrs (_: check: check deploy) { + schema = deploy: + final.runCommandNoCC "jsonschema-deploy-system" { } '' + ${final.python3.pkgs.jsonschema}/bin/jsonschema -i ${ + final.writeText "deploy.json" (builtins.toJSON deploy) + } ${self}/interface.json && touch $out + ''; + + activate = deploy: + let + profiles = builtins.concatLists (final.lib.mapAttrsToList + (nodeName: node: + final.lib.mapAttrsToList (profileName: profile: [ + (toString profile.path) + nodeName + profileName + ]) node.profiles) deploy.nodes); + in final.runCommandNoCC "deploy-rs-check-activate" { } '' + for x in ${ + builtins.concatStringsSep " " + (map (p: builtins.concatStringsSep ":" p) profiles) + }; do + profile_path=$(echo $x | cut -f1 -d:) + node_name=$(echo $x | cut -f2 -d:) + profile_name=$(echo $x | cut -f3 -d:) + + test -f "$profile_path/deploy-rs-activate" || (echo "#$node_name.$profile_name is missing the deploy-rs-activate activation script" && exit 1); + + test -f "$profile_path/activate-rs" || (echo "#$node_name.$profile_name is missing the activate-rs activation script" && exit 1); + done + + touch $out + ''; + }; }; }; }; - }; - } // - utils.lib.eachSystem (utils.lib.defaultSystems ++ ["aarch64-darwin"]) (system: + } // utils.lib.eachSystem (utils.lib.defaultSystems ++ [ "aarch64-darwin" ]) + (system: let - pkgs = import nixpkgs { inherit system; overlays = [ self.overlay fenix.overlay ]; }; + pkgs = import nixpkgs { + inherit system; + overlays = [ self.overlay fenix.overlay ]; + }; rustPkg = pkgs.fenix.${toolchain}.withComponents [ "cargo" "clippy" @@ -154,15 +179,12 @@ devShell = pkgs.mkShell { RUST_SRC_PATH = "${rustPkg}/lib/rustlib/src/rust/library"; - buildInputs = with pkgs; [ - rust-analyzer-nightly - reuse - rustPkg - ]; + buildInputs = with pkgs; [ rust-analyzer-nightly reuse rustPkg 
]; }; checks = { - deploy-rs = self.defaultPackage.${system}.overrideAttrs (super: { doCheck = true; }); + deploy-rs = self.defaultPackage.${system}.overrideAttrs + (super: { doCheck = true; }); }; lib = pkgs.deploy-rs.lib; From feb44f80c634c799a661bf27c5cb4a905640fe93 Mon Sep 17 00:00:00 2001 From: Craige McWhirter Date: Wed, 23 Feb 2022 17:38:22 +1000 Subject: [PATCH 48/53] fenix: corrected inputs for fenix and nixpkgs --- flake.lock | 34 ++++++++++++++++------------------ flake.nix | 7 +++++-- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/flake.lock b/flake.lock index 1160afe2..0d7b1725 100644 --- a/flake.lock +++ b/flake.lock @@ -2,15 +2,17 @@ "nodes": { "fenix": { "inputs": { - "nixpkgs": "nixpkgs", + "nixpkgs": [ + "nixpkgs" + ], "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1639117493, - "narHash": "sha256-67H9uXUdauaqMfkcKzpgHR3GeOKPAwOs6G3C1VpT67o=", + "lastModified": 1645597478, + "narHash": "sha256-axsWwzGMMMcvHKXyrEC99RHkU/8EecIcmrESGzZMD/k=", "owner": "nix-community", "repo": "fenix", - "rev": "94b5686cad2ed210da106b0b7e1e212dab43fbf2", + "rev": "6c8d60c1d8deba8c360537c47e2b86aefaea0fd5", "type": "github" }, "original": { @@ -37,39 +39,35 @@ }, "nixpkgs": { "locked": { - "lastModified": 1638986258, - "narHash": "sha256-OceRdctKZRSgqQxVRvvNB0MaEnFMzQqjUffecoDE9eI=", - "owner": "nixos", + "lastModified": 1645433236, + "narHash": "sha256-4va4MvJ076XyPp5h8sm5eMQvCrJ6yZAbBmyw95dGyw4=", + "owner": "NixOS", "repo": "nixpkgs", - "rev": "581d2d6c9cd5c289002203581d8aa0861963a933", + "rev": "7f9b6e2babf232412682c09e57ed666d8f84ac2d", "type": "github" }, "original": { - "owner": "nixos", + "id": "nixpkgs", "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" + "type": "indirect" } }, "root": { "inputs": { "fenix": "fenix", "flake-compat": "flake-compat", - "nixpkgs": [ - "fenix", - "nixpkgs" - ], + "nixpkgs": "nixpkgs", "utils": "utils" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1639071661, - "narHash": "sha256-4YySLORuK0qGGIEJj78S7CZ4jy4GIHJ5ks17k5AWblo=", + "lastModified": 1645480664, + "narHash": "sha256-1+6YSK1hn6PX5qC3JwjrYktMwtq5GeFgNbyaGzk8Kuo=", "owner": "rust-analyzer", "repo": "rust-analyzer", - "rev": "2534b7db1a093543d5bd759b3a1ca9e34418fa31", + "rev": "c0ee2f23ff70349704dfe8448027a41b7788eb37", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 148506ff..193bf179 100644 --- a/flake.nix +++ b/flake.nix @@ -7,13 +7,16 @@ description = "A Simple multi-profile Nix-flake deploy tool."; inputs = { - nixpkgs.follows = "fenix/nixpkgs"; utils.url = "github:numtide/flake-utils"; flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; - fenix.url = "github:nix-community/fenix"; + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + nixpkgs.url = "nixpkgs/nixos-unstable"; }; outputs = { self, nixpkgs, utils, fenix, ... 
}: From dabb79d029b5fa3e870d7e901c489db17ca6529e Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Tue, 16 Aug 2022 16:48:11 -0600 Subject: [PATCH 49/53] build: show command output --- src/deploy.rs | 14 +++++++++----- src/push.rs | 22 +++++++++++++++------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/deploy.rs b/src/deploy.rs index 1a25e0fc..753aa2be 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -369,11 +369,15 @@ pub async fn deploy_profile( if *no_magic_rollback || *dry_activate { let ssh_activate_cmd_handle = ssh_activate_cmd .arg(activate_cmd) - .output() - .await - .map_err(DeployProfileError::SSHActivate)?; - - match ssh_activate_cmd_handle.status.code() { + .spawn() + .map_err(DeployProfileError::SSHActivate)? + .wait() + .await; + + match ssh_activate_cmd_handle + .map_err(DeployProfileError::SSHActivate)? + .code() + { Some(0) => (), a => return Err(DeployProfileError::SSHActivateExit(a)), }; diff --git a/src/push.rs b/src/push.rs index ddefda11..79feb421 100644 --- a/src/push.rs +++ b/src/push.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use log::{debug, info}; +use log::{debug, error, info}; use std::collections::HashMap; use std::path::Path; use thiserror::Error; @@ -127,7 +127,7 @@ impl<'a> CopyCommand<'a> { fn build(self) -> Command { let mut cmd = Command::new("nix"); - cmd.arg("copy"); + cmd.arg("-L").arg("copy"); if !self.fast_connection { cmd.arg("--substitute-on-destination"); @@ -174,7 +174,7 @@ impl<'a> BuildCommand<'a> { }; if supports_flakes { - cmd.arg("build").arg(derivation_name) + cmd.arg("-L").arg("build").arg(derivation_name) } else { cmd.arg(derivation_name) }; @@ -238,9 +238,13 @@ pub async fn push_profile( let mut build_cmd = build.build(*derivation_name, supports_flakes); - let build_cmd_handle = build_cmd.output().await.map_err(PushProfileError::Build)?; + let build_cmd_handle = build_cmd + .spawn() + .map_err(PushProfileError::Build)? + .wait() + .await; - match build_cmd_handle.status.code() { + match build_cmd_handle.map_err(PushProfileError::Build)?.code() { Some(0) => (), a => return Err(PushProfileError::BuildExit(a)), }; @@ -272,9 +276,13 @@ pub async fn push_profile( let mut copy_cmd = copy.build(); - let copy_exit_cmd_handle = copy_cmd.status().await.map_err(PushProfileError::Copy)?; + let copy_exit_cmd_handle = copy_cmd + .spawn() + .map_err(PushProfileError::Copy)? + .wait() + .await; - match copy_exit_cmd_handle.code() { + match copy_exit_cmd_handle.map_err(PushProfileError::Copy)?.code() { Some(0) => (), a => return Err(PushProfileError::CopyExit(a)), }; From 9e449da39efc128ca460b94c04cf9a918c014d17 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Tue, 16 Aug 2022 17:31:38 -0600 Subject: [PATCH 50/53] invert rollback settings A lot of the time rollbacks are just causing a big headache --- src/cli.rs | 6 +++--- src/deploy.rs | 24 ++++++++++++------------ src/settings.rs | 8 ++++---- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 7e72de9b..88ff14f2 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -207,7 +207,7 @@ async fn run_deploy( // Run all deployments // In case of an error rollback any previoulsy made deployment. 
- // Rollbacks adhere to the global seeting to no_auto_rollback and secondary + // Rollbacks adhere to the global seeting to auto_rollback and secondary // the profile's configuration for deploy_data in &parts { if let Err(e) = deploy::deploy::deploy_profile( @@ -224,13 +224,13 @@ async fn run_deploy( if cmd_flags.dry_activate { info!("dry run, not rolling back"); } - if cmd_flags.rollback_succeeded && !cmd_settings.no_auto_rollback { + if cmd_flags.rollback_succeeded && cmd_settings.auto_rollback { info!("Revoking previous deploys"); // revoking all previous deploys // (adheres to profile configuration if not set explicitely by // the command line) for deploy_data in &succeeded { - if !deploy_data.merged_settings.no_auto_rollback { + if deploy_data.merged_settings.auto_rollback { deploy::deploy::revoke( &deploy_data.node_name, &deploy_data.profile_name, diff --git a/src/deploy.rs b/src/deploy.rs index 753aa2be..8c522261 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -39,9 +39,9 @@ pub struct ActivateCommand<'a> { profile_path: &'a str, temp_path: &'a str, closure: &'a str, - no_auto_rollback: bool, + auto_rollback: bool, confirm_timeout: u16, - no_magic_rollback: bool, + magic_rollback: bool, debug_logs: bool, log_dir: Option<&'a str>, dry_activate: bool, @@ -54,9 +54,9 @@ impl<'a> ActivateCommand<'a> { profile_path: &d.profile_path, temp_path: &d.temp_path, closure: &d.profile.profile_settings.path, - no_auto_rollback: d.merged_settings.no_auto_rollback, + auto_rollback: d.merged_settings.auto_rollback, confirm_timeout: d.merged_settings.confirm_timeout.unwrap_or(30), - no_magic_rollback: d.merged_settings.no_magic_rollback, + magic_rollback: d.merged_settings.magic_rollback, debug_logs: d.flags.debug_logs, log_dir: d.flags.log_dir.as_deref(), dry_activate: d.flags.dry_activate, @@ -81,11 +81,11 @@ impl<'a> ActivateCommand<'a> { cmd = format!("{} --confirm-timeout {}", cmd, self.confirm_timeout); - if !self.no_magic_rollback { + if self.magic_rollback { cmd = format!("{} --magic-rollback", cmd); } - if !self.no_auto_rollback { + if self.auto_rollback { cmd = format!("{} --auto-rollback", cmd); } @@ -107,11 +107,11 @@ fn test_activation_command_builder() { let sudo = Some("sudo -u test"); let profile_path = "/blah/profiles/test"; let closure = "/nix/store/blah/etc"; - let no_auto_rollback = false; + let auto_rollback = true; let dry_activate = false; let temp_path = "/tmp"; let confirm_timeout = 30; - let no_magic_rollback = false; + let magic_rollback = true; let debug_logs = true; let log_dir = Some("/tmp/something.txt"); @@ -120,10 +120,10 @@ fn test_activation_command_builder() { sudo, profile_path, closure, - no_auto_rollback, + auto_rollback, temp_path, confirm_timeout, - no_magic_rollback, + magic_rollback, debug_logs, log_dir, dry_activate @@ -360,13 +360,13 @@ pub async fn deploy_profile( ); } let dry_activate = &activate.dry_activate.clone(); - let no_magic_rollback = &activate.no_magic_rollback.clone(); + let magic_rollback = &activate.magic_rollback.clone(); let activate_cmd = activate.build(); let mut ssh_activate_cmd = ssh.build(); - if *no_magic_rollback || *dry_activate { + if !*magic_rollback || *dry_activate { let ssh_activate_cmd_handle = ssh_activate_cmd .arg(activate_cmd) .spawn() diff --git a/src/settings.rs b/src/settings.rs index e52c6d3b..3a15ee9c 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -35,8 +35,8 @@ pub struct GenericSettings { /// Do not attempt rollback if activation fails #[clap(long)] #[serde(rename(deserialize = "noAutoRollback"), default)] 
- #[merge(strategy = merge::bool::overwrite_false)] - pub no_auto_rollback: bool, + #[merge(strategy = merge::bool::overwrite_true)] + pub auto_rollback: bool, /// How long activation should wait for confirmation (if using magic-rollback) #[clap(long)] #[serde(rename(deserialize = "confirmTimeout"))] @@ -48,8 +48,8 @@ pub struct GenericSettings { /// Do not do a magic rollback (see documentation) #[clap(long)] #[serde(rename(deserialize = "noMagicRollback"), default)] - #[merge(strategy = merge::bool::overwrite_false)] - pub no_magic_rollback: bool, + #[merge(strategy = merge::bool::overwrite_true)] + pub magic_rollback: bool, } impl GenericSettings { From b086e4d4ba7ec7225f8d42082ff2c059e8c206c8 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Tue, 16 Aug 2022 17:36:34 -0600 Subject: [PATCH 51/53] keep going on error Just log the error instead of quiting the application. --- src/deploy.rs | 8 ++++---- src/push.rs | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/deploy.rs b/src/deploy.rs index 8c522261..cd72b9fe 100644 --- a/src/deploy.rs +++ b/src/deploy.rs @@ -4,7 +4,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use log::{debug, info}; +use log::{debug, error, info}; use thiserror::Error; use tokio::process::Command; @@ -316,7 +316,7 @@ pub async fn confirm_profile( match ssh_confirm_cmd_handle.status.code() { Some(0) => (), - a => return Err(ConfirmProfileError::SSHConfirmExit(a)), + a => error!("{}", ConfirmProfileError::SSHConfirmExit(a)), }; info!("Deployment confirmed."); @@ -379,7 +379,7 @@ pub async fn deploy_profile( .code() { Some(0) => (), - a => return Err(DeployProfileError::SSHActivateExit(a)), + a => error!("{}", DeployProfileError::SSHActivateExit(a)), }; if *dry_activate { @@ -422,7 +422,7 @@ pub async fn deploy_profile( debug!("Wait command ended"); match x.map_err(DeployProfileError::SSHWait)?.status.code() { Some(0) => (), - a => return Err(DeployProfileError::SSHWaitExit(a)), + a => error!("{}",DeployProfileError::SSHWaitExit(a)), }; }, x = recv_activate => { diff --git a/src/push.rs b/src/push.rs index 79feb421..5e5c7e3b 100644 --- a/src/push.rs +++ b/src/push.rs @@ -217,7 +217,7 @@ pub async fn push_profile( match show_derivation_output.status.code() { Some(0) => (), - a => return Err(PushProfileError::ShowDerivationExit(a)), + a => error!("{}", PushProfileError::ShowDerivationExit(a)), }; let derivation_info: HashMap<&str, serde_json::value::Value> = serde_json::from_str( @@ -246,15 +246,15 @@ pub async fn push_profile( match build_cmd_handle.map_err(PushProfileError::Build)?.code() { Some(0) => (), - a => return Err(PushProfileError::BuildExit(a)), + a => error!("{}", PushProfileError::BuildExit(a)), }; if !Path::new(format!("{}/deploy-rs-activate", closure).as_str()).exists() { - return Err(PushProfileError::DeployRsActivateDoesntExist); + error!("{}", PushProfileError::DeployRsActivateDoesntExist); } if !Path::new(format!("{}/activate-rs", closure).as_str()).exists() { - return Err(PushProfileError::ActivateRsDoesntExist); + error!("{}", PushProfileError::ActivateRsDoesntExist); } if let Ok(local_key) = std::env::var("LOCAL_KEY") { @@ -268,7 +268,7 @@ pub async fn push_profile( match sign_cmd_handle.status.code() { Some(0) => (), - a => return Err(PushProfileError::SignExit(a)), + a => error!("{}", PushProfileError::SignExit(a)), }; } @@ -284,7 +284,7 @@ pub async fn push_profile( match copy_exit_cmd_handle.map_err(PushProfileError::Copy)?.code() { Some(0) => (), - a => return Err(PushProfileError::CopyExit(a)), + a => 
error!("{}", PushProfileError::CopyExit(a)), }; Ok(()) From f81eb2b23291a0e3430703d92b6cbcefac7734d7 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Thu, 18 Aug 2022 15:24:39 -0600 Subject: [PATCH 52/53] skip checks by default --- src/cli.rs | 2 +- src/data.rs | 6 +++--- src/settings.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 88ff14f2..6ab3347d 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -298,7 +298,7 @@ pub async fn run(args: Option) -> Result<(), RunError> { .collect::, data::ParseTargetError>>( )?; - if !opts.flags.skip_checks { + if opts.flags.do_checks { for target in targets.iter() { flake::check_deployment(supports_flakes, &target.repo, &opts.flags.extra_build_args) .await?; diff --git a/src/data.rs b/src/data.rs index 272eccd5..8b66a623 100644 --- a/src/data.rs +++ b/src/data.rs @@ -377,9 +377,9 @@ pub struct Flags { #[clap(short, long)] pub result_path: Option, - /// Skip the automatic pre-build checks - #[clap(short, long, env = "DEPLOY_SKIP_CHECKS")] - pub skip_checks: bool, + /// Do the automatic pre-build checks + #[clap(short = 's', long, env = "DEPLOY_SKIP_CHECKS")] + pub do_checks: bool, /// Make activation wait for confirmation, or roll back after a period of time /// Show what will be activated on the machines #[clap(long)] diff --git a/src/settings.rs b/src/settings.rs index 3a15ee9c..9312161b 100644 --- a/src/settings.rs +++ b/src/settings.rs @@ -32,7 +32,7 @@ pub struct GenericSettings { #[serde(rename(deserialize = "fastConnection"), default)] #[merge(strategy = merge::bool::overwrite_false)] pub fast_connection: bool, - /// Do not attempt rollback if activation fails + /// Attempt rollback if activation fails #[clap(long)] #[serde(rename(deserialize = "noAutoRollback"), default)] #[merge(strategy = merge::bool::overwrite_true)] @@ -45,7 +45,7 @@ pub struct GenericSettings { #[clap(long)] #[serde(rename(deserialize = "tempPath"))] pub temp_path: Option, - /// Do not do a magic rollback (see documentation) + /// Do a magic rollback (see documentation) #[clap(long)] #[serde(rename(deserialize = "noMagicRollback"), default)] #[merge(strategy = merge::bool::overwrite_true)] From 4da8eb9fc3e611adf4bbe8c8df5b1fc604c4f906 Mon Sep 17 00:00:00 2001 From: Timothy DeHerrera Date: Mon, 22 Aug 2022 16:49:50 -0600 Subject: [PATCH 53/53] export flake config in the environment Useful so that building the derivations still uses the nix config values set directly in the flake. 
--- lib/nix_config.nix | 30 ++++++++++++++++++++++++++++++ src/cli.rs | 46 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 72 insertions(+), 4 deletions(-) create mode 100644 lib/nix_config.nix diff --git a/lib/nix_config.nix b/lib/nix_config.nix new file mode 100644 index 00000000..852f7376 --- /dev/null +++ b/lib/nix_config.nix @@ -0,0 +1,30 @@ +with import {}; +with lib; + settings: let + mkValueString = v: + if v == null + then "" + else if isInt v + then toString v + else if isBool v + then boolToString v + else if isFloat v + then floatToString v + else if isList v + then toString v + else if isDerivation v + then toString v + else if builtins.isPath v + then toString v + else if isString v + then v + else if isCoercibleToString v + then toString v + else ""; + + mkKeyValue = k: v: "${escape ["="] k} = ${mkValueString v}"; + + mkKeyValuePairs = attrs: concatStringsSep "\n" (mapAttrsToList mkKeyValue attrs); + in '' + ${mkKeyValuePairs settings} + '' diff --git a/src/cli.rs b/src/cli.rs index 6ab3347d..135048fe 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -13,10 +13,13 @@ use crate as deploy; use self::deploy::{data, flake, settings}; use log::{debug, error, info, warn}; use serde::Serialize; +use std::env; use std::process::Stdio; use thiserror::Error; use tokio::process::Command; +use std::path::{Path, PathBuf}; + /// Simple Rust rewrite of a simple Nix Flake deployment tool #[derive(Parser, Debug, Clone, Default)] #[clap(version = "1.0", author = "Serokell ")] @@ -45,9 +48,7 @@ async fn test_flake_support() -> Result { debug!("Checking for flake support"); Ok(Command::new("nix") - .arg("eval") - .arg("--expr") - .arg("builtins.getFlake") + .args(vec!["eval", "--expr", "builtins.getFlake"]) // This will error on some machines "intentionally", and we don't really need that printing .stdout(Stdio::null()) .stderr(Stdio::null()) @@ -154,7 +155,10 @@ pub enum RunDeployError { PushProfile(#[from] deploy::push::PushProfileError), #[error("Failed to resolve target: {0}")] ResolveTarget(#[from] data::ResolveTargetError), - + #[error("Failed run Nix")] + Nix(#[from] std::io::Error), + #[error("Failed to parse JSON")] + JSON(#[from] serde_json::Error), #[error("Error processing deployment definitions: {0}")] DeployData(#[from] data::DeployDataError), #[error("Failed to make printable TOML of deployment: {0}")] @@ -165,6 +169,23 @@ pub enum RunDeployError { RevokeProfile(#[from] deploy::deploy::RevokeProfileError), } +fn find_flake(starting_directory: &Path) -> Option { + let mut path: PathBuf = starting_directory.into(); + let file = Path::new("flake.nix"); + + loop { + path.push(file); + + if path.is_file() { + break Some(path); + } + + if !(path.pop() && path.pop()) { + // remove file && remove parent + break None; + } + } +} async fn run_deploy( targets: Vec, settings: Vec, @@ -173,6 +194,23 @@ async fn run_deploy( cmd_settings: settings::GenericSettings, cmd_flags: data::Flags, ) -> Result<(), RunDeployError> { + if supports_flakes { + let path = find_flake(Path::new(&env::current_dir()?)).unwrap_or_default(); + let flake = path.to_str().unwrap_or_default(); + let config_cmd = Command::new("nix") + .args(vec!["eval", "--raw", "--impure", "--expr"]) + .arg(format!( + "let flake = import {}; in if flake ? 
nixConfig then flake.nixConfig else {}", + flake, "{}" + )) + .arg("--apply") + .arg(include_str!("../lib/nix_config.nix")) + .output() + .await?; + if config_cmd.status.success() { + env::set_var("NIX_CONFIG", &*String::from_utf8_lossy(&config_cmd.stdout)); + } + } let deploy_datas_ = targets .into_iter() .zip(&settings)