From b989ccd486438d95c07f6ba708458d0540711043 Mon Sep 17 00:00:00 2001 From: Rain Date: Thu, 10 Jul 2025 22:49:20 +0000 Subject: [PATCH 1/3] [spr] changes to main this commit is based on Created using spr 1.3.6-beta.1 [skip ci] --- Cargo.lock | 3 + dev-tools/reconfigurator-cli/src/lib.rs | 64 + .../tests/input/cmds-mupdate-update-flow.txt | 117 ++ .../tests/input/cmds-noop-image-source.txt | 6 +- .../output/cmds-mupdate-update-flow-stderr | 0 .../output/cmds-mupdate-update-flow-stdout | 1621 +++++++++++++++++ .../output/cmds-noop-image-source-stdout | 44 +- .../cmds-set-remove-mupdate-override-stdout | 2 +- nexus/reconfigurator/blippy/Cargo.toml | 2 +- nexus/reconfigurator/blippy/src/blippy.rs | 23 + nexus/reconfigurator/blippy/src/checks.rs | 113 ++ nexus/reconfigurator/planning/Cargo.toml | 2 + .../planning/src/blueprint_builder/builder.rs | 263 ++- .../src/blueprint_editor/sled_editor.rs | 118 +- nexus/reconfigurator/planning/src/planner.rs | 225 ++- nexus/reconfigurator/planning/src/system.rs | 57 + nexus/types/src/deployment.rs | 26 +- sled-agent/config-reconciler/src/ledger.rs | 134 +- 18 files changed, 2748 insertions(+), 72 deletions(-) create mode 100644 dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stderr create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout diff --git a/Cargo.lock b/Cargo.lock index 708fa9c473e..1e70a2bf944 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6550,6 +6550,7 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", + "tufaceous-artifact", ] [[package]] @@ -6648,6 +6649,7 @@ dependencies = [ "expectorate", "gateway-client", "id-map", + "iddqd", "illumos-utils", "indexmap 2.10.0", "internal-dns-resolver", @@ -6674,6 +6676,7 @@ dependencies = [ "sp-sim", "static_assertions", "strum", + "swrite", "test-strategy", "thiserror 2.0.12", "tufaceous-artifact", diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index c2c59738522..0d7cfd98bff 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -375,6 +375,8 @@ enum SledSetCommand { Policy(SledSetPolicyArgs), #[clap(flatten)] Visibility(SledSetVisibilityCommand), + /// set the mupdate override for this sled + MupdateOverride(SledSetMupdateOverrideArgs), } #[derive(Debug, Args)] @@ -496,6 +498,23 @@ struct SledUpdateSpArgs { inactive: Option, } +#[derive(Debug, Args)] +struct SledSetMupdateOverrideArgs { + #[clap(flatten)] + source: SledMupdateOverrideSource, +} + +#[derive(Debug, Args)] +#[group(id = "sled-mupdate-override-source", required = true, multiple = false)] +struct SledMupdateOverrideSource { + /// the new value of the mupdate override, or "unset" + mupdate_override_id: Option, + + /// simulate an error reading the mupdate override + #[clap(long, conflicts_with = "mupdate_override_id")] + with_error: bool, +} + #[derive(Debug, Args)] struct SledRemoveArgs { /// id of the sled @@ -1301,6 +1320,51 @@ fn cmd_sled_set( ))) } } + SledSetCommand::MupdateOverride(SledSetMupdateOverrideArgs { + source: + SledMupdateOverrideSource { mupdate_override_id, with_error }, + }) => { + let (desc, prev) = if with_error { + let prev = + system.description_mut().sled_set_mupdate_override_error( + sled_id, + "reconfigurator-cli simulated mupdate-override error" + .to_owned(), + )?; + ("error".to_owned(), prev) + } else { + let mupdate_override_id = + 
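+                    // Note: the arg group on SledMupdateOverrideSource is
+                    // `required = true, multiple = false`, and `--with-error`
+                    // conflicts with `mupdate_override_id`, so when
+                    // `with_error` is false the ID must be present and this
+                    // expect cannot fire.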
mupdate_override_id.expect("clap ensures that this is set"); + let prev = system.description_mut().sled_set_mupdate_override( + sled_id, + mupdate_override_id.into(), + )?; + let desc = match mupdate_override_id { + MupdateOverrideUuidOpt::Set(id) => id.to_string(), + MupdateOverrideUuidOpt::Unset => "unset".to_owned(), + }; + (desc, prev) + }; + + let prev_desc = match prev { + Ok(Some(id)) => id.to_string(), + Ok(None) => "unset".to_owned(), + Err(_) => "error".to_owned(), + }; + + sim.commit_and_bump( + format!( + "reconfigurator-cli sled-set-mupdate-override: {}: {} -> {}", + sled_id, prev_desc, desc, + ), + state, + ); + + Ok(Some(format!( + "set sled {} mupdate override: {} -> {}", + sled_id, prev_desc, desc, + ))) + } } } diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt new file mode 100644 index 00000000000..dd0bb735439 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt @@ -0,0 +1,117 @@ +# Load an example system. + +load-example --nsleds 3 --ndisks-per-sled 1 + +# Create a TUF repository from a fake manifest. We're going to use this +# repository to test out the minimum release generation flow. +tuf-assemble ../../update-common/manifests/fake.toml +set target-release repo-1.0.0.zip + +# Update the install dataset on this sled to the target release. +# (This populates the zone manifest, used for no-op conversions from +# install dataset to artifact down the road.) +sled-update-install-dataset serial0 --to-target-release + +# Set one of sled 0's zone's image sources to a specific artifact, and +# also set an MGS update on the sled. Both should be reset as part of +# this process. +blueprint-edit latest set-zone-image 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact 1.2.3 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +blueprint-edit latest set-sp-update serial0 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 sp 1.0.0 1.0.1 + +# Simulate a mupdate on sled 0 by setting the mupdate override field to a +# new UUID (generated using uuidgen). +sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 + +# On sled 1, simulate an error obtaining the mupdate override. +sled-set serial1 mupdate-override --with-error + +# Also set its SP update, which will not be cleared. +blueprint-edit latest set-sp-update serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest sp newer older + +# Simulate a mupdate on sled 2 as well. +sled-set serial2 mupdate-override 203fa72c-85c1-466a-8ed3-338ee029530d + +# Generate a new inventory and plan against that. +inventory-generate +blueprint-plan latest latest + +# Diff the blueprints. This diff should show: +# +# * for sled 0: +# * "+ will remove mupdate override" +# * for zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038, a change from artifact to install-dataset +# * the pending MGS update cleared +# * for sled 1, no change, because the mupdate override field had an error +# * for sled 2, "+ will remove mupdate override" +# * the target release's minimum generation bumped from 1 to 3 +# (the 3 is because generation 2 is repo-1.0.0.zip) +blueprint-diff latest + +# Hide sled 0 from inventory temporarily -- this does two things: +# 1. Tests that mupdate/update state transitions don't happen when +# the sled isn't present in inventory. +# 2. We don't want sled 0 to participate in the next few operations +# below. 
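+# (A hidden sled stays in service but is omitted from generated inventory
+# collections; the planner is then expected to log "no inventory found for
+# in-service sled" and leave the sled's mupdate state untouched.)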
+sled-set serial0 inventory-hidden + +# Set the target release to a new repo, causing a generation number bump +# to 3. +set target-release repo-1.0.0.zip + +# Invoke the planner -- should not proceed with adding or updating zones +# because sled 0 has a remove-mupdate-override set in the blueprint. +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Now simulate the new config being applied to sled 0, which would +# cause the mupdate override to be removed. +sled-set serial0 mupdate-override unset +sled-set serial0 inventory-visible + +# But simulate a second mupdate on sled 2. This should invalidate the existing +# mupdate override on sled 2 and cause another target release minimum +# generation bump. +tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver +sled-update-install-dataset serial2 --from-repo repo-2.0.0.zip +sled-set serial2 mupdate-override 1c0ce176-6dc8-4a90-adea-d4a8000751da + +# Generate a new inventory and plan against that. +inventory-generate +blueprint-plan latest latest + +# Diff the blueprints. This diff should show: +# * on sled 0: +# * the "remove mupdate override" line going away +# * no-op image source switches from install dataset to artifact +# * on sled 1, no changes +# * on sled 2, a _change_ in the will-remove-mupdate-override field +# * another bump to the target release minimum generation, this time to 4. +blueprint-diff latest + +# Clear the mupdate override on sled 2, signifying that the config has been +# applied. +sled-set serial2 mupdate-override unset + +# Run the planner again. This should cause sled 2's mupdate override field +# to be unset, but no further planning steps to happen because the +# target release generation is not new enough. +inventory-generate +blueprint-plan latest latest +blueprint-show latest +blueprint-diff latest + +# Now set the target release -- with this, the rest of the planner starts +# working again. +set target-release repo-2.0.0.zip +blueprint-plan latest latest +blueprint-show latest +blueprint-diff latest + +# Set the target release minimum generation to a large value -- we're going to +# test that the planner bails if it attempts a rollback of the target release +# minimum generation. +blueprint-edit latest set-target-release-min-gen 1000 +sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 +inventory-generate +blueprint-plan latest latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt index 9d7d0db2ee9..a7b20c01a71 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt @@ -23,10 +23,10 @@ sled-update-install-dataset serial0 --to-target-release sled-update-install-dataset serial1 --with-manifest-error # On a third sled, update the install dataset and simulate a mupdate override. -# (Currently we do this in the blueprint, but with -# https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and -# set a mupdate-override on the sled directly.) +# Also set it in the blueprint -- this simulates the situation where the mupdate +# override is in progress and will be cleared in the future. 
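+# (The sled-level override is what inventory reports; the blueprint-level
+# remove-mupdate-override field records that the planner has observed it and
+# intends to clear it once the new config is applied.)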
sled-update-install-dataset serial2 --to-target-release +sled-set serial2 mupdate-override ffffffff-ffff-ffff-ffff-ffffffffffff blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff # On a fourth sled, simulate an error validating the install dataset image on one zone. diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stderr b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stderr new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout new file mode 100644 index 00000000000..9b8de09afcb --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -0,0 +1,1621 @@ +using provided RNG seed: reconfigurator-cli-test +> # Load an example system. + +> load-example --nsleds 3 --ndisks-per-sled 1 +loaded example system with: +- collection: f45ba181-4b56-42cc-a762-874d90184a43 +- blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + + +> # Create a TUF repository from a fake manifest. We're going to use this +> # repository to test out the minimum release generation flow. +> tuf-assemble ../../update-common/manifests/fake.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler +created repo-1.0.0.zip for system version 1.0.0 + +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: SimGimletSp, kind: gimlet_sp, version: 1.0.0, hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, length: 747 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 1.0.0, hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: 1.0.0, hash: 9b7575cad720f017e936fe5994fc4e21fe040acaaf83c2edd86132aa3d667c7b, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: f355fb8429a7e0f0716dad035f9a06c799168d6c0ffcde85b1a96fef21d4b53e, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: 
f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: f896cf5b19ca85864d470ad8587f980218bff3954e7f52bbd999699cd0f9635b, length: 744 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: ab32ec86e942e1a16c8d43ea143cd80dd05a9639529d3569b1c24dfa2587ee74, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +set target release based on repo-1.0.0.zip + + +> # Update the install dataset on this sled to the target release. +> # (This populates the zone manifest, used for no-op conversions from +> # install dataset to artifact down the road.) +> sled-update-install-dataset serial0 --to-target-release +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: to target release (system version 1.0.0) + + +> # Set one of sled 0's zone's image sources to a specific artifact, and +> # also set an MGS update on the sled. Both should be reset as part of +> # this process. 
+> blueprint-edit latest set-zone-image 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact 1.2.3 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 image source to artifact: version 1.2.3 +warn: no validation is done on the requested image source + +> blueprint-edit latest set-sp-update serial0 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 sp 1.0.0 1.0.1 +blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 created from latest blueprint (8da82a8e-bf97-4fbd-8ddd-9f6462732cf1): configured update for serial serial0 +warn: no validation is done on the requested artifact hash or version + + +> # Simulate a mupdate on sled 0 by setting the mupdate override field to a +> # new UUID (generated using uuidgen). +> sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + +> # On sled 1, simulate an error obtaining the mupdate override. +> sled-set serial1 mupdate-override --with-error +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: unset -> error + + +> # Also set its SP update, which will not be cleared. +> blueprint-edit latest set-sp-update serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest sp newer older +blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 created from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4): configured update for serial serial1 +warn: no validation is done on the requested artifact hash or version + + +> # Simulate a mupdate on sled 2 as well. +> sled-set serial2 mupdate-override 203fa72c-85c1-466a-8ed3-338ee029530d +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: unset -> 203fa72c-85c1-466a-8ed3-338ee029530d + + +> # Generate a new inventory and plan against that. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, new_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971, prev_bp_override: None, zones: + - zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (Nexus) updated from artifact: version 1.2.3 to install dataset + - zone 427ec88f-f467-42fa-9bbb-66a91a36103c (InternalDns) left unchanged, image source: install dataset + - zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (Crucible) left unchanged, image source: install dataset + - zone 6444f8a5-6465-4f0b-a549-1993c113569c (InternalNtp) left unchanged, image source: install dataset + - zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (ExternalDns) left unchanged, image source: install dataset + - zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (CruciblePantry) left unchanged, image source: install dataset + +INFO previous MGS update cleared as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, artifact_version: 1.1.0, artifact_hash: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, expected_inactive_version: Version(ArtifactVersion("1.0.1")), expected_active_version: 1.0.0, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, new_bp_override: 203fa72c-85c1-466a-8ed3-338ee029530d, prev_bp_override: None, zones: + - zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (Nexus) left unchanged, image source: install dataset + - zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (CruciblePantry) left unchanged, image source: install dataset + - zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (InternalDns) left unchanged, image source: install dataset + - zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (InternalNtp) left unchanged, image source: install dataset + - zone f55647d4-5500-4ad3-893a-df45bd50d622 (Crucible) left unchanged, image source: install dataset + - zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (ExternalDns) left unchanged, image source: install dataset + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 1, new_generation: 3 +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (2) is lower than minimum required by blueprint (3); sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO noop converting 0/7 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, bp_remove_mupdate_override_id: 6123eac1-ec5b-42ba-b73f-9845105a9971 +INFO 
skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: 203fa72c-85c1-466a-8ed3-338ee029530d +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint df06bb57-ad42-4431-9206-abff322896c7 based on parent blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 + + +> # Diff the blueprints. This diff should show: +> # +> # * for sled 0: +> # * "+ will remove mupdate override" +> # * for zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038, a change from artifact to install-dataset +> # * the pending MGS update cleared +> # * for sled 1, no change, because the mupdate override field had an error +> # * for sled 2, "+ will remove mupdate override" +> # * the target release's minimum generation bumped from 1 to 3 +> # (the 3 is because generation 2 is repo-1.0.0.zip) +> blueprint-diff latest +from: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 +to: blueprint df06bb57-ad42-4431-9206-abff322896c7 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): ++ will remove mupdate override: (none) -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 - artifact: version 1.2.3 in service fd00:1122:3344:101::22 + └─ + install dataset + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): ++ will remove mupdate override: (none) -> 203fa72c-85c1-466a-8ed3-338ee029530d + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 1 -> 3 + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } +- sled 0 model0 serial0 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 Sp { expected_active_version: ArtifactVersion("1.0.0"), expected_inactive_version: Version(ArtifactVersion("1.0.1")) } + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + 
AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: 
d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Hide sled 0 from inventory temporarily -- this does two things: +> # 1. Tests that mupdate/update state transitions don't happen when +> # the sled isn't present in inventory. +> # 2. We don't want sled 0 to participate in the next few operations +> # below. +> sled-set serial0 inventory-hidden +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 inventory visibility: visible -> hidden + + +> # Set the target release to a new repo, causing a generation number bump +> # to 3. +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: SimGimletSp, kind: gimlet_sp, version: 1.0.0, hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, length: 747 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 1.0.0, hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: 1.0.0, hash: 9b7575cad720f017e936fe5994fc4e21fe040acaaf83c2edd86132aa3d667c7b, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: f355fb8429a7e0f0716dad035f9a06c799168d6c0ffcde85b1a96fef21d4b53e, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: 
f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: f896cf5b19ca85864d470ad8587f980218bff3954e7f52bbd999699cd0f9635b, length: 744 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: ab32ec86e942e1a16c8d43ea143cd80dd05a9639529d3569b1c24dfa2587ee74, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +set target release based on repo-1.0.0.zip + + +> # Invoke the planner -- should not proceed with adding or updating zones +> # because sled 0 has a remove-mupdate-override set in the blueprint. 
+> inventory-generate +generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds + +> blueprint-plan latest latest +WARN skipping zones eligible for cleanup check (sled not present in latest inventory collection), sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +WARN no inventory found for in-service sled, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO noop converting 0/7 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO skipping noop image source check (sled not present in latest inventory collection), sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: 203fa72c-85c1-466a-8ed3-338ee029530d +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba based on parent blueprint df06bb57-ad42-4431-9206-abff322896c7 + +> blueprint-diff latest +from: blueprint df06bb57-ad42-4431-9206-abff322896c7 +to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 3 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 2) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 
+ +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Now simulate the new config being applied to sled 0, which would +> # cause the mupdate override to be removed. +> sled-set serial0 mupdate-override unset +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> unset + +> sled-set serial0 inventory-visible +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 inventory visibility: hidden -> visible + + +> # But simulate a second mupdate on sled 2. This should invalidate the existing +> # mupdate override on sled 2 and cause another target release minimum +> # generation bump. +> tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver +INFO assembling repository in +INFO artifacts assembled and archived to `repo-2.0.0.zip`, component: OmicronRepoAssembler +created repo-2.0.0.zip for system version 2.0.0 + +> sled-update-install-dataset serial2 --from-repo repo-2.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 2.0.0, hash: ce1e98a8a9ae541654508f101d59a3ddeba3d28177f1d42d5614248eef0b820b, length: 751 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 2.0.0, hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 2.0.0, hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: non-semver, hash: 24f8ca0d52da5238644b11964c6feda854c7530820713efefa7ac91683b3fc76, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: non-semver, hash: 5fceee33d358aacb8a34ca93a30e28354bd8f341f6e3e895a2cafe83904f3d80, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 2.0.0, hash: bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08, length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 2.0.0, hash: 1eb9f24be68f13c274aa0ac9b863cec520dbfe762620c328431728d75bfd2198, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 2.0.0, hash: 50fe271948672a9af1ba5f96c9d87ff2736fa72d78dfef598a79fa0cc8a00474, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 2.0.0, hash: ebc82bf181db864b78cb7e3ddedf7ab1dd8fe7b377b02846f3c27cf0387bb387, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 2.0.0, hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e, length: 1691 +INFO added artifact, name: 
crucible-pantry-zone, kind: zone, version: 2.0.0, hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 2.0.0, hash: f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 2.0.0, hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 2.0.0, hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 2.0.0, hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 2.0.0, hash: 9f4bc56a15d5fd943fdac94309994b8fd73aa2be1ec61faf44bfcf2356c9dc23, length: 1683 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 2.0.0, hash: 7adf04de523865003dbf120cebddd5fcf5bad650640281b294197e6ca7016e47, length: 748 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 2.0.0, hash: 5a9019c484c051edfab4903a7a5e1817c89bd555eea3e48f6b92c6e67442e13e, length: 746 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: non-semver-2, hash: a0d6df68e6112edcf62c035947563d2a58d06e11443b95b90bf087da710550a5, length: 758 +sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: from repo at repo-2.0.0.zip (system version 2.0.0) + +> sled-set serial2 mupdate-override 1c0ce176-6dc8-4a90-adea-d4a8000751da +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: 203fa72c-85c1-466a-8ed3-338ee029530d -> 1c0ce176-6dc8-4a90-adea-d4a8000751da + + +> # Generate a new inventory and plan against that. 
+> inventory-generate +generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO inventory override no longer exists, blueprint override cleared, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, prev_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971 +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, new_bp_override: 1c0ce176-6dc8-4a90-adea-d4a8000751da, prev_bp_override: Some(203fa72c-85c1-466a-8ed3-338ee029530d (mupdate_override)), zones: + - zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (Nexus) left unchanged, image source: install dataset + - zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (CruciblePantry) left unchanged, image source: install dataset + - zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (InternalDns) left unchanged, image source: install dataset + - zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (InternalNtp) left unchanged, image source: install dataset + - zone f55647d4-5500-4ad3-893a-df45bd50d622 (Crucible) left unchanged, image source: install dataset + - zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (ExternalDns) left unchanged, image source: install dataset + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 3, new_generation: 4 +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (3) is lower than minimum required by blueprint (4); sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO noop converting 0/7 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: nexus v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: internal-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-zone v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: ntp v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: external-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-pantry-zone v1.0.0 (zone) +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO skipping noop image source check on sled (blueprint has 
get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: 1c0ce176-6dc8-4a90-adea-d4a8000751da +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba + + +> # Diff the blueprints. This diff should show: +> # * on sled 0: +> # * the "remove mupdate override" line going away +> # * no-op image source switches from install dataset to artifact +> # * on sled 1, no changes +> # * on sled 2, a _change_ in the will-remove-mupdate-override field +> # * another bump to the target release minimum generation, this time to 4. +> blueprint-diff latest +from: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba +to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 4 -> 5): +- will remove mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> (none) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + 
------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + └─ + artifact: version 1.0.0 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 - install dataset in service fd00:1122:3344:101::24 + └─ + artifact: version 1.0.0 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d - install dataset in service fd00:1122:3344:101::23 + └─ + artifact: version 1.0.0 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c - install dataset in service fd00:1122:3344:2::1 + └─ + artifact: version 1.0.0 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c - install dataset in service fd00:1122:3344:101::21 + └─ + artifact: version 1.0.0 +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 - install dataset in service fd00:1122:3344:101::22 + └─ + artifact: version 1.0.0 + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): +* will remove mupdate override: 203fa72c-85c1-466a-8ed3-338ee029530d -> 1c0ce176-6dc8-4a90-adea-d4a8000751da + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 
in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 3 -> 4 + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV 
port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Clear the mupdate override on sled 2, signifying that the config has been +> # applied. 
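+> # (Once the override is absent from inventory, the next planning pass clears
+> # the blueprint-side "will remove mupdate override" field to match. Zone adds
+> # and updates remain gated until the target release generation reaches the
+> # blueprint's minimum, i.e. until the generation is at least 4.)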
+> sled-set serial2 mupdate-override unset +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: 1c0ce176-6dc8-4a90-adea-d4a8000751da -> unset + + +> # Run the planner again. This should cause sled 2's mupdate override field +> # to be unset, but no further planning steps to happen because the +> # target release generation is not new enough. +> inventory-generate +generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO inventory override no longer exists, blueprint override cleared, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, prev_bp_override: 1c0ce176-6dc8-4a90-adea-d4a8000751da +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (3) is lower than minimum required by blueprint (4) +INFO noop converting 0/7 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/0 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/6 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac + +> blueprint-show latest +blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +parent: 9034c710-3e57-45f3-99e5-4316145e87ac + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 
2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: (none) + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 4 + + PENDING MGS-MANAGED UPDATES: 1 + Pending MGS-managed updates (all baseboards): + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } + + + +> blueprint-diff latest +from: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac +to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 4 -> 5): +- will remove mupdate override: 1c0ce176-6dc8-4a90-adea-d4a8000751da -> (none) + + physical disks: + 
------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 
4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 
99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Now set the target release -- with this, the rest of the planner starts +> # working again. 
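+> # (Uploading repo-2.0.0.zip bumps the target release generation from 3 to 4,
+> # meeting the blueprint's minimum of 4, so the mupdate gate lifts and the
+> # planner can resume work, starting with no-op conversions of sled 2's
+> # 2.0.0 install-dataset zones to artifact image sources.)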
+> set target-release repo-2.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 2.0.0, hash: ce1e98a8a9ae541654508f101d59a3ddeba3d28177f1d42d5614248eef0b820b, length: 751 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 2.0.0, hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 2.0.0, hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: non-semver, hash: 24f8ca0d52da5238644b11964c6feda854c7530820713efefa7ac91683b3fc76, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: non-semver, hash: 5fceee33d358aacb8a34ca93a30e28354bd8f341f6e3e895a2cafe83904f3d80, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 2.0.0, hash: bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08, length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 2.0.0, hash: 1eb9f24be68f13c274aa0ac9b863cec520dbfe762620c328431728d75bfd2198, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 2.0.0, hash: 50fe271948672a9af1ba5f96c9d87ff2736fa72d78dfef598a79fa0cc8a00474, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 2.0.0, hash: ebc82bf181db864b78cb7e3ddedf7ab1dd8fe7b377b02846f3c27cf0387bb387, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 2.0.0, hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e, length: 1691 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 2.0.0, hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 2.0.0, hash: f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 2.0.0, hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 2.0.0, hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 2.0.0, hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 2.0.0, hash: 9f4bc56a15d5fd943fdac94309994b8fd73aa2be1ec61faf44bfcf2356c9dc23, length: 1683 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 2.0.0, hash: 7adf04de523865003dbf120cebddd5fcf5bad650640281b294197e6ca7016e47, length: 748 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: 
fake-psc-rot, kind: psc_rot_image_b, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 2.0.0, hash: 5a9019c484c051edfab4903a7a5e1817c89bd555eea3e48f6b92c6e67442e13e, length: 746 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: non-semver-2, hash: a0d6df68e6112edcf62c035947563d2a58d06e11443b95b90bf087da710550a5, length: 758 +set target release based on repo-2.0.0.zip + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO noop converting 0/7 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/0 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: nexus v2.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: crucible-pantry-zone v2.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: internal-dns v2.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: ntp v2.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: crucible-zone v2.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, tuf_artifact_id: external-dns v2.0.0 (zone) +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 +INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Nexus zones exist in plan, 
desired_count: 3, current_count: 3 +INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 +INFO SP update impossible (will remove it and re-evaluate board), artifact_version: newest, artifact_hash: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, expected_inactive_version: Version(ArtifactVersion("older")), expected_active_version: newer, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 +INFO skipping board for SP update, serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 +INFO skipping board for SP update, serial_number: serial0, part_number: model0 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial2, part_number: model2 +INFO skipping board for SP update, serial_number: serial2, part_number: model2 +INFO ran out of boards for SP update +INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 + +> blueprint-show latest +blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 +parent: d60afc57-f15d-476c-bd0f-b1071e2bb976 + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + 
oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 2.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 2.0.0 in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 2.0.0 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 2.0.0 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 2.0.0 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 2.0.0 in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates, sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates, sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates, sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates, sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates, sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 4 + + PENDING MGS-MANAGED UPDATES: 0 + + +> blueprint-diff latest +from: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5 -> 6): + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - install dataset in service fd00:1122:3344:103::25 + └─ + artifact: version 2.0.0 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 - install dataset in service fd00:1122:3344:103::24 + └─ + artifact: version 2.0.0 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 - install dataset in service fd00:1122:3344:103::23 + └─ + artifact: version 2.0.0 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 - install dataset in service fd00:1122:3344:3::1 + └─ + artifact: version 2.0.0 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 - install dataset in service fd00:1122:3344:103::21 + └─ + artifact: version 2.0.0 +* nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 - install dataset in service fd00:1122:3344:103::22 + └─ + artifact: version 2.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 
6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Set the target release minimum generation to a large value -- we're going to +> # test that the planner bails if it attempts a rollback of the target release +> # minimum generation. 
+> blueprint-edit latest set-target-release-min-gen 1000 +blueprint 626487fa-7139-45ec-8416-902271fc730b created from latest blueprint (a5a8f242-ffa5-473c-8efd-2acf2dc0b736): set target release minimum generation to 1000 + +> sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: error -> cc724abe-80c1-47e6-9771-19e6540531a9 + +> inventory-generate +generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds + +> blueprint-plan latest latest +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, new_bp_override: cc724abe-80c1-47e6-9771-19e6540531a9, prev_bp_override: None, zones: + - zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (Clickhouse) left unchanged, image source: install dataset + - zone 466a9f29-62bf-4e63-924a-b9efdb86afec (Nexus) left unchanged, image source: install dataset + - zone 62620961-fc4a-481e-968b-f5acbac0dc63 (InternalNtp) left unchanged, image source: install dataset + - zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (ExternalDns) left unchanged, image source: install dataset + - zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (InternalDns) left unchanged, image source: install dataset + - zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (CruciblePantry) left unchanged, image source: install dataset + - zone bd354eef-d8a6-4165-9124-283fb5e46d77 (Crucible) left unchanged, image source: install dataset + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +error: generating blueprint: target release minimum generation was set to 1000, but we tried to set it to the older generation 5, indicating a possible table rollback which should not happen + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 6ca20a8347c..b99ca2c362b 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -80,12 +80,14 @@ sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: install dataset updated: simulated er > # On a third sled, update the install dataset and simulate a mupdate override. -> # (Currently we do this in the blueprint, but with -> # https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and -> # set a mupdate-override on the sled directly.) +> # Also set it in the blueprint -- this simulates the situation where the mupdate +> # override is in progress and will be cleared in the future. 
> sled-update-install-dataset serial2 --to-target-release sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: to target release (system version 1.0.0) +> sled-set serial2 mupdate-override ffffffff-ffff-ffff-ffff-ffffffffffff +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: unset -> ffffffff-ffff-ffff-ffff-ffffffffffff + > blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff @@ -156,6 +158,8 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > blueprint-plan latest latest WARN skipping zones eligible for cleanup check (sled not present in latest inventory collection), sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +WARN no inventory found for in-service sled, phase: do_plan_mupdate_override, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 WARN skipping noop image source check since sled-agent encountered error retrieving zone manifest (this is abnormal), sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, error: reconfigurator-sim simulated error: simulated error obtaining zone manifest INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: nexus v1.0.0 (zone) INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: internal-dns v1.0.0 (zone) @@ -174,19 +178,6 @@ INFO noop converting 5/6 install-dataset zones to artifact store, sled_id: aff6c INFO noop converting 0/2 install-dataset zones to artifact store, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7 INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: ffffffff-ffff-ffff-ffff-ffffffffffff INFO skipping noop image source check (sled not present in latest inventory collection), sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e -INFO parent blueprint contains NTP zone, but it's not in inventory yet, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e -INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 -INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 -INFO configuring SP update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: 
NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 -INFO reached maximum number of pending SP updates, max: 1 INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 @@ -334,14 +325,6 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 generation: 1 (unchanged) read from:: SingleNode (unchanged) - PENDING MGS UPDATES: - - Pending MGS-managed updates (all baseboards): - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -+ sled 0 model0 serial0 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion } - internal DNS: * DNS zone: "control-plane.oxide.internal": @@ -511,6 +494,7 @@ set sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e inventory visibility: hidden -> vi generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan latest latest +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 WARN skipping noop image source check since sled-agent encountered error retrieving zone manifest (this is abnormal), sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, error: reconfigurator-sim simulated error: simulated error obtaining zone manifest INFO noop converting 0/0 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, error: reconfigurator-sim: simulated error validating zone image @@ -520,18 +504,6 @@ INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_ INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, tuf_artifact_id: crucible-zone v1.0.0 (zone) INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, tuf_artifact_id: ntp v1.0.0 (zone) INFO noop converting 2/2 install-dataset zones to artifact store, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e -INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 -INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient 
CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 -INFO SP update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 -INFO reached maximum number of pending SP updates, max: 1 INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index 8892b3fbb48..3811d182e1d 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -498,7 +498,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 METADATA: created by::::::::::::: reconfigurator-cli created at::::::::::::: - comment:::::::::::::::: (none) + comment:::::::::::::::: updated target release minimum generation from 1 to 2 internal DNS version::: 1 external DNS version::: 1 target release min gen: 2 diff --git a/nexus/reconfigurator/blippy/Cargo.toml b/nexus/reconfigurator/blippy/Cargo.toml index e7f72088718..a5a02a65c1d 100644 --- a/nexus/reconfigurator/blippy/Cargo.toml +++ b/nexus/reconfigurator/blippy/Cargo.toml @@ -11,8 +11,8 @@ nexus-sled-agent-shared.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true - omicron-workspace-hack.workspace = true +tufaceous-artifact.workspace = true [dev-dependencies] nexus-reconfigurator-planning.workspace = true diff --git a/nexus/reconfigurator/blippy/src/blippy.rs b/nexus/reconfigurator/blippy/src/blippy.rs index 6ce46489b74..ed3c918031a 100644 --- a/nexus/reconfigurator/blippy/src/blippy.rs +++ b/nexus/reconfigurator/blippy/src/blippy.rs @@ -9,17 +9,20 @@ use core::fmt; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintDatasetConfig; use nexus_types::deployment::BlueprintZoneConfig; +use nexus_types::deployment::BlueprintZoneImageVersion; use nexus_types::inventory::ZpoolName; use omicron_common::address::DnsSubnet; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::MacAddr; use omicron_common::disk::DatasetKind; +use omicron_uuid_kinds::MupdateOverrideUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use std::collections::BTreeSet; use std::net::IpAddr; use std::net::SocketAddrV6; +use tufaceous_artifact::ArtifactHash; #[derive(Debug, Clone, PartialEq, Eq)] pub struct Note { @@ -177,6 +180,12 @@ pub enum SledKind { dataset: BlueprintDatasetConfig, address: SocketAddrV6, }, + MupdateOverrideWithArtifactZone { + mupdate_override_id: MupdateOverrideUuid, + zone: BlueprintZoneConfig, + version: BlueprintZoneImageVersion, + hash: ArtifactHash, + }, } impl fmt::Display for 
SledKind { @@ -372,6 +381,20 @@ impl fmt::Display for SledKind { dataset.kind, dataset.id, address, ) } + SledKind::MupdateOverrideWithArtifactZone { + mupdate_override_id, + zone, + version, + hash, + } => { + write!( + f, + "sled has remove_mupdate_override set ({mupdate_override_id}), \ + but zone {} image source is set to Artifact (version {version}, \ + hash {hash})", + zone.id, + ) + } } } } diff --git a/nexus/reconfigurator/blippy/src/checks.rs b/nexus/reconfigurator/blippy/src/checks.rs index b92bdbafc95..b8137ab72e0 100644 --- a/nexus/reconfigurator/blippy/src/checks.rs +++ b/nexus/reconfigurator/blippy/src/checks.rs @@ -12,8 +12,10 @@ use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintSledConfig; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::SledFilter; use nexus_types::deployment::blueprint_zone_type; use omicron_common::address::DnsSubnet; use omicron_common::address::Ipv6Subnet; @@ -31,6 +33,7 @@ pub(crate) fn perform_all_blueprint_only_checks(blippy: &mut Blippy<'_>) { check_external_networking(blippy); check_dataset_zpool_uniqueness(blippy); check_datasets(blippy); + check_mupdate_override(blippy); } fn check_underlay_ips(blippy: &mut Blippy<'_>) { @@ -537,6 +540,55 @@ fn check_datasets(blippy: &mut Blippy<'_>) { } } +fn check_mupdate_override(blippy: &mut Blippy<'_>) { + // Perform checks for invariants that should be upheld if + // remove_mupdate_override is set for a sled. + for (&sled_id, sled) in &blippy.blueprint().sleds { + if !sled.state.matches(SledFilter::InService) { + continue; + } + + if let Some(mupdate_override_id) = sled.remove_mupdate_override { + // All in-service zones should be set to InstallDataset. + for zone in &sled.zones { + if zone.disposition.is_in_service() { + match &zone.image_source { + BlueprintZoneImageSource::InstallDataset => { + // This is valid. + } + BlueprintZoneImageSource::Artifact { + version, + hash, + } => { + // This is invalid -- if remove_mupdate_override is + // set, all zones must be InstallDataset. + blippy.push_sled_note( + sled_id, + Severity::Fatal, + SledKind::MupdateOverrideWithArtifactZone { + mupdate_override_id, + zone: zone.clone(), + version: version.clone(), + hash: *hash, + }, + ); + } + } + } + } + + // TODO: The host phase 2 contents should be set to CurrentContents + // (waiting for + // https://github.com/oxidecomputer/omicron/issues/8542). + + // TODO: PendingMgsUpdates for this sled should be empty. Mapping + // sled IDs to their MGS identifiers (baseboard ID) requires a map + // that's not currently part of the blueprint. We may want to either + // include that map in the blueprint, or pass it in via blippy. 
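+            // (For comparison: on the planner side, this change's
+            // `BlueprintBuilder::sled_ensure_mupdate_override` resolves a sled
+            // ID to its baseboard via `PlanningInput::sled_lookup`; blippy
+            // only sees the blueprint, so it can't do the same today.)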
+ } + } +} + #[cfg(test)] mod tests { use super::*; @@ -545,10 +597,14 @@ mod tests { use crate::blippy::Note; use nexus_reconfigurator_planning::example::ExampleSystemBuilder; use nexus_reconfigurator_planning::example::example; + use nexus_types::deployment::BlueprintZoneImageVersion; use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::blueprint_zone_type; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::MupdateOverrideUuid; use std::mem; + use tufaceous_artifact::ArtifactHash; + use tufaceous_artifact::ArtifactVersion; // The tests below all take the example blueprint, mutate in some invalid // way, and confirm that blippy reports the invalidity. This test confirms @@ -1561,4 +1617,61 @@ mod tests { logctx.cleanup_successful(); } + + #[test] + fn test_mupdate_override_with_artifact_image_source() { + static TEST_NAME: &str = + "test_remove_mupdate_override_with_artifact_image_source"; + let logctx = test_setup_log(TEST_NAME); + let (_, _, mut blueprint) = example(&logctx.log, TEST_NAME); + + // Find a sled with zones and set remove_mupdate_override on it. + let (&sled_id, sled) = blueprint + .sleds + .iter_mut() + .find(|(_, config)| !config.zones.is_empty()) + .expect("at least one sled with zones"); + + // Set the remove_mupdate_override field on the sled. + let mupdate_override_id = MupdateOverrideUuid::max(); + sled.remove_mupdate_override = Some(mupdate_override_id); + + // Find a zone and set it to use an artifact image source. + let kind = { + let mut zone = sled + .zones + .iter_mut() + .find(|z| z.disposition.is_in_service()) + .expect("at least one in-service zone"); + + let version = BlueprintZoneImageVersion::Available { + version: ArtifactVersion::new_const("1.0.0"), + }; + let hash = ArtifactHash([1u8; 32]); + zone.image_source = BlueprintZoneImageSource::Artifact { + version: version.clone(), + hash, + }; + + SledKind::MupdateOverrideWithArtifactZone { + mupdate_override_id, + zone: zone.clone(), + version, + hash, + } + }; + + let expected_note = Note { + severity: Severity::Fatal, + kind: Kind::Sled { sled_id, kind }, + }; + + let report = + Blippy::new(&blueprint).into_report(BlippyReportSortKey::Kind); + eprintln!("{}", report.display()); + assert_eq!(report.notes().len(), 1, "exactly one note expected"); + assert_eq!(report.notes()[0], expected_note); + + logctx.cleanup_successful(); + } } diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 756d7dd604c..64fc3a5349e 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -13,6 +13,7 @@ chrono.workspace = true debug-ignore.workspace = true daft.workspace = true gateway-client.workspace = true +iddqd.workspace = true id-map.workspace = true illumos-utils.workspace = true indexmap.workspace = true @@ -36,6 +37,7 @@ slog-error-chain.workspace = true sp-sim.workspace = true static_assertions.workspace = true strum.workspace = true +swrite.workspace = true thiserror.workspace = true tufaceous-artifact.workspace = true typed-rng.workspace = true diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 767c953652b..b4ded230f8a 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -21,6 +21,9 @@ use anyhow::anyhow; use anyhow::bail; use clickhouse_admin_types::OXIMETER_CLUSTER; use id_map::IdMap; +use 
iddqd::IdOrdItem; +use iddqd::IdOrdMap; +use iddqd::id_upcast; use itertools::Either; use nexus_inventory::now_db_precision; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; @@ -41,6 +44,7 @@ use nexus_types::deployment::OmicronZoneExternalFloatingAddr; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::deployment::OximeterReadMode; +use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdates; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; @@ -83,6 +87,8 @@ use std::net::IpAddr; use std::net::Ipv6Addr; use std::net::SocketAddr; use std::net::SocketAddrV6; +use swrite::SWrite; +use swrite::swriteln; use thiserror::Error; use super::ClickhouseZonesThatShouldBeRunning; @@ -135,6 +141,15 @@ pub enum Error { expected: Generation, actual: Generation, }, + #[error( + "target release minimum generation was set to {current}, \ + but we tried to set it to the older generation {new}, indicating a \ + possible table rollback which should not happen" + )] + TargetReleaseMinimumGenerationRollback { + current: Generation, + new: Generation, + }, #[error(transparent)] TufRepoContentsError(#[from] TufRepoContentsError), } @@ -266,6 +281,29 @@ impl From for SledEditCounts { } } +/// A list of scalar (primitive) values which have been edited on a sled. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct EditedSledScalarEdits { + /// Whether the remove_mupdate_override field was modified. + pub remove_mupdate_override: bool, + /// Whether the debug operation to force a Sled Agent generation bump was + /// set. + pub debug_force_generation_bump: bool, +} + +impl EditedSledScalarEdits { + pub fn zeroes() -> Self { + Self { + debug_force_generation_bump: false, + remove_mupdate_override: false, + } + } + + pub fn has_edits(&self) -> bool { + self.debug_force_generation_bump || self.remove_mupdate_override + } +} + /// Describes operations which the BlueprintBuilder has performed to arrive /// at its state. /// @@ -305,6 +343,10 @@ pub(crate) enum Operation { num_datasets_expunged: usize, num_zones_expunged: usize, }, + SetTargetReleaseMinimumGeneration { + current_generation: Generation, + new_generation: Generation, + }, SledNoopZoneImageSourcesUpdated { sled_id: SledUuid, count: usize, @@ -380,6 +422,16 @@ impl fmt::Display for Operation { zone image source updates" ) } + Self::SetTargetReleaseMinimumGeneration { + current_generation, + new_generation, + } => { + write!( + f, + "updated target release minimum generation from \ + {current_generation} to {new_generation}" + ) + } } } } @@ -649,9 +701,14 @@ impl<'a> BlueprintBuilder<'a> { // are no longer in service and need expungement work. 
         let mut sleds = BTreeMap::new();
         for (sled_id, editor) in self.sled_editors {
-            let EditedSled { config, edit_counts } = editor.finalize();
+            let EditedSled { config, edit_counts, scalar_edits } =
+                editor.finalize();
             sleds.insert(sled_id, config);
-            if edit_counts.has_nonzero_counts() {
+            if edit_counts.has_nonzero_counts() || scalar_edits.has_edits() {
+                let EditedSledScalarEdits {
+                    debug_force_generation_bump,
+                    remove_mupdate_override,
+                } = scalar_edits;
                 debug!(
                     self.log,
                     "sled modified in new blueprint";
                     "sled_id" => %sled_id,
@@ -659,6 +716,8 @@
                     "disk_edits" => ?edit_counts.disks,
                     "dataset_edits" => ?edit_counts.datasets,
                     "zone_edits" => ?edit_counts.zones,
+                    "debug_force_generation_bump" => debug_force_generation_bump,
+                    "remove_mupdate_override_modified" => remove_mupdate_override,
                 );
             } else {
                 debug!(
@@ -1160,6 +1219,36 @@
         Ok(editor.get_remove_mupdate_override())
     }
 
+    /// Updates a sled's mupdate override field based on the mupdate override
+    /// provided by inventory.
+    pub fn sled_ensure_mupdate_override(
+        &mut self,
+        sled_id: SledUuid,
+        inv_mupdate_override_id: Option<MupdateOverrideUuid>,
+    ) -> Result<EnsureMupdateOverrideAction, Error> {
+        let editor = self.sled_editors.get_mut(&sled_id).ok_or_else(|| {
+            Error::Planner(anyhow!(
+                "tried to ensure mupdate override for unknown sled {sled_id}"
+            ))
+        })?;
+
+        // Also map the editor to the corresponding PendingMgsUpdates.
+        let sled_details = self
+            .input
+            .sled_lookup(SledFilter::InService, sled_id)
+            .map_err(|error| Error::Planner(anyhow!(error)))?;
+        // TODO: simplify down to &BaseboardId
+        let baseboard_id = Arc::new(sled_details.baseboard_id.clone());
+        let pending_mgs_update = self.pending_mgs_updates.entry(baseboard_id);
+
+        editor
+            .ensure_mupdate_override(
+                inv_mupdate_override_id,
+                pending_mgs_update,
+            )
+            .map_err(|err| Error::SledEditError { sled_id, err })
+    }
+
     fn next_internal_dns_gz_address_index(&self, sled_id: SledUuid) -> u32 {
         let used_internal_dns_gz_address_indices = self
             .current_sled_zones(
@@ -1960,21 +2049,29 @@
             .len()
     }
 
+    /// Get the value of `target_release_minimum_generation`.
+    pub fn target_release_minimum_generation(&self) -> Generation {
+        self.target_release_minimum_generation
+    }
+
     /// Given the current value of `target_release_minimum_generation`, set the
     /// new value for this blueprint.
     pub fn set_target_release_minimum_generation(
         &mut self,
-        current: Generation,
-        target_release_minimum_generation: Generation,
+        current_generation: Generation,
+        new_generation: Generation,
     ) -> Result<(), Error> {
-        if self.target_release_minimum_generation != current {
+        if self.target_release_minimum_generation != current_generation {
             return Err(Error::TargetReleaseMinimumGenerationMismatch {
-                expected: current,
+                expected: current_generation,
                 actual: self.target_release_minimum_generation,
             });
         }
-        self.target_release_minimum_generation =
-            target_release_minimum_generation;
+        self.target_release_minimum_generation = new_generation;
+        self.record_operation(Operation::SetTargetReleaseMinimumGeneration {
+            current_generation,
+            new_generation,
+        });
 
         Ok(())
     }
@@ -2208,6 +2305,156 @@ pub(super) fn ensure_input_networking_records_appear_in_parent_blueprint(
     Ok(())
 }
 
+/// The result of an `ensure_mupdate_override` call for a particular sled.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum EnsureMupdateOverrideAction {
+    /// The inventory and blueprint overrides are consistent, so no action was
+    /// taken.
+    NoAction {
+        /// The mupdate override currently in place.
+        mupdate_override: Option<MupdateOverrideUuid>,
+    },
+    /// Inventory had an override that didn't match what was in the blueprint,
+    /// so the blueprint was updated to match the inventory.
+    BpSetOverride {
+        /// The override ID that was set.
+        inv_override: MupdateOverrideUuid,
+        /// The previous blueprint override that was removed.
+        prev_bp_override: Option<MupdateOverrideUuid>,
+        /// The zones which were updated to the install dataset, along with
+        /// their old values.
+        zones: IdOrdMap<EnsureMupdateOverrideUpdatedZone>,
+        /// The pending MGS update that was cleared, if any.
+        prev_mgs_update: Option<Box<PendingMgsUpdate>>,
+    },
+    /// The inventory did not have an override but the blueprint did, so the
+    /// blueprint's override was cleared.
+    BpClearOverride {
+        /// The previous blueprint override that was removed.
+        prev_bp_override: MupdateOverrideUuid,
+    },
+    /// Sled Agent encountered an error retrieving the mupdate override from
+    /// the inventory.
+    GetOverrideError {
+        /// An error message.
+        message: String,
+    },
+}
+
+impl EnsureMupdateOverrideAction {
+    pub fn log_to(&self, log: &slog::Logger) {
+        match self {
+            EnsureMupdateOverrideAction::NoAction { mupdate_override } => {
+                debug!(
+                    log,
+                    "no mupdate override action taken, current value left unchanged";
+                    "mupdate_override" => ?mupdate_override,
+                );
+            }
+            EnsureMupdateOverrideAction::BpSetOverride {
+                inv_override,
+                prev_bp_override,
+                zones,
+                prev_mgs_update,
+            } => {
+                let mut zones_desc = String::new();
+                if zones.is_empty() {
+                    zones_desc.push_str("(none)");
+                } else {
+                    // Add a newline before the first zone -- it makes it easier
+                    // to read in log output.
+                    zones_desc.push('\n');
+                    for zone in zones {
+                        swriteln!(zones_desc, " - {}", zone);
+                    }
+                }
+                info!(
+                    log,
+                    "blueprint mupdate override updated to match inventory";
+                    "new_bp_override" => %inv_override,
+                    "prev_bp_override" => ?prev_bp_override,
+                    "zones" => zones_desc,
+                );
+                if let Some(prev_mgs_update) = prev_mgs_update {
+                    info!(
+                        log,
+                        "previous MGS update cleared as part of updating \
+                         blueprint mupdate override to match inventory";
+                        prev_mgs_update,
+                    );
+                } else {
+                    info!(
+                        log,
+                        "no previous MGS update found as part of updating \
+                         blueprint mupdate override to match inventory",
+                    );
+                }
+            }
+            EnsureMupdateOverrideAction::BpClearOverride {
+                prev_bp_override,
+            } => {
+                info!(
+                    log,
+                    "inventory override no longer exists, blueprint override \
+                     cleared";
+                    "prev_bp_override" => %prev_bp_override,
+                )
+            }
+            EnsureMupdateOverrideAction::GetOverrideError { message } => {
+                error!(
+                    log, "error getting mupdate override info for sled, \
+                    not altering blueprint override";
+                    "message" => %message,
+                );
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct EnsureMupdateOverrideUpdatedZone {
+    /// The ID of the zone.
+    pub zone_id: OmicronZoneUuid,
+
+    /// The Omicron zone kind.
+    pub kind: ZoneKind,
+
+    /// The previous image source.
+    pub old_image_source: BlueprintZoneImageSource,
+
+    /// The new image source.
+    pub new_image_source: BlueprintZoneImageSource,
+}
+
+impl fmt::Display for EnsureMupdateOverrideUpdatedZone {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.old_image_source == self.new_image_source {
+            write!(
+                f,
+                "zone {} ({:?}) left unchanged, image source: {}",
+                self.zone_id, self.kind, self.old_image_source,
+            )
+        } else {
+            write!(
+                f,
+                "zone {} ({:?}) updated from {} to {}",
+                self.zone_id,
+                self.kind,
+                self.old_image_source,
+                self.new_image_source,
+            )
+        }
+    }
+}
+
+impl IdOrdItem for EnsureMupdateOverrideUpdatedZone {
+    type Key<'a> = OmicronZoneUuid;
+    fn key(&self) -> Self::Key<'_> {
+        self.zone_id
+    }
+    id_upcast!();
+}
+
 #[cfg(test)]
 pub mod test {
     use super::*;
diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
index 202584c93e0..4999a9f9644 100644
--- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
@@ -4,8 +4,13 @@
 
 //! Support for editing the blueprint details of a single sled.
 
+use crate::blueprint_builder::EditedSledScalarEdits;
+use crate::blueprint_builder::EnsureMupdateOverrideAction;
+use crate::blueprint_builder::EnsureMupdateOverrideUpdatedZone;
 use crate::blueprint_builder::SledEditCounts;
 use crate::planner::SledPlannerRng;
+use id_map::Entry;
+use iddqd::IdOrdMap;
 use illumos_utils::zpool::ZpoolName;
 use itertools::Either;
 use nexus_sled_agent_shared::inventory::ZoneKind;
@@ -18,6 +23,7 @@ use nexus_types::deployment::BlueprintZoneConfig;
 use nexus_types::deployment::BlueprintZoneDisposition;
 use nexus_types::deployment::BlueprintZoneImageSource;
 use nexus_types::deployment::BlueprintZoneType;
+use nexus_types::deployment::PendingMgsUpdate;
 use nexus_types::deployment::blueprint_zone_type;
 use nexus_types::external_api::views::SledState;
 use omicron_common::address::Ipv6Subnet;
@@ -153,8 +159,11 @@ impl SledEditor {
             SledState::Decommissioned,
             "for_existing_decommissioned called on non-decommissioned sled"
         );
-        let inner =
-            EditedSled { config, edit_counts: SledEditCounts::zeroes() };
+        let inner = EditedSled {
+            config,
+            edit_counts: SledEditCounts::zeroes(),
+            scalar_edits: EditedSledScalarEdits::zeroes(),
+        };
         Ok(Self(InnerSledEditor::Decommissioned(inner)))
     }
 
@@ -363,6 +372,19 @@ impl SledEditor {
         self.as_active_mut()?.set_zone_image_source(zone_id, image_source)
     }
 
+    /// Updates a sled's mupdate override field based on the mupdate override
+    /// provided by inventory.
+    pub fn ensure_mupdate_override(
+        &mut self,
+        inv_mupdate_override_id: Option<MupdateOverrideUuid>,
+        pending_mgs_update: Entry<'_, PendingMgsUpdate>,
+    ) -> Result<EnsureMupdateOverrideAction, SledEditError> {
+        self.as_active_mut()?.ensure_mupdate_override(
+            inv_mupdate_override_id,
+            pending_mgs_update,
+        )
+    }
+
     /// Sets remove-mupdate-override configuration for this sled.
     ///
     /// Currently only used in test code.
@@ -411,6 +433,7 @@ struct ActiveSledEditor {
 pub(crate) struct EditedSled {
     pub config: BlueprintSledConfig,
     pub edit_counts: SledEditCounts,
+    pub scalar_edits: EditedSledScalarEdits,
 }
 
 impl ActiveSledEditor {
@@ -470,6 +493,11 @@ impl ActiveSledEditor {
             self.remove_mupdate_override.is_modified();
 
         let mut sled_agent_generation = self.incoming_sled_agent_generation;
+        let scalar_edits = EditedSledScalarEdits {
+            debug_force_generation_bump: self.debug_force_generation_bump,
+            remove_mupdate_override: remove_mupdate_override_is_modified,
+        };
+
         // Bump the generation if we made any changes of concern to sled-agent.
         if self.debug_force_generation_bump
             || disks_counts.has_nonzero_counts()
@@ -496,6 +524,7 @@ impl ActiveSledEditor {
                 datasets: datasets_counts,
                 zones: zones_counts,
             },
+            scalar_edits,
         }
     }
 
@@ -696,6 +725,91 @@ impl ActiveSledEditor {
         Ok(())
     }
 
+    /// Update a sled's mupdate override field based on the mupdate override
+    /// provided by inventory.
+    pub fn ensure_mupdate_override(
+        &mut self,
+        inv_mupdate_override_id: Option<MupdateOverrideUuid>,
+        pending_mgs_update: Entry<'_, PendingMgsUpdate>,
+    ) -> Result<EnsureMupdateOverrideAction, SledEditError> {
+        match (inv_mupdate_override_id, *self.remove_mupdate_override.value()) {
+            (Some(inv_override), Some(bp_override))
+                if inv_override == bp_override =>
+            {
+                // If the inventory and blueprint overrides are the same, the
+                // sled agent hasn't yet removed the override. Nothing to do at
+                // the moment.
+                Ok(EnsureMupdateOverrideAction::NoAction {
+                    mupdate_override: Some(inv_override),
+                })
+            }
+            (Some(inv_override), bp_override) => {
+                // Inventory says there's an override in place, but the
+                // blueprint doesn't (or has a different override in place).
+                // This means that a MUPdate happened since we last did
+                // blueprint planning.
+                //
+                // Set the blueprint's remove_mupdate_override.
+                self.set_remove_mupdate_override(Some(inv_override));
+                // Set all zone image sources to InstallDataset. This is an
+                // acknowledgement of the current state of the world.
+                let zone_ids: Vec<_> = self
+                    .zones(BlueprintZoneDisposition::is_in_service)
+                    .map(|zone| (zone.id, zone.kind()))
+                    .collect();
+
+                let mut zones = IdOrdMap::with_capacity(zone_ids.len());
+                for (zone_id, kind) in zone_ids {
+                    let old_image_source = self.zones.set_zone_image_source(
+                        &zone_id,
+                        BlueprintZoneImageSource::InstallDataset,
+                    )?;
+                    let item = EnsureMupdateOverrideUpdatedZone {
+                        zone_id,
+                        kind,
+                        old_image_source,
+                        new_image_source:
+                            BlueprintZoneImageSource::InstallDataset,
+                    };
+                    zones.insert_unique(item).expect(
+                        "self.zones is a BTreeMap so zone IDs are unique",
+                    );
+                }
+
+                // Clear out the pending MGS update for this sled.
+                let prev_mgs_update = match pending_mgs_update {
+                    Entry::Vacant(_) => None,
+                    Entry::Occupied(entry) => Some(Box::new(entry.remove())),
+                };
+
+                // TODO: Do the same for host OS.
+
+                Ok(EnsureMupdateOverrideAction::BpSetOverride {
+                    inv_override,
+                    prev_bp_override: bp_override,
+                    zones,
+                    prev_mgs_update,
+                })
+            }
+            (None, Some(prev_bp_override)) => {
+                // The blueprint says there's an override in place, but the
+                // inventory doesn't. This means that the sled has removed its
+                // override that was set in the above branch. We can remove the
+                // override from the blueprint.
+                self.set_remove_mupdate_override(None);
+                Ok(EnsureMupdateOverrideAction::BpClearOverride {
+                    prev_bp_override,
+                })
+            }
+            (None, None) => {
+                // No override in place, nothing to do.
+                Ok(EnsureMupdateOverrideAction::NoAction {
+                    mupdate_override: None,
+                })
+            }
+        }
+    }
+
     /// Set remove-mupdate-override configuration for this sled.
pub fn set_remove_mupdate_override( &mut self, diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 7f7fcd7880a..ede9fd694f7 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -9,6 +9,7 @@ use crate::blueprint_builder::BlueprintBuilder; use crate::blueprint_builder::Ensure; use crate::blueprint_builder::EnsureMultiple; +use crate::blueprint_builder::EnsureMupdateOverrideAction; use crate::blueprint_builder::Error; use crate::blueprint_builder::Operation; use crate::blueprint_editor::DisksEditError; @@ -16,6 +17,7 @@ use crate::blueprint_editor::SledEditError; use crate::mgs_updates::plan_mgs_updates; use crate::planner::omicron_zone_placement::PlacementError; use gateway_client::types::SpType; +use itertools::Itertools; use nexus_sled_agent_shared::inventory::OmicronZoneType; use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::Blueprint; @@ -42,6 +44,7 @@ use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use slog::debug; use slog::error; +use slog::o; use slog::{Logger, info, warn}; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; @@ -153,12 +156,28 @@ impl<'a> Planner<'a> { fn do_plan(&mut self) -> Result<(), Error> { self.do_plan_expunge()?; self.do_plan_decommission()?; + let plan_mupdate_override_res = self.do_plan_mupdate_override()?; + + // Within `do_plan_noop_image_source`, we plan noop image sources on + // sleds other than those currently affected by mupdate overrides. This + // means that we don't have to wait for the `plan_mupdate_override_res` + // result for that step. self.do_plan_noop_image_source()?; - self.do_plan_add()?; - if let UpdateStepResult::ContinueToNextStep = self.do_plan_mgs_updates() + + if let UpdateStepResult::ContinueToNextStep = plan_mupdate_override_res { - self.do_plan_zone_updates()?; + // If do_plan_mupdate_override returns Waiting, we don't plan *any* + // additional steps until the system has recovered. + self.do_plan_add()?; + if let UpdateStepResult::ContinueToNextStep = + self.do_plan_mgs_updates() + { + self.do_plan_zone_updates()?; + } } + + // CockroachDB settings aren't dependent on zones, so they can be + // planned independently of the rest of the system. self.do_plan_cockroachdb_settings(); Ok(()) } @@ -1407,6 +1426,206 @@ impl<'a> Planner<'a> { Ok(()) } + fn do_plan_mupdate_override(&mut self) -> Result { + // For each sled, compare what's in the inventory to what's in the + // blueprint. + let mut actions_by_sled = BTreeMap::new(); + let log = self.log.new(o!("phase" => "do_plan_mupdate_override")); + + // We use the list of in-service sleds here -- we don't want to alter + // expunged or decommissioned sleds. + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + let log = log.new(o!("sled_id" => sled_id.to_string())); + let Some(inv_sled) = self.inventory.sled_agents.get(&sled_id) + else { + warn!(log, "no inventory found for in-service sled"); + continue; + }; + let action = match &inv_sled + .zone_image_resolver + .mupdate_override + .boot_override + { + Ok(inv_mupdate_override) => { + self.blueprint.sled_ensure_mupdate_override( + sled_id, + inv_mupdate_override + .as_ref() + .map(|inv| inv.mupdate_override_id), + )? 
+ } + Err(message) => EnsureMupdateOverrideAction::GetOverrideError { + message: message.clone(), + }, + }; + action.log_to(&log); + actions_by_sled.insert(sled_id, action); + } + + // As a result of the action above, did any sleds get a new mupdate + // override in the blueprint? In that case, halt consideration of + // updates by setting the target_release_minimum_generation. + // + // Note that this is edge-triggered, not level-triggered. This is a + // domain requirement. Consider what happens if: + // + // 1. Let's say the target release generation is 5. + // 2. A sled is mupdated. + // 3. As a result of the mupdate, we update the target release minimum + // generation to 6. + // 4. Then, an operator sets the target release generation to 6. + // + // At this point, we *do not* want to set the blueprint's minimum + // generation to 7. We only want to do it if we acknowledged a new sled + // getting mupdated. + // + // Some notes: + // + // * We only process sleds that are currently in the inventory. This + // means that if some sleds take longer to come back up than others + // and the target release is updated in the middle, we'll potentially + // bump the minimum generation multiple times, asking the operator to + // intervene each time. + // + // It's worth considering ways to mitigate this in the future: for + // example, we could ensure that for a particular TUF repo a shared + // mupdate override ID is assigned by wicketd, and track the override + // IDs that are currently in flight. + // + // * We aren't handling errors while fetching the mupdate override here. + // We don't have a history of state transitions for the mupdate + // override, so we can't do edge-triggered logic. We probably need + // another channel to report errors. (But in general, errors should be + // rare.) + if actions_by_sled.values().any(|action| { + matches!(action, EnsureMupdateOverrideAction::BpSetOverride { .. }) + }) { + let current = self.blueprint.target_release_minimum_generation(); + let new = self.input.tuf_repo().target_release_generation.next(); + if current == new { + // No change needed. + info!( + log, + "would have updated target release minimum generation, but \ + it was already set to the desired value, so no change was \ + needed"; + "generation" => %current, + ); + } else { + if current < new { + info!( + log, + "updating target release minimum generation based on \ + new set-override actions"; + "current_generation" => %current, + "new_generation" => %new, + ); + } else { + // It would be very strange for the current value to be + // greater than the new value. That would indicate something + // like a row being removed from the target release + // generation table -- one of the invariants of the target + // release generation is that it only moves forward. + // + // In this case we bail out of planning entirely. + return Err( + Error::TargetReleaseMinimumGenerationRollback { + current, + new, + }, + ); + } + self.blueprint + .set_target_release_minimum_generation(current, new) + .expect("current value passed in => can't fail"); + } + } + + // Now we need to determine whether to also perform other actions like + // updating or adding zones. We have to be careful here: + // + // * We may have moved existing zones with an Artifact source to using + // the install dataset via the BpSetOverride action, but we don't want + // to use the install dataset on sleds that weren't MUPdated (because + // the install dataset might be ancient). 
+ // + // * While any overrides are in place according to inventory, we wait + // for the system to recover and don't start new zones on *any* sleds, + // or perform any further updates. + // + // This condition is level-triggered on the following conditions: + // + // 1. If the planning input's target release generation is less than the + // minimum generation set in the blueprint, the operator hasn't set a + // new generation in the blueprint -- we should wait to decide what + // to do until the operator provides an indication. + // + // 2. If any sleds have a mupdate override set in the blueprint, then + // we're still recovering from a MUPdate. If that is the case, we + // don't want to add zones on *any* sled. + // + // This might seem overly conservative (why block zone additions on + // *all* sleds if *any* are currently recovering from a MUPdate?), + // but is probably correct for the medium term: we want to minimize + // the number of different versions of services running at any time. + // + // There's some potential to relax this in the future (e.g. by + // matching up the zone manifest with the target release to compute + // the number of versions running at a given time), but that's a + // non-trivial optimization that we should probably defer until we + // see its necessity. + // + // What does "any sleds" mean in this context? We don't need to care + // about decommissioned or expunged sleds, so we consider in-service + // sleds. + let mut reasons = Vec::new(); + + // Condition 1 above. + if self.blueprint.target_release_minimum_generation() + > self.input.tuf_repo().target_release_generation + { + reasons.push(format!( + "current target release generation ({}) is lower than \ + minimum required by blueprint ({})", + self.input.tuf_repo().target_release_generation, + self.blueprint.target_release_minimum_generation(), + )); + } + + // Condition 2 above. + { + let mut sleds_with_override = BTreeSet::new(); + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + if self + .blueprint + .sled_get_remove_mupdate_override(sled_id)? + .is_some() + { + sleds_with_override.insert(sled_id); + } + } + + if !sleds_with_override.is_empty() { + reasons.push(format!( + "sleds have remove mupdate override set in blueprint: {}", + sleds_with_override.iter().join(", ") + )); + } + } + + if !reasons.is_empty() { + let reasons = reasons.join("; "); + info!( + log, + "not ready to add or update new zones yet"; + "reasons" => reasons, + ); + Ok(UpdateStepResult::Waiting) + } else { + Ok(UpdateStepResult::ContinueToNextStep) + } + } + fn do_plan_cockroachdb_settings(&mut self) { // Figure out what we should set the CockroachDB "preserve downgrade // option" setting to based on the planning input. 
diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs
index fef4bde6a2f..dbae19b3127 100644
--- a/nexus/reconfigurator/planning/src/system.rs
+++ b/nexus/reconfigurator/planning/src/system.rs
@@ -21,6 +21,7 @@ use nexus_sled_agent_shared::inventory::Inventory;
 use nexus_sled_agent_shared::inventory::InventoryDataset;
 use nexus_sled_agent_shared::inventory::InventoryDisk;
 use nexus_sled_agent_shared::inventory::InventoryZpool;
+use nexus_sled_agent_shared::inventory::MupdateOverrideBootInventory;
 use nexus_sled_agent_shared::inventory::OmicronSledConfig;
 use nexus_sled_agent_shared::inventory::SledRole;
 use nexus_sled_agent_shared::inventory::ZoneImageResolverInventory;
@@ -59,12 +60,14 @@ use omicron_common::disk::DiskIdentity;
 use omicron_common::disk::DiskVariant;
 use omicron_common::policy::INTERNAL_DNS_REDUNDANCY;
 use omicron_common::policy::NEXUS_REDUNDANCY;
+use omicron_uuid_kinds::MupdateOverrideUuid;
 use omicron_uuid_kinds::SledUuid;
 use omicron_uuid_kinds::ZpoolUuid;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
 use std::fmt;
 use std::fmt::Debug;
+use std::mem;
 use std::net::Ipv4Addr;
 use std::net::Ipv6Addr;
 use std::sync::Arc;
@@ -535,6 +538,36 @@ impl SystemDescription {
         Ok(sled.sp_inactive_caboose().map(|c| c.version.as_ref()))
     }
 
+    /// Set a sled's mupdate override field.
+    ///
+    /// Returns the previous value, or the previous error if one was set.
+    pub fn sled_set_mupdate_override(
+        &mut self,
+        sled_id: SledUuid,
+        mupdate_override: Option<MupdateOverrideUuid>,
+    ) -> anyhow::Result<Result<Option<MupdateOverrideUuid>, String>> {
+        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
+            format!("attempted to access sled {} not found in system", sled_id)
+        })?;
+        let sled = Arc::make_mut(sled);
+        Ok(sled.set_mupdate_override(Ok(mupdate_override)))
+    }
+
+    /// Set a sled's mupdate override field to an error.
+    ///
+    /// Returns the previous value, or the previous error if one was set.
+    pub fn sled_set_mupdate_override_error(
+        &mut self,
+        sled_id: SledUuid,
+        message: String,
+    ) -> anyhow::Result<Result<Option<MupdateOverrideUuid>, String>> {
+        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
+            format!("attempted to access sled {} not found in system", sled_id)
+        })?;
+        let sled = Arc::make_mut(sled);
+        Ok(sled.set_mupdate_override(Err(message)))
+    }
+
     pub fn set_tuf_repo(&mut self, tuf_repo: TufRepoPolicy) {
         self.tuf_repo = tuf_repo;
     }
@@ -1222,6 +1255,30 @@ impl Sled {
             sign: None,
         }
     }
+
+    /// Set the mupdate override field for a sled, returning the previous value.
+    fn set_mupdate_override(
+        &mut self,
+        mupdate_override_id: Result<Option<MupdateOverrideUuid>, String>,
+    ) -> Result<Option<MupdateOverrideUuid>, String> {
+        // We don't alter the non-boot override because it's not used in this process.
+        let inv = match mupdate_override_id {
+            Ok(Some(id)) => Ok(Some(MupdateOverrideBootInventory {
+                mupdate_override_id: id,
+            })),
+            Ok(None) => Ok(None),
+            Err(message) => Err(message),
+        };
+        let prev = mem::replace(
+            &mut self
+                .inventory_sled_agent
+                .zone_image_resolver
+                .mupdate_override
+                .boot_override,
+            inv,
+        );
+        prev.map(|prev| prev.map(|prev| prev.mupdate_override_id))
+    }
 }
 
 /// The visibility of a sled in the inventory.
diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs
index 72e35004e0a..b59b91ad439 100644
--- a/nexus/types/src/deployment.rs
+++ b/nexus/types/src/deployment.rs
@@ -20,6 +20,8 @@ pub use crate::inventory::ZpoolName;
 use blueprint_diff::ClickhouseClusterConfigDiffTablesForSingleBlueprint;
 use blueprint_display::BpDatasetsTableSchema;
 use daft::Diffable;
+use id_map::Entry;
+use id_map::RefMut;
 use nexus_sled_agent_shared::inventory::HostPhase2DesiredSlots;
 use nexus_sled_agent_shared::inventory::OmicronSledConfig;
 use nexus_sled_agent_shared::inventory::OmicronZoneConfig;
@@ -1130,20 +1132,32 @@ impl PendingMgsUpdates {
         self.by_baseboard.is_empty()
     }
 
-    pub fn contains_key(&self, key: &Arc<BaseboardId>) -> bool {
+    pub fn contains_key(&self, key: &BaseboardId) -> bool {
         self.by_baseboard.contains_key(key)
     }
 
-    pub fn get(
-        &self,
-        baseboard_id: &Arc<BaseboardId>,
-    ) -> Option<&PendingMgsUpdate> {
+    pub fn get(&self, baseboard_id: &BaseboardId) -> Option<&PendingMgsUpdate> {
         self.by_baseboard.get(baseboard_id)
     }
 
+    pub fn get_mut(
+        &mut self,
+        baseboard_id: &BaseboardId,
+    ) -> Option<RefMut<'_, PendingMgsUpdate>> {
+        self.by_baseboard.get_mut(baseboard_id)
+    }
+
+    pub fn entry(
+        &mut self,
+        // TODO: simplify down to &BaseboardId
+        baseboard_id: Arc<BaseboardId>,
+    ) -> Entry<'_, PendingMgsUpdate> {
+        self.by_baseboard.entry(baseboard_id)
+    }
+
     pub fn remove(
         &mut self,
-        baseboard_id: &Arc<BaseboardId>,
+        baseboard_id: &BaseboardId,
     ) -> Option<PendingMgsUpdate> {
         self.by_baseboard.remove(baseboard_id)
     }
diff --git a/sled-agent/config-reconciler/src/ledger.rs b/sled-agent/config-reconciler/src/ledger.rs
index 1c53fd345c1..cd22624caf9 100644
--- a/sled-agent/config-reconciler/src/ledger.rs
+++ b/sled-agent/config-reconciler/src/ledger.rs
@@ -7,7 +7,9 @@
 use camino::Utf8PathBuf;
 use dropshot::HttpError;
 use legacy_configs::convert_legacy_ledgers;
+use nexus_sled_agent_shared::inventory::HostPhase2DesiredSlots;
 use nexus_sled_agent_shared::inventory::OmicronSledConfig;
+use nexus_sled_agent_shared::inventory::OmicronZoneImageSource;
 use omicron_common::api::external::Generation;
 use omicron_common::ledger;
 use omicron_common::ledger::Ledger;
@@ -68,7 +70,7 @@ pub enum LedgerNewConfigError {
     #[error("failed to commit sled config to ledger")]
     LedgerCommitFailed(#[source] ledger::Error),
-    #[error("sled config failed artifact store existence checks: {0}")]
-    ArtifactStoreValidationFailed(String),
+    #[error("sled config failed validation: {0}")]
+    ValidationFailed(String),
 }
 
 impl From<LedgerNewConfigError> for HttpError {
@@ -80,7 +82,7 @@ impl From<LedgerNewConfigError> for HttpError {
         }
         LedgerNewConfigError::GenerationOutdated { .. }
         | LedgerNewConfigError::ConfigurationChanged { .. }
-        | LedgerNewConfigError::ArtifactStoreValidationFailed(_) => {
+        | LedgerNewConfigError::ValidationFailed(_) => {
             HttpError::for_bad_request(None, message)
         }
         LedgerNewConfigError::LedgerCommitFailed(_) => {
@@ -438,21 +440,48 @@ impl LedgerTask {
         }
     }
 
-        // Continue validating the incoming config. For now, the only other
-        // thing we confirm is that any referenced artifacts are present in the
-        // artifact store.
-        let mut artifact_validation_errors = Vec::new();
+        // Continue validating the incoming config:
+        let mut validation_errors = Vec::new();
+
+        // * If the config has a remove_mupdate_override set, then all zones
+        //   should have their image source set to InstallDataset.
+        if let Some(mupdate_override_id) = new_config.remove_mupdate_override {
+            for zone in &new_config.zones {
+                match zone.image_source {
+                    OmicronZoneImageSource::InstallDataset => {}
+                    OmicronZoneImageSource::Artifact { hash } => {
+                        validation_errors.push(format!(
+                            "remove mupdate override \
+                             set to {mupdate_override_id}, but zone {} \
+                             has image source Artifact with hash {hash}",
+                            zone.id,
+                        ));
+                    }
+                }
+            }
+
+            if new_config.host_phase_2
+                != HostPhase2DesiredSlots::current_contents()
+            {
+                validation_errors.push(format!(
+                    "remove mupdate override set to {mupdate_override_id}, but \
+                     host phase 2 contents are not set to current: {:#?}",
+                    new_config.host_phase_2,
+                ));
+            }
+        }
+
+        // * Any referenced artifacts are present in the artifact store.
         for artifact_hash in config_artifact_hashes(new_config) {
             match self.artifact_store.get_artifact(artifact_hash).await {
                 Ok(_file) => (),
                 Err(err) => {
-                    artifact_validation_errors.push(format!("{err:#}"));
+                    validation_errors.push(format!("{err:#}"));
                 }
             }
         }
 
-        if !artifact_validation_errors.is_empty() {
-            return Err(LedgerNewConfigError::ArtifactStoreValidationFailed(
-                artifact_validation_errors.join(", "),
+        if !validation_errors.is_empty() {
+            return Err(LedgerNewConfigError::ValidationFailed(
+                validation_errors.join(", "),
             ));
         }
@@ -664,6 +693,7 @@ mod tests {
     use omicron_test_utils::dev::poll::CondCheckError;
     use omicron_test_utils::dev::poll::wait_for_watch_channel_condition;
     use omicron_uuid_kinds::InternalZpoolUuid;
+    use omicron_uuid_kinds::MupdateOverrideUuid;
     use omicron_uuid_kinds::OmicronZoneUuid;
     use omicron_uuid_kinds::PhysicalDiskUuid;
     use omicron_uuid_kinds::ZpoolUuid;
@@ -1092,7 +1122,7 @@
             .expect("can communicate with task")
             .expect_err("config should fail");
         match err {
-            LedgerNewConfigError::ArtifactStoreValidationFailed(_) => (),
+            LedgerNewConfigError::ValidationFailed(_) => (),
             _ => panic!("unexpected error {}", InlineErrorChain::new(&err)),
         }
@@ -1128,7 +1158,7 @@
             .expect("can communicate with task")
             .expect_err("config should fail");
         match err {
-            LedgerNewConfigError::ArtifactStoreValidationFailed(_) => (),
+            LedgerNewConfigError::ValidationFailed(_) => (),
             _ => panic!("unexpected error {}", InlineErrorChain::new(&err)),
         }
@@ -1151,6 +1181,86 @@
         logctx.cleanup_successful();
     }
 
+    #[tokio::test]
+    async fn reject_configs_with_mupdate_override_and_artifact_image_source() {
+        let logctx = dev::test_setup_log(
+            "reject_configs_with_mupdate_override_and_artifact_image_source",
+        );
+
+        // Set up a test harness with a fake artifact.
+        let artifact_hash = ArtifactHash([0; 32]);
+        let test_harness = TestHarness::with_fake_artifacts(
+            logctx.log.clone(),
+            [artifact_hash].into_iter(),
+        )
+        .await;
+
+        // Create a config that references a zone with this artifact hash, and
+        // with remove_mupdate_override set to a value.
+        let mut config = make_nonempty_sled_config();
+        config.remove_mupdate_override = Some(MupdateOverrideUuid::max());
+        config
+            .zones
+            .insert(make_dummy_zone_config_using_artifact_hash(artifact_hash));
+
+        // The ledger task should reject this config because the mupdate
+        // override is set but the zone's image source is Artifact rather
+        // than InstallDataset.
+        let err = test_harness
+            .task_handle
+            .set_new_config(config.clone())
+            .await
+            .expect("can communicate with task")
+            .expect_err("config should fail");
+        match err {
+            LedgerNewConfigError::ValidationFailed(message) => {
+                assert!(
+                    message.contains("remove mupdate override set to"),
+                    "error message contains \"remove mupdate override \
+                     set to\": {message}"
+                );
+            }
+            _ => panic!("unexpected error {}", InlineErrorChain::new(&err)),
+        }
+
+        // Try a config where the host phase 2 contents are not set to
+        // CurrentContents.
+        config.generation = config.generation.next();
+        config.zones = IdMap::new();
+        config.host_phase_2 = HostPhase2DesiredSlots {
+            slot_a: HostPhase2DesiredContents::CurrentContents,
+            slot_b: HostPhase2DesiredContents::Artifact { hash: artifact_hash },
+        };
+        let err = test_harness
+            .task_handle
+            .set_new_config(config.clone())
+            .await
+            .expect("can communicate with task")
+            .expect_err("config should fail");
+        match err {
+            LedgerNewConfigError::ValidationFailed(message) => {
+                assert!(
+                    message.contains("remove mupdate override set to"),
+                    "error message contains \"remove mupdate override \
+                     set to\": {message}"
+                );
+            }
+            _ => panic!("unexpected error {}", InlineErrorChain::new(&err)),
+        }
+
+        // Reset host phase 2 to its current contents; with no zones left
+        // that reference artifacts, this config should be accepted.
+        config.host_phase_2 = HostPhase2DesiredSlots::current_contents();
+
+        test_harness
+            .task_handle
+            .set_new_config(config)
+            .await
+            .expect("can communicate with task")
+            .expect("config should be ledgered");
+
+        logctx.cleanup_successful();
+    }
+
     #[tokio::test]
     async fn reject_artifact_configs_removing_referenced_artifacts() {
         let logctx = dev::test_setup_log(

From 27bb944fd078be9a016f8f3a9558434b9705238d Mon Sep 17 00:00:00 2001
From: Rain
Date: Thu, 10 Jul 2025 23:11:09 +0000
Subject: [PATCH 2/3] updates

Created using spr 1.3.6-beta.1
---
 sled-agent/types/src/zone_images.rs     | 22 ++++++-
 .../zone-images/src/mupdate_override.rs | 65 ++++++++++++++++++-
 2 files changed, 82 insertions(+), 5 deletions(-)

diff --git a/sled-agent/types/src/zone_images.rs b/sled-agent/types/src/zone_images.rs
index 16300fbea1f..371d22eb8f3 100644
--- a/sled-agent/types/src/zone_images.rs
+++ b/sled-agent/types/src/zone_images.rs
@@ -888,8 +888,10 @@ pub enum ClearMupdateOverrideNonBootResult {
     /// No status was found for the non-boot disk, possibly indicating the
     /// non-boot disk being missing at the time Sled Agent was started.
     NoStatus,
-
-    /// The internal disk was missing
+
+    /// The disk was missing from the latest InternalDisksWithBootDisk but was
+    /// present at startup. The on-disk data was not altered.
+    DiskMissing,
 
     /// No mupdate override was found on the non-boot disk.
     NoOverride,
@@ -939,6 +941,13 @@ impl ClearMupdateOverrideNonBootResult {
                     started, mupdate override not cleared"
                 );
             }
+            ClearMupdateOverrideNonBootResult::DiskMissing => {
+                warn!(
+                    log,
+                    "non-boot disk missing from latest InternalDisks, \
+                     mupdate override not cleared"
+                );
+            }
             ClearMupdateOverrideNonBootResult::NoOverride => {
                 warn!(
                     log,
@@ -980,7 +989,14 @@ impl fmt::Display for ClearMupdateOverrideNonBootDisplay<'_> {
                 write!(
                     f,
                     "no status was available when sled-agent was started, \
-                     so not cleared"
+                     so mupdate override not cleared"
                 )
             }
+            ClearMupdateOverrideNonBootResult::DiskMissing => {
+                write!(
+                    f,
+                    "non-boot disk missing from latest InternalDisks, \
+                     mupdate override not cleared"
+                )
+            }
             ClearMupdateOverrideNonBootResult::NoOverride => {
diff --git a/sled-agent/zone-images/src/mupdate_override.rs b/sled-agent/zone-images/src/mupdate_override.rs
index b59d2c7a0b0..bbd3a505884 100644
--- a/sled-agent/zone-images/src/mupdate_override.rs
+++ b/sled-agent/zone-images/src/mupdate_override.rs
@@ -169,15 +169,76 @@ impl AllMupdateOverrides {
 
     // Are there any non-boot disks that were originally read at startup but
     // are missing from InternalDisksWithBootDisk?
-    for non_boot_disk_override in &self.non_boot_disk_overrides {
+    for mut non_boot_disk_override in &mut self.non_boot_disk_overrides {
         if !non_boot_disk_info
             .contains_key(&non_boot_disk_override.zpool_id)
         {
+            // If the boot disk was successfully cleared, we may have
+            // introduced a mismatch.
+            if let Ok(boot_disk_info) = &boot_disk_result {
+                let new_result = match &non_boot_disk_override.result {
+                    MupdateOverrideNonBootResult::MatchesPresent => {
+                        // The non-boot disk matched the boot disk's old
+                        // value; with the boot override now cleared, only
+                        // the non-boot disk still carries it.
+                        MupdateOverrideNonBootResult::Mismatch(
+                            MupdateOverrideNonBootMismatch::BootAbsentOtherPresent {
+                                non_boot_disk_info: boot_disk_info.clone(),
+                            },
+                        )
+                    }
+                    MupdateOverrideNonBootResult::MatchesAbsent => {
+                        unreachable!(
+                            "boot disk absent means that \
+                             boot_disk_result is always an error"
+                        )
+                    }
+                    MupdateOverrideNonBootResult::Mismatch(
+                        MupdateOverrideNonBootMismatch::BootPresentOtherAbsent
+                    ) => {
+                        // The mupdate override file is now absent from both
+                        // the boot and the non-boot disk, so this goes from
+                        // mismatch to MatchesAbsent.
+                        MupdateOverrideNonBootResult::MatchesAbsent
+                    }
+                    MupdateOverrideNonBootResult::Mismatch(
+                        MupdateOverrideNonBootMismatch::BootAbsentOtherPresent {
+                            ..
+                        },
+                    ) => {
+                        unreachable!(
+                            "boot disk absent means that \
+                             boot_disk_result is always an error"
+                        )
+                    }
+                    MupdateOverrideNonBootResult::Mismatch(
+                        MupdateOverrideNonBootMismatch::ValueMismatch {
+                            non_boot_disk_info,
+                        },
+                    ) => {
+                        // The two disks disagreed before; with the boot
+                        // override now cleared, this becomes
+                        // BootAbsentOtherPresent, keeping the non-boot
+                        // disk's value.
+                        MupdateOverrideNonBootResult::Mismatch(
+                            MupdateOverrideNonBootMismatch::BootAbsentOtherPresent {
+                                non_boot_disk_info: non_boot_disk_info.clone(),
+                            }
+                        )
+                    }
+                    MupdateOverrideNonBootResult::Mismatch(
+                        MupdateOverrideNonBootMismatch::BootDiskReadError {
+                            ..
+                        },
+                    ) => {
+                        unreachable!(
+                            "boot disk read error means that \
+                             boot_disk_result is always an error"
+                        )
+                    }
+                    MupdateOverrideNonBootResult::ReadError(_) => {
+                        // The non-boot disk couldn't be read at startup;
+                        // clearing the boot override tells us nothing new
+                        // about it, so keep the error as-is.
+                        non_boot_disk_override.result.clone()
+                    }
+                };
+
+                non_boot_disk_override.result = new_result;
+            }
             non_boot_disk_info
                 .insert_unique(ClearMupdateOverrideNonBootInfo {
                     zpool_id: non_boot_disk_override.zpool_id,
                     path: Some(non_boot_disk_override.path.clone()),
-                    result: ClearMupdateOverrideNonBootResult::NoStatus,
+                    result: ClearMupdateOverrideNonBootResult::DiskMissing,
                 })
                 .expect("non-boot zpool IDs should be unique");
         }

From 4eee3d7d55b93ac1410ec32a1cb9a8e66ad84a46 Mon Sep 17 00:00:00 2001
From: Rain
Date: Tue, 22 Jul 2025 02:19:41 +0000
Subject: [PATCH 3/3] rustfmt

Created using spr 1.3.6-beta.1
---
 nexus/db-queries/src/db/datastore/inventory.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs
index f154da72a42..cb7b446b65f 100644
--- a/nexus/db-queries/src/db/datastore/inventory.rs
+++ b/nexus/db-queries/src/db/datastore/inventory.rs
@@ -71,8 +71,8 @@ use nexus_db_model::{
 };
 use nexus_db_model::{HwPowerState, InvZoneManifestNonBoot};
 use nexus_db_model::{HwRotSlot, InvMupdateOverrideNonBoot};
-use nexus_db_schema::enums::HwM2SlotEnum;
 use nexus_db_model::{InvCaboose, InvClearMupdateOverride};
+use nexus_db_schema::enums::HwM2SlotEnum;
 use nexus_db_schema::enums::HwRotSlotEnum;
 use nexus_db_schema::enums::RotImageErrorEnum;
 use nexus_db_schema::enums::RotPageWhichEnum;
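Looking back at the ledger change in the first patch, the invariant the new validation enforces can be summarized as a predicate. This is a condensed sketch (a hypothetical free function; in the patch the check lives inline in LedgerTask), using only types that appear above:

use nexus_sled_agent_shared::inventory::{
    HostPhase2DesiredSlots, OmicronSledConfig, OmicronZoneImageSource,
};

// Hypothetical predicate mirroring the inline ledger validation: a config
// that carries remove_mupdate_override must run every zone from the install
// dataset and leave both host phase 2 slots at their current contents.
fn mupdate_override_config_ok(config: &OmicronSledConfig) -> bool {
    if config.remove_mupdate_override.is_none() {
        // No override being removed: nothing extra to enforce here.
        return true;
    }
    for zone in &config.zones {
        // Every zone must boot from the install dataset.
        if !matches!(zone.image_source, OmicronZoneImageSource::InstallDataset)
        {
            return false;
        }
    }
    // Host phase 2 must keep its current contents on both slots.
    config.host_phase_2 == HostPhase2DesiredSlots::current_contents()
}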