diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..9003fbb --- /dev/null +++ b/.flake8 @@ -0,0 +1,42 @@ +[flake8] +ignore = + # Module level import not at top of file + E402 + # Whitespace before ':'; Removed per Black documentation + E203 + # Invalid escape sequence + W605 + # Python3.7+ compatibility checks + W606 + # Ambiguous variable name l + E741 + # Line break occurred before a binary operator + W503 + # Missing docstring in public module + D100 + # Missing docstring in public class + D101 + # Missing docstring in public method + D102 + # Missing docstring in public function + D103 + # Missing docstring in public package + D104 + # Missing docstring in magic method + D105 + # Missing docstring in __init__ + D107 + # One-line docstring should fit on one line with quotes + D200 + # No blank lines allowed after function docstring + D202 + # 1 blank line required between summary line and description + D205 + # First line should end with a period + D400 + # First line should be in imperative mood + D401 +max-line-length = 79 +max-complexity = 18 +select = B,C,D,E,F,W,T4,B9 +extend-ignore = E203 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..88d8df7 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,74 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "master", "develop" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "master", "develop" ] + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # â„šī¸ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" \ No newline at end of file diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 0000000..6b2fa0c --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,18 @@ +# +# Copyright (c) 2022 by Delphix. All rights reserved. +# + +[settings] +default_section=THIRDPARTY + +extra_standard_library=posixpath,ntpath,Queue + +# Every import should try its best to be on one line +force_single_line=True + +# Settings needed to be compatible with black +multi_line_output=3 +include_trailing_comma=True +force_grid_wrap=0 +use_parentheses=True +line_length=79 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..0bb07e1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,40 @@ +repos: +- repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + exclude: > + (?x)^( + )$ + args: [--line-length=79] +- repo: https://github.com/pycqa/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + exclude: > + (?x)^( + )$ +- repo: local + hooks: + - id: copyright + name: copyright + entry: copyright.sh + language: script + types: [text] + exclude: > + (?x)^( + .flake8| + .pre-commit-config.yaml| + pyproject.toml| + schema.json| + .*__init__.py| + src/templates/service_file_template.txt| + src/config/logger_conf.ini| + README.md| + .github/workflows/codeql.yml| + .github/CODEOWNERS| + .isort.cfg| + LICENSE| + .gitignore| + plugin_config.yml + )$ diff --git a/copyright.sh b/copyright.sh new file mode 100755 index 0000000..e9c7812 --- /dev/null +++ b/copyright.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Copyright (c) 2022, 2023 by Delphix. All rights reserved. +# + +function verify_copyright() { + file=$1 + current_year=$(date +%Y) + if [[ $(grep -e "Copyright (c).*$current_year .*Delphix. All rights reserved." "$file") ]] ; then + return 0 + else + echo "Copyright check failed for file: $file" + return 1 + fi + +} + +code=0 +for file in "$@" ; do + verify_copyright "$file" + code=$(($? 
+ $code)) +done +exit $code diff --git a/docs/.gitignore b/docs/.gitignore deleted file mode 100644 index c6c3afe..0000000 --- a/docs/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -site/ -.DS_Store -build.sh -.venv/ diff --git a/docs/.pages b/docs/.pages deleted file mode 100644 index 482eb66..0000000 --- a/docs/.pages +++ /dev/null @@ -1,7 +0,0 @@ -arrange: - - Overview.md - - InstallPlugin.md - - BuildPlugin.md - - Discovery.md - - Linking.md - - Provisioning.md diff --git a/docs/Pipfile b/docs/Pipfile deleted file mode 100644 index dc5d7ae..0000000 --- a/docs/Pipfile +++ /dev/null @@ -1,15 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -mkdocs = "==0.17.5" -mkdocs-material = "==2.9.2" -markdown-include = "==0.5.1" -mkdocs-awesome-pages-plugin = "==1.2.0" - -[requires] -python_version = "3.7" - -[dev-packages] diff --git a/docs/docs/.pages b/docs/docs/.pages deleted file mode 100644 index 73c79c0..0000000 --- a/docs/docs/.pages +++ /dev/null @@ -1,7 +0,0 @@ -arrange: - - Overview: 'index.md' - - [Installation]: 'Installation.md' - - PluginDevelopment : 'PluginDevelopment.md' - - Discovery: 'Discovery.md' - - Linking: 'Linking.md' - - Provisioning: 'Provisioning.md' diff --git a/docs/docs/Discovery.md b/docs/docs/Discovery.md deleted file mode 100644 index 0b5a76f..0000000 --- a/docs/docs/Discovery.md +++ /dev/null @@ -1,79 +0,0 @@ -# Discovery - - -Environment discovery is a process that enables the Couchbase Plugin to determine Couchbase installation details on a host. Database discovery is initiated during the environment set up process -. Whenever there is any change (installing a new database home) to an already set up environment in the Delphix application, we need to perform an environment refresh. - - -Prerequisites -============= - -- A source environment must be added to the Delphix Engine. -- Installation of the Couchbase Plugin is required before the Discovery. -- Environment variable `$COUCHBASE_PATH ` should set on staging/target host, which contains the binary path of Couchbase. - - -Refreshing an Environment -========================= -Environment refresh will update the metadata associated with that environment and send a new Plugin to the host. - -1. Login to the **Delphix Management** application. -2. Click **Manage**. -3. Select **Environments**. -4. In the Environments panel, click the name of the environment you want to refresh. -5. Select the **Refresh** icon. -6. In the Refresh confirmation dialog select **Refresh**. - -![Screenshot](/couchbase-plugin/image/image9.png) - - -XDCR Setup -=================== -Environments exist to contain `repositories`, and each environment may have any number of repositories associated with it. -`Repository` contains database instances and in each repository any number of `SourceConfig` objects, which represent known database instances. Source config is not generated automatically in - Couchbase plugin. Therefore, we need to add `SourceConfig` object through which can create a dSource. - -1. Login to the **Delphix Management** application. -2. Click **Manage**. -3. Select **Environments**. -4. Select the repository. -5. Click on **+** icon (Shown in next image). - - -![Screenshot](/couchbase-plugin/image/image10.png) - - -6. Add required details in the `Add database` section. - - Enter port number in **Source Couchbase port** section. - - Enter source host address in section **Source Host**. - - Enter unique name for the staging database in **identify field** section. 
- - Enter Couchbase data path of staging host in **DB data path** section. - - -![Screenshot](/couchbase-plugin/image/image11.png) - - -CBBACKUPMGR Setup -================= - -The steps to add source config remain the same as we saw in XDCR setup. In this approach, we don't connect to source environment as this is zero-touch production approach. -We can enter any random or dummy value in this field of source host name when we choose CBBACKUPMGR option for data ingestion. - -1. Login to the **Delphix Management** application. -2. Click **Manage**. -3. Select **Environments**. -4. Select the repository. -5. Click on **+** icon (Shown in next image). -![Screenshot](/couchbase-plugin/image/image10.png) - -6. In the **Add Database** section enter the following information: - - `Source Couchbase port`: This is the port number to be used by Couchbase services. - - `Source Host`: Leave this field as blank. - - `identity field`: Provide unique name for staging database. - - `DB data path`: Leave this field as blank. - - -![Screenshot](/couchbase-plugin/image/image11.png) - - - diff --git a/docs/docs/Installation.md b/docs/docs/Installation.md deleted file mode 100644 index 5b9bc07..0000000 --- a/docs/docs/Installation.md +++ /dev/null @@ -1,39 +0,0 @@ -# Plugin Installation - - -Prerequisites -============= - -- Install Delphix Engine of version 5.3.x. -- Install Couchbase binaries on source, staging, and target servers - - -Installing the Couchbase Plugin -=============================== -Method1: Using GUI ------------------- - -1. Click on **Manage** (present on top-left of this page) and then **Plugins**. -![Screenshot](/couchbase-plugin/image/image2.png) - -2. Click on **+** icon. -![Screenshot](/couchbase-plugin/image/image3.png) - -3. Click on **Upload** a plugin. -![Screenshot](/couchbase-plugin/image/image4.png) - -4. Select the `build(artifacts.json)` from your device. -![Screenshot](/couchbase-plugin/image/image5.png) - -5. Click on **close** button. -![Screenshot](/couchbase-plugin/image/image6.png) - -6. See the plugin version in **Plugins** section. -![Screenshot](/couchbase-plugin/image/image7.png) - - -Method2: Using dvp command ---------------------------- - `dvp upload -e -u --password ` - -Delphix Engine's documentation on installing plugins: [PluginManagement](https://docs.delphix.com/docs/datasets/unstructured-files-and-app-data/delphix-engine-plugin-management). diff --git a/docs/docs/Linking.md b/docs/docs/Linking.md deleted file mode 100644 index 2bb90e3..0000000 --- a/docs/docs/Linking.md +++ /dev/null @@ -1,93 +0,0 @@ -# Linking - -Linking a data source will create a dSource object on the engine and allow Delphix to ingest data from this source. The dSource is an object that the Delphix Virtualization Engine uses to create and update virtual copies of your database. - - - -Prerequisites -============= - -Discovery and source config object should be created on the staging host before proceeding to link Couchbase dataset. - - -Creating dSource -============== - - -1. Login to **Delphix Management** application. -2. Click **Manage** > **Datasets**. -3. Select **Add dSource**. -4. In the Add dSource wizard, select the Couchbase source configuration which is created on the staging host. -5. Enter the Couchbase-specific parameters for your dSource configuration. -6. Select the dSource approach from the drop-down (XDCR and Couchbase Backup Manager) available on dSource wizard. -7. Based on approach selection, follow the steps either for XDCR or Couchbase Backup Manager method. 
The Description of both methods is below. - - - Method1: XDCR --------------- -Cross datacenter replication allows data to be replicated across clusters that are potentially located in different data centers. - - -1. Enter the details for **Staging Couchbase host** - FQDN or IP address recommended. -2. Enter the details for **Staging Port Number** available on the staging host. The default port for couchbase is 8091. -3. Enter the details for **Mount Path** available on the staging host. This empty folder acts as a base for NFS mounts. -4. Enter the details for **Staging Cluster Name** to setup new cluster on the staging host. -5. Enter the configuration details for your staging cluster as per resource availability on the staging host. - - Cluster RAM Size - - Cluster Index RAM Size - - Cluster FTS RAM Size - - Cluster Eventing RAM Size - - Cluster Analysis RAM Size -![Screenshot](/couchbase-plugin/image/image12.png) - -6. Click on **+** plus symbol to modify configuration settings. Mention bucket list for which cross datacenter replication (XDCR) only be enabled. -![Screenshot](/couchbase-plugin/image/image14.png) - -7. Enter the details of **Bucket Name** to be part of XDCR. Then click on **Next** button -![Screenshot](/couchbase-plugin/image/image15.png) - -8. Provide the details for **dSource Name** and **Target group** on the dSource configuration page. -![Screenshot](/couchbase-plugin/image/image16.png) - -9. On the **Data management** page, select the following: - - Staging Environment: This will be your staging host where source config was created. - - User: Database OS user with required privileges for linking the dataset. -10. On the next section, review the configuration and click on **Next** button to view the summary. -11. Click the **Submit** button which will initiate the linking process. -![Screenshot](/couchbase-plugin/image/image17.png) -12. Once dSource is created successfully, you can review the datasets on **Manage** > **Datasets** > **dSource Name**. -![Screenshot](/couchbase-plugin/image/image19.png) - - -Method2: Couchbase Backup Manager ---------------------------------------- -**Note**: Follow the instructions below before creating dSource to avoid source/production server dependency. - -- Provide source server buckets related information in a file: */tmp/couchbase_src_bucket_info.cfg*. - `/opt/couchbase/bin/couchbase-cli bucket-list --cluster :8091 --username $username --password $password` - -- **Backup Repository**: This file will be required at the time of dSource creation using CBBACKUPMGR. - `/opt/couchbase/bin/cbbackupmgr config --archive /u01/couchbase_backup --repo delphix` - -- **Backup Location**: Get data from source host in backup directory of staging host. -`/opt/couchbase/bin/cbbackupmgr backup -a /u01/couchbase_backup -r delphix -c couchbase:// -u user -p password` - - -**Procedure**: - - -1. Login to **Delphix Management** application. -2. Click **Manage** > **Datasets**. -3. Select **Add dSource**. -4. In the **Add dSource wizard**, select the Couchbase source configuration you created on the staging host. -5. Enter the Couchbase-specific parameters for your dSource configuration. -6. Select the dSource type from the drop-down available on dSource wizard. -7. When we select CBBACKUPMGR as dSource Type, the following fields on dSource wizard are mandatory. - - Enter the details for **Backup Location** where the backup files generated through CBBACKUPMGR are present on the staging host. 
- - Enter the details for **Backup Repository** that contains a backup configuration of staging host. -8. The remaining steps for CBBACKUPMGR ingestion are similar to XDCR. Use steps from the second point mentioned in XDCR method. - -Note: When we select dSource type as Couchbase Backup Manager, we do not require any details for the `Staging Couchbase Host` field. - -![Screenshot](/couchbase-plugin/image/image22.png) - diff --git a/docs/docs/PluginDevelopment.md b/docs/docs/PluginDevelopment.md deleted file mode 100644 index bf00e22..0000000 --- a/docs/docs/PluginDevelopment.md +++ /dev/null @@ -1,42 +0,0 @@ -Building a Plugin ------------------ - - 1. Create a virtual environment and use the following script to install the required libraries (pytest, pytest-html, and pytest-cov): - i) `cd /Users//Desktop/Plugins/OpenSourceCouchbase/couchbase-plugin` - - ii) `./test/virtualEnvSetup.sh "MyLocalEnv"` - - 2. Run this command to activate the virtual environment created in step 1. - `. test/MyLocalEnv/bin/activate` - - 3. Build the source code. It generates the build with name `artifact.json`: - `dvp build` - -Uploading a Plugin ------------------- - Upload the `artifact.json` (generated in step 3) on Delphix Engine: -```bash - dvp upload -e -u --password -``` - - -Unit Test ---------- - Unit test run: Make sure to build the source code (using `dvp build - `) before running unit tests. Execute below command to run unit tests: - ` pytest test/`. - -Summary Report ---------------- -A report with the name `Report.html` gets generated in the `test` directory which contains the summary of test passed vs failed. If any test case fails then a complete stack trace can be seen in that - test case section. - -Module wise coverage report ---------------------------- -There is a report folder `CodeCoverage`(can change the folder name in config file `pytest.ini`) generated in `test` directory, which contains html files. These files help in source code coverage visualization, in which we can see statements processed and missed in each module of source code. - - -Debugging Plugin Logs ---------------------- -Download the Plugin logs using the following command: -```dvp download-logs -c plugin_config.yml -e -u admin --password ``` diff --git a/docs/docs/Provisioning.md b/docs/docs/Provisioning.md deleted file mode 100644 index b99e8e2..0000000 --- a/docs/docs/Provisioning.md +++ /dev/null @@ -1,45 +0,0 @@ -# Provisioning - -Virtual databases are a virtualized copies of dSource. - -Prerequisites -============= - -- Required a linked dSource from a source host. -- Added compatible target environment on Delphix Engine. - - -Provisioning a VDB -================== - -1. Click on the icon highlighted in red color. -![Screenshot](/couchbase-plugin/image/image24.png) - -2. Select the target host from the dropdown on which VDB needs to be created. -![Screenshot](/couchbase-plugin/image/image25.png) - -3. Enter the following values for the target configuration: - - `Target Port Number`: Port number on which Couchbase services will be started. - - `Mount Path`: NFS mount path where dSource snapshot will be mounted by Engine. - - `Target Cluster name`: Cluster name which is required to be set up on the target host. - - `Cluster Ram Size` - - `Cluster Index Ram Size` - - `Cluster FTS Ram Size` - - `Cluster Eventing Ram Size` - - `Cluster Analytics Ram Size` - - `Target couchbase Admin User` - - `Target couchbase Admin password` -![Screenshot](/couchbase-plugin/image/image26.png) - -4. 
Provision vFiles: Add VDB name and target group. -![Screenshot](/couchbase-plugin/image/image27.png) - -5. No need to add Policies, select **Next**. - -6. No need to add Masking, select **Next**. - -7. No need to add Hooks, select **Next**. - -8. Preview the summary and select **Submit**. - -9. Once the VDB is created successfully, you can review the datasets on **Manage** > **Datasets** > **vdb Name**. diff --git a/docs/docs/image/AddDatabase.png b/docs/docs/image/AddDatabase.png deleted file mode 100644 index dd78f79..0000000 Binary files a/docs/docs/image/AddDatabase.png and /dev/null differ diff --git a/docs/docs/image/LinkingWizard.png b/docs/docs/image/LinkingWizard.png deleted file mode 100644 index 8f8ea4c..0000000 Binary files a/docs/docs/image/LinkingWizard.png and /dev/null differ diff --git a/docs/docs/image/PostDiscovery.png b/docs/docs/image/PostDiscovery.png deleted file mode 100644 index bc8b4dd..0000000 Binary files a/docs/docs/image/PostDiscovery.png and /dev/null differ diff --git a/docs/docs/image/PostUpload.png b/docs/docs/image/PostUpload.png deleted file mode 100644 index ff26948..0000000 Binary files a/docs/docs/image/PostUpload.png and /dev/null differ diff --git a/docs/docs/image/Resync.png b/docs/docs/image/Resync.png deleted file mode 100644 index 114b686..0000000 Binary files a/docs/docs/image/Resync.png and /dev/null differ diff --git a/docs/docs/image/delphix-logo-black1x.svg b/docs/docs/image/delphix-logo-black1x.svg deleted file mode 100644 index 2e34dfe..0000000 --- a/docs/docs/image/delphix-logo-black1x.svg +++ /dev/null @@ -1 +0,0 @@ -delphix-logo-black1x \ No newline at end of file diff --git a/docs/docs/image/delphix-logo-white.png b/docs/docs/image/delphix-logo-white.png deleted file mode 100644 index 8010286..0000000 Binary files a/docs/docs/image/delphix-logo-white.png and /dev/null differ diff --git a/docs/docs/image/image1.png b/docs/docs/image/image1.png deleted file mode 100644 index ae0881d..0000000 Binary files a/docs/docs/image/image1.png and /dev/null differ diff --git a/docs/docs/image/image10.png b/docs/docs/image/image10.png deleted file mode 100644 index 4f66229..0000000 Binary files a/docs/docs/image/image10.png and /dev/null differ diff --git a/docs/docs/image/image11.png b/docs/docs/image/image11.png deleted file mode 100644 index 7d36e4c..0000000 Binary files a/docs/docs/image/image11.png and /dev/null differ diff --git a/docs/docs/image/image12.png b/docs/docs/image/image12.png deleted file mode 100644 index d7e7474..0000000 Binary files a/docs/docs/image/image12.png and /dev/null differ diff --git a/docs/docs/image/image13.png b/docs/docs/image/image13.png deleted file mode 100644 index 6294c34..0000000 Binary files a/docs/docs/image/image13.png and /dev/null differ diff --git a/docs/docs/image/image14.png b/docs/docs/image/image14.png deleted file mode 100644 index 0414bba..0000000 Binary files a/docs/docs/image/image14.png and /dev/null differ diff --git a/docs/docs/image/image15.png b/docs/docs/image/image15.png deleted file mode 100644 index 450164b..0000000 Binary files a/docs/docs/image/image15.png and /dev/null differ diff --git a/docs/docs/image/image16.png b/docs/docs/image/image16.png deleted file mode 100644 index 6d6cf62..0000000 Binary files a/docs/docs/image/image16.png and /dev/null differ diff --git a/docs/docs/image/image17.png b/docs/docs/image/image17.png deleted file mode 100644 index 7b8d988..0000000 Binary files a/docs/docs/image/image17.png and /dev/null differ diff --git a/docs/docs/image/image18.png 
b/docs/docs/image/image18.png deleted file mode 100644 index d27e6c4..0000000 Binary files a/docs/docs/image/image18.png and /dev/null differ diff --git a/docs/docs/image/image19.png b/docs/docs/image/image19.png deleted file mode 100644 index f0029dc..0000000 Binary files a/docs/docs/image/image19.png and /dev/null differ diff --git a/docs/docs/image/image2.png b/docs/docs/image/image2.png deleted file mode 100644 index c9e4742..0000000 Binary files a/docs/docs/image/image2.png and /dev/null differ diff --git a/docs/docs/image/image20.png b/docs/docs/image/image20.png deleted file mode 100644 index c7d120c..0000000 Binary files a/docs/docs/image/image20.png and /dev/null differ diff --git a/docs/docs/image/image21.png b/docs/docs/image/image21.png deleted file mode 100644 index 5c6aa16..0000000 Binary files a/docs/docs/image/image21.png and /dev/null differ diff --git a/docs/docs/image/image22.png b/docs/docs/image/image22.png deleted file mode 100644 index 9f66248..0000000 Binary files a/docs/docs/image/image22.png and /dev/null differ diff --git a/docs/docs/image/image23.png b/docs/docs/image/image23.png deleted file mode 100644 index e7b31fd..0000000 Binary files a/docs/docs/image/image23.png and /dev/null differ diff --git a/docs/docs/image/image24.png b/docs/docs/image/image24.png deleted file mode 100644 index 1a5af97..0000000 Binary files a/docs/docs/image/image24.png and /dev/null differ diff --git a/docs/docs/image/image25.png b/docs/docs/image/image25.png deleted file mode 100644 index b47ec7a..0000000 Binary files a/docs/docs/image/image25.png and /dev/null differ diff --git a/docs/docs/image/image26.png b/docs/docs/image/image26.png deleted file mode 100644 index aa73e15..0000000 Binary files a/docs/docs/image/image26.png and /dev/null differ diff --git a/docs/docs/image/image27.png b/docs/docs/image/image27.png deleted file mode 100644 index 7bc93fe..0000000 Binary files a/docs/docs/image/image27.png and /dev/null differ diff --git a/docs/docs/image/image28.png b/docs/docs/image/image28.png deleted file mode 100644 index 4762cf7..0000000 Binary files a/docs/docs/image/image28.png and /dev/null differ diff --git a/docs/docs/image/image29.png b/docs/docs/image/image29.png deleted file mode 100644 index ba9c099..0000000 Binary files a/docs/docs/image/image29.png and /dev/null differ diff --git a/docs/docs/image/image3.png b/docs/docs/image/image3.png deleted file mode 100644 index d8fa516..0000000 Binary files a/docs/docs/image/image3.png and /dev/null differ diff --git a/docs/docs/image/image4.png b/docs/docs/image/image4.png deleted file mode 100644 index 375c6d7..0000000 Binary files a/docs/docs/image/image4.png and /dev/null differ diff --git a/docs/docs/image/image5.png b/docs/docs/image/image5.png deleted file mode 100644 index 20f406e..0000000 Binary files a/docs/docs/image/image5.png and /dev/null differ diff --git a/docs/docs/image/image6.png b/docs/docs/image/image6.png deleted file mode 100644 index 562d7de..0000000 Binary files a/docs/docs/image/image6.png and /dev/null differ diff --git a/docs/docs/image/image7.png b/docs/docs/image/image7.png deleted file mode 100644 index 01349a9..0000000 Binary files a/docs/docs/image/image7.png and /dev/null differ diff --git a/docs/docs/image/image8.png b/docs/docs/image/image8.png deleted file mode 100644 index 527e935..0000000 Binary files a/docs/docs/image/image8.png and /dev/null differ diff --git a/docs/docs/image/image9.png b/docs/docs/image/image9.png deleted file mode 100644 index 56f4135..0000000 Binary files 
a/docs/docs/image/image9.png and /dev/null differ diff --git a/docs/docs/image/logo.png b/docs/docs/image/logo.png deleted file mode 100644 index b65b41f..0000000 Binary files a/docs/docs/image/logo.png and /dev/null differ diff --git a/docs/docs/image/logo1.jpg b/docs/docs/image/logo1.jpg deleted file mode 100644 index ce9933b..0000000 Binary files a/docs/docs/image/logo1.jpg and /dev/null differ diff --git a/docs/docs/image/logo11.png b/docs/docs/image/logo11.png deleted file mode 100644 index f589b8e..0000000 Binary files a/docs/docs/image/logo11.png and /dev/null differ diff --git a/docs/docs/image/logo2.png b/docs/docs/image/logo2.png deleted file mode 100644 index 5212949..0000000 Binary files a/docs/docs/image/logo2.png and /dev/null differ diff --git a/docs/docs/index.md b/docs/docs/index.md deleted file mode 100644 index 7843367..0000000 --- a/docs/docs/index.md +++ /dev/null @@ -1,56 +0,0 @@ -# Overview - -Couchbase plugin is developed to virtualize Couchbase data source leveraging the following built-in couchbase technologies: - - -- Cross Data Center Replication (XDCR) allows data to be replicated across clusters that are potentially located in different data centers. -- Cbbackupmgr allows data to be restored on staging host of Couchbase Server. - -Ingesting Couchbase ----------------- - -1. Couchbase cluster/single instance using (XDCR ingestion mechanism). -2. Couchbase backup piece using (CBBACKUPMGR ingestion mechanism) - Zero Touch Production. - -### Prerequisites -**Source Requirements:** Couchbase database user with the following privileges: - -* XDCR_ADMIN -* DATA_MONITOR - -**Staging Requirements**: O/S user with the following privileges: - -1. Regular o/s user. -2. Execute access on couchbase binaries [ chmod -R 775 /opt/couchbase ]. -3. Empty folder on host to hold delphix toolkit [ approximate 2GB free space ]. -4. Empty folder on host to mount nfs filesystem. This is just an empty folder with no space requirements and acts as a base folder for NFS mounts. -5. sudo privileges for mount and umount. See sample below assuming `delphix_os` is used as delphix user. - ```shell - Defaults:delphixos !requiretty - delphixos ALL=NOPASSWD: \ - /bin/mount, /bin/umount - ``` -6. Customers who intend to use CBBACKUPMGR (Couchbase backup manager ingestion) must follow the instructions to avoid source/production server dependency. - - * Provide all source server buckets related information (using the command below) in a file and place where `/couchbase_src_bucket_info.cfg`: - `/opt/couchbase/bin/couchbase-cli bucket-list --cluster :8091 --username $username --password $password` - - * Create config file using the following command. This file will be required at the time of dSource creation using CBBACKUPMGR. - `/opt/couchbase/bin/cbbackupmgr config --archive /u01/couchbase_backup --repo delphix` - - * Get data from source host in backup directory of staging host - `/opt/couchbase/bin/cbbackupmgr backup -a /u01/couchbase_backup -r delphix -c couchbase:// -u user -p password` - - - -**Target Requirements**: O/S user with the following privileges: - -1. Regular o/s user. -2. Execute access on couchbase binaries `[ chmod -R 775 /opt/couchbase ]`. -3. Empty folder on host to hold Delphix toolkit `[ approximate 2GB free space ]`. -4. Empty folder on host to mount nfs filesystem. This is just an empty folder with no space requirements and act as a base folder for NFS mounts. -5. sudo privileges for mount and umount. See sample below assuming `delphix_os` is used as delphix user. 
- `Defaults:delphixos !requiretty` - `delphixos ALL=NOPASSWD: /bin/mount, /bin/umount` - - diff --git a/docs/material/__init__.py b/docs/material/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/docs/material/manifest.json b/docs/material/manifest.json deleted file mode 100644 index 016754a..0000000 --- a/docs/material/manifest.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "assets/images/icons/bitbucket.svg": "assets/images/icons/bitbucket.1b09e088.svg", - "assets/images/icons/github.svg": "assets/images/icons/github.f0b8504a.svg", - "assets/images/icons/gitlab.svg": "assets/images/icons/gitlab.6dd19c00.svg", - "assets/javascripts/application.js": "assets/javascripts/application.878fdd8d.js", - "assets/javascripts/modernizr.js": "assets/javascripts/modernizr.1aa3b519.js", - "assets/stylesheets/application-palette.css": "assets/stylesheets/application-palette.22915126.css", - "assets/stylesheets/application.css": "assets/stylesheets/application.fbb7f3af.css" -} \ No newline at end of file diff --git a/docs/material/mkdocs_theme.yml b/docs/material/mkdocs_theme.yml deleted file mode 100644 index 2cd43ea..0000000 --- a/docs/material/mkdocs_theme.yml +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2016-2018 Martin Donath - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
- -# Language for theme localization -language: en - -# Text direction (can be ltr or rtl), default: ltr -direction: - -# Feature flags for functionality that alters behavior significantly, and thus -# may be a matter of taste -feature: - - # Another layer on top of the main navigation for larger screens in the form - # of tabs, especially useful for larger documentation projects - tabs: false - -# Sets the primary and accent color palettes as defined in the Material Design -# documentation - possible values can be looked up in the getting started guide -palette: - - # Primary color used for header, sidebar and links, default: indigo - primary: - - # Accent color for highlighting user interaction, default: indigo - accent: - -# Fonts used by Material, automatically loaded from Google Fonts - see the site -# for a list of available fonts -font: - - # Default font for text - text: Roboto - - # Fixed-width font for code listings - code: Roboto Mono - -# Favicon to be rendered -favicon: assets/images/favicon.png - -# The logo of the documentation shown in the header and navigation can either -# be a Material Icon ligature (see https://material.io/icons/) or an image URL -logo: - icon: "\uE80C" - -# Material includes the search in the header as a partial, not as a separate -# template, so it's correct that search.html is missing -include_search_page: false - -# Material doesn't use MkDocs search functionality but provides its own. For -# this reason, only the search index needs to be built -search_index_only: true - -# Static pages to build -static_templates: - - 404.html diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml deleted file mode 100644 index 975473f..0000000 --- a/docs/mkdocs.yml +++ /dev/null @@ -1,56 +0,0 @@ -site_name: Couchbase Plugin -theme: - name: material - custom_dir: 'material/' - palette: - primary: - accent: - logo: 'image/delphix-logo-white.png' - favicon: 'image/logo.png' - font: - text: Helvetica Neue - code: Ubuntu Mono - -copyright: Copyright © 2020 Delphix Corp. 
- -google_analytics: - - 'UA-35429885-3' - - 'auto' - -repo_url: https://github.com/delphix/couchbase-plugin -repo_name: Github -use_directory_urls: false -pages: - - Overview: 'index.md' - - Installation: 'Installation.md' - - Plugin Development : 'PluginDevelopment.md' - - Discovery: 'Discovery.md' - - Linking: 'Linking.md' - - Provisioning: 'Provisioning.md' - -extra: - social: - - type: sitemap - link: https://www.delphix.com/ - - type: facebook - link: https://www.facebook.com/DelphixCorp/ - - type: twitter - link: https://twitter.com/delphix - - type: linkedin - link: https://www.linkedin.com/company/delphix - - type: github - link: https://github.com/delphix - -extra_css: - - 'stylesheets/extra.css' - -markdown_extensions: - - toc: - permalink: true - - admonition - - codehilite: - guess_lang: false - - meta - -plugins: - - search diff --git a/docs/readme.md b/docs/readme.md deleted file mode 100644 index e69de29..0000000 diff --git a/plugin_config.yml b/plugin_config.yml index 1fe3c86..ab59161 100644 --- a/plugin_config.yml +++ b/plugin_config.yml @@ -1,8 +1,8 @@ id: 18f4ff11-b758-4bf2-9a37-719a22f5a4b8 -name: couchbase -externalVersion: 1.0.0 -buildNumber: 1.0.0 -language: PYTHON27 +name: sc:couchbase +externalVersion: "1.3.1" +buildNumber: 1.3.1 +language: PYTHON38 hostTypes: - UNIX pluginType: STAGED diff --git a/schema.json b/schema.json index c6765a5..31a253f 100644 --- a/schema.json +++ b/schema.json @@ -28,6 +28,16 @@ "type": "string", "prettyName": "Identity Name", "description": "Pretty name of this repository" + }, + "uid": { + "type": "integer", + "prettyName": "Couchbase User ID", + "description": "Couchbase User Identifier" + }, + "gid": { + "type": "integer", + "prettyName": "Couchbase Group ID", + "description": "Couchbase Group ID" } }, "nameField": "prettyName", @@ -106,9 +116,7 @@ "bucketEvictionPolicy", "couchbaseAdmin", "couchbaseAdminPassword", - "fts_service", - "analytics_service", - "eventing_service"], + "node_list"], "properties" : { "couchbasePort": { "type": "integer", @@ -180,23 +188,73 @@ "default": "" }, "fts_service": { - "default": true, - "type": "boolean", - "prettyName": "FTS Service", - "description": "" - }, - "analytics_service": { - "default": true, - "type": "boolean", - "prettyName": "Analytics Service", - "description": "" - }, - "eventing_service": { - "default": true, - "type": "boolean", - "prettyName": "Eventing Service", - "description": "" - } + "default": true, + "type": "boolean", + "prettyName": "FTS Service", + "description": "" + }, + "analytics_service": { + "default": true, + "type": "boolean", + "prettyName": "Analytics Service", + "description": "" + }, + "eventing_service": { + "default": true, + "type": "boolean", + "prettyName": "Eventing Service", + "description": "" + }, + "node_list": { + "type": "array", + "prettyName": "Additional Nodes", + "items": { + "type": "object", + "required": ["environment", "environmentUser", "node_addr"], + "ordering": ["environment", "environmentUser", "node_addr", "fts_service", "analytics_service", "eventing_service"], + "properties": { + "environment": { + "type": "string", + "format": "reference", + "referenceType": "UnixHostEnvironment", + "prettyName": "Delphix Environment name", + "description": "" + }, + "environmentUser": { + "type": "string", + "format": "reference", + "referenceType": "EnvironmentUser", + "prettyName": "Delphix Environment User", + "description": "", + "matches": "environment" + }, + "node_addr": { + "type": "string", + "prettyName": "Node hostname / IP", + 
"description": "", + "default": "" + }, + "fts_service": { + "default": false, + "type": "boolean", + "prettyName": "FTS Service", + "description": "" + }, + "analytics_service": { + "default": false, + "type": "boolean", + "prettyName": "Analytics Service", + "description": "" + }, + "eventing_service": { + "default": false, + "type": "boolean", + "prettyName": "Eventing Service", + "description": "" + } + } + } + } } }, "linkedSourceDefinition": { @@ -214,17 +272,16 @@ "clusterEventingRAMSize", "clusterAnalyticsRAMSize", "bucketEvictionPolicy", - "bucketSize", "couchbaseAdmin", "couchbaseAdminPassword", - "xdcrAdmin", - "xdcrAdminPassword" + "xdcrAdmin" ], "ordering" : [ "dSourceType", "couchbaseHost", "couchbasePort", "couchbaseBakLoc", + "archiveName", "couchbaseBakRepo", "mountPath", "stgClusterName", @@ -240,9 +297,8 @@ "xdcrAdmin", "xdcrAdminPassword", "fts_service", - "analytics_service", "eventing_service", - "config_settings_prov" + "config_settings_prov" ], "properties" : { "dSourceType": { @@ -271,6 +327,13 @@ "description": "Backups taken via cbbackupmgr", "default": "" }, + "archiveName": { + "type": "string", + "format": "unixpath", + "prettyName": "Backup Archive Name", + "description": "Archive Name for Backups taken", + "default": "" + }, "couchbaseBakRepo": { "type": "string", "format": "unixpath", @@ -329,11 +392,36 @@ "default": "valueOnly" }, "bucketSize": { - "type": "integer", - "prettyName": "Bucket Size", - "description": "The default bucket size", - "default": 0 - }, + "type": "array", + "prettyName": "Bucket RAM Size", + "description": "RAM to be allocated to each bucket", + "items": { + "type": "object", + "required": [ + "bname", + "bsize" + ], + "ordering": [ + "bname", + "bsize" + ], + "properties": { + "bname": { + "type": "string", + "prettyName": "Bucket Name", + "default": "", + "maxLength": 40 + }, + "bsize": { + "type": "integer", + "prettyName": "Bucket Size (in MB)", + "description": "Greater than or equal to 100MB.", + "default": 100, + "minimum": 100 + } + } + } + }, "couchbaseAdmin": { "type": "string", "prettyName": "Staging Couchbase Admin User", @@ -361,23 +449,23 @@ "default": "" }, "fts_service": { - "default": true, + "default": false, "type": "boolean", "prettyName": "FTS Service", "description": "" }, - "analytics_service": { - "default": true, - "type": "boolean", - "prettyName": "Analytics Service", - "description": "" - }, "eventing_service": { - "default": true, + "default": false, "type": "boolean", "prettyName": "Eventing Service", "description": "" }, + "analytics_service": { + "default": true, + "type": "boolean", + "prettyName": "Analytics Service", + "description": "" + }, "config_settings_prov" : { "type": "array", "prettyName": "Config Settings", @@ -436,7 +524,40 @@ }, "bucketList": { "type": "string" + }, + "snapshotPassword": { + "type": "string", + "format": "password" + }, + "indexes": { + "type": "array", + "items": [ + {"type": "string"} + ] + }, + "couchbaseAdmin": { + "type": "string", + "prettyName": "Source Couchbase Admin User", + "description": "" + }, + "couchbaseAdminPassword": { + "type": "string", + "format": "password", + "prettyName": "Source Couchbase Admin Password" } } - } + }, + "snapshotParametersDefinition": { + "type" : "object", + "additionalProperties" : false, + "required": ["resync"], + "properties" : { + "resync" : { + "type": "boolean", + "default": true, + "prettyName": "Resynchronize dSource", + "description": "Resynchronizing dSource will force a non-incremental load of data from the source. 
This operation is similar to creating a new dSource, but avoids duplicating storage requirements and maintains timeflow history." + } + } + } } diff --git a/src/controller/couchbase_lib/_bucket.py b/src/controller/couchbase_lib/_bucket.py index ae296f1..dabd243 100644 --- a/src/controller/couchbase_lib/_bucket.py +++ b/src/controller/couchbase_lib/_bucket.py @@ -1,28 +1,31 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ - This class contains methods for all bucket related operations .This is child class of Resource and parent class - of CouchbaseOperation + This class contains methods for all bucket related operations . + This is child class of Resource and parent class + of CouchbaseOperation. """ -####################################################################################################################### -import logging -from utils import utilities import json -from internal_exceptions.database_exceptions import BucketOperationError + +############################################################################## +import logging +from os.path import join + from controller import helper_lib from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource from db_commands.commands import CommandFactory -from db_commands.constants import ENV_VAR_KEY, EVICTION_POLICY +from db_commands.constants import ENV_VAR_KEY +from internal_exceptions.database_exceptions import BucketOperationError +from utils import utilities logger = logging.getLogger(__name__) class _BucketMixin(Resource, MixinInterface): - def __init__(self, builder): super(_BucketMixin, self).__init__(builder) @@ -37,41 +40,70 @@ def bucket_edit(self, bucket_name, flush_value=1): logger.debug("Editing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_edit(bucket_name=bucket_name, flush_value=flush_value, **env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - return utilities.execute_bash(self.connection, command, **kwargs) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } + env.update(kwargs[ENV_VAR_KEY]) + command, env_vars = CommandFactory.bucket_edit_expect( + bucket_name=bucket_name, flush_value=flush_value, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + logger.debug("edit bucket {}".format(command)) + return utilities.execute_expect(self.connection, command, **kwargs) def bucket_edit_ramquota(self, bucket_name, _ramsize): """ - :param bucket_name: Required bucket_name on which edit operation will run + :param bucket_name: Required bucket_name on which edit operation will + run :param _ramsize: :return: """ # It requires the before bucket delete logger.debug("Editing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_edit_ramquota(bucket_name=bucket_name, ramsize=_ramsize, **env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - return utilities.execute_bash(self.connection, command, 
**kwargs) + env.update(kwargs[ENV_VAR_KEY]) + command, env_vars = CommandFactory.bucket_edit_ramquota_expect( + bucket_name=bucket_name, ramsize=_ramsize, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + logger.debug("edit ram bucket {}".format(command)) + return utilities.execute_expect(self.connection, command, **kwargs) def bucket_delete(self, bucket_name): # To delete the bucket logger.debug("Deleting bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_delete(bucket_name=bucket_name, **env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - return utilities.execute_bash(self.connection, command, **kwargs) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } + env.update(kwargs[ENV_VAR_KEY]) + command, env_vars = CommandFactory.bucket_delete_expect( + bucket_name=bucket_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + logger.debug("delete bucket {}".format(command)) + return utilities.execute_expect(self.connection, command, **kwargs) def bucket_flush(self, bucket_name): # It requires the before bucket delete logger.debug("Flushing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_flush(bucket_name=bucket_name, **env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - return utilities.execute_bash(self.connection, command, **kwargs) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } + env.update(kwargs[ENV_VAR_KEY]) + command, env_vars = CommandFactory.bucket_flush_expect( + bucket_name=bucket_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + logger.debug("flush bucket {}".format(command)) + return utilities.execute_expect(self.connection, command, **kwargs) def bucket_remove(self, bucket_name): logger.debug("Removing bucket: {} ".format(bucket_name)) @@ -81,58 +113,202 @@ def bucket_remove(self, bucket_name): self.bucket_delete(bucket_name) helper_lib.sleepForSecond(2) - def bucket_create(self, bucket_name, ram_size=0): - logger.debug("Creating bucket: {} ".format(bucket_name)) + def bucket_create( + self, + bucket_name, + ram_size, + bucket_type, + bucket_compression, + retry: bool = True, + ): + logger.debug(f"Creating bucket: {bucket_name} ") # To create the bucket with given ram size self.__validate_bucket_name(bucket_name) if ram_size is None: - logger.debug("Needed ramsize for bucket_create. Currently it is: {}".format(ram_size)) + logger.debug( + "Needed ramsize for bucket_create. 
" + f"Currently it is: {ram_size}" + ) return + + if bucket_type == "membase": + # API return different type + bucket_type = "couchbase" + + if bucket_compression is not None: + bucket_compression = f"--compression-mode {bucket_compression}" + else: + bucket_compression = "" + policy = self.parameters.bucket_eviction_policy env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_create(bucket_name=bucket_name, ramsize=ram_size, evictionpolicy=policy, **env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - output, error, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } + env.update(kwargs[ENV_VAR_KEY]) + command, env_vars = CommandFactory.bucket_create_expect( + bucket_name=bucket_name, + ramsize=ram_size, + evictionpolicy=policy, + bucket_type=bucket_type, + bucket_compression=bucket_compression, + **env, + ) + logger.debug(f"create bucket {command} for {bucket_name}") + kwargs[ENV_VAR_KEY].update(env_vars) + output, error, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) + logger.debug( + f"create bucket output for {bucket_name}: " + f"{output} {error} {exit_code}" + ) helper_lib.sleepForSecond(2) + bucket_list = self.bucket_list() + bucket_name_list = [item["name"] for item in bucket_list] + if bucket_name not in bucket_name_list and retry: + self.bucket_create( + bucket_name=bucket_name, + ram_size=ram_size, + bucket_type=bucket_type, + bucket_compression=bucket_compression, + retry=False, + ) + logger.debug( + f"Bucket creation failed for {bucket_name} " + "on the first attempt, retrying." + ) + elif bucket_name not in bucket_name_list: + error_message = f"Bucket creation failed for {bucket_name}" + ( + ", even after retry." if not retry else "." + ) + logger.error(error_message) + raise BucketOperationError(error_message) + + logger.debug(f"Bucket creation successful for {bucket_name}.") + def bucket_list(self, return_type=list): - # See the all bucket. It will return also other information like ramused, ramsize etc + # See the all bucket. 
+ # It will return also other information like ramused, ramsize etc logger.debug("Finding staged bucket list") env = _BucketMixin.generate_environment_map(self) - command = CommandFactory.bucket_list(**env) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - bucket_list, error, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } + env.update(kwargs[ENV_VAR_KEY]) + # command = CommandFactory.bucket_list(**env) + command, env_vars = CommandFactory.bucket_list_expect(**env) + kwargs[ENV_VAR_KEY].update(env_vars) + logger.debug("list bucket {}".format(command)) + bucket_list, error, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) + logger.debug("list bucket output{}".format(bucket_list)) if return_type == list: - bucket_list = bucket_list.split("\n") - logger.debug("Bucket details in staged environment: {}".format(bucket_list)) - return bucket_list + # bucket_list = bucket_list.split("\n") + if bucket_list == "[]" or bucket_list is None: + logger.debug("empty list") + return [] + else: + logger.debug("clean up json") + bucket_list = bucket_list.replace("u'", "'") + bucket_list = bucket_list.replace("'", '"') + bucket_list = bucket_list.replace("True", '"True"') + bucket_list = bucket_list.replace("False", '"False"') + logger.debug("parse json") + bucket_list_dict = json.loads(bucket_list) + logger.debug("remap json") + bucket_list_dict = list( + map(helper_lib.remap_bucket_json, bucket_list_dict) + ) + logger.debug( + "Bucket details in staged environment: {}".format(bucket_list) + ) + return bucket_list_dict + + def move_bucket(self, bucket_name, direction): + logger.debug("Rename folder") + + if direction == "save": + src = join( + self.virtual_source.parameters.mount_path, "data", bucket_name + ) + dst = join( + self.virtual_source.parameters.mount_path, + "data", + ".{}.delphix".format(bucket_name), + ) + command = CommandFactory.os_mv(src, dst, self.need_sudo, self.uid) + logger.debug("rename command: {}".format(command)) + utilities.execute_bash(self.connection, command) + elif direction == "restore": + dst = join( + self.virtual_source.parameters.mount_path, "data", bucket_name + ) + src = join( + self.virtual_source.parameters.mount_path, + "data", + ".{}.delphix".format(bucket_name), + ) + command = CommandFactory.delete_dir(dst, self.need_sudo, self.uid) + logger.debug("delete command: {}".format(command)) + utilities.execute_bash(self.connection, command) + command = CommandFactory.os_mv(src, dst, self.need_sudo, self.uid) + logger.debug("rename command: {}".format(command)) + utilities.execute_bash(self.connection, command) def monitor_bucket(self, bucket_name, staging_UUID): # To monitor the replication - logger.debug("Monitoring the replication for bucket {} ".format(bucket_name)) - kwargs = {ENV_VAR_KEY: {'password': self.staged_source.parameters.xdcr_admin_password}} - command = CommandFactory.monitor_replication(source_username=self.staged_source.parameters.xdcr_admin, - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - bucket_name=bucket_name, uuid=staging_UUID) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + logger.debug( + "Monitoring the replication for bucket {} ".format(bucket_name) + ) + kwargs = { + ENV_VAR_KEY: { + "password": self.staged_source.parameters.xdcr_admin_password + } + } + env = 
kwargs[ENV_VAR_KEY] + command, env_vars = CommandFactory.monitor_replication_expect( + source_username=self.staged_source.parameters.xdcr_admin, + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + bucket_name=bucket_name, + uuid=staging_UUID, + **env, + ) + kwargs[ENV_VAR_KEY].update(env_vars) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) logger.debug("stdout: {}".format(stdout)) content = json.loads(stdout) - pending_docs = self._get_last_value_of_node_stats(content["nodeStats"].values()[0]) + pending_docs = self._get_last_value_of_node_stats( + list(content["nodeStats"].values())[0] + ) while pending_docs != 0: - logger.debug("Documents pending for replication: {}".format(pending_docs)) + logger.debug( + "Documents pending for replication: {}".format(pending_docs) + ) helper_lib.sleepForSecond(30) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) content = json.loads(stdout) - pending_docs = self._get_last_value_of_node_stats(content["nodeStats"].values()[0]) + pending_docs = self._get_last_value_of_node_stats( + list(content["nodeStats"].values())[0] + ) else: - logger.debug("Replication for bucket {} completed".format(bucket_name)) + logger.debug( + "Replication for bucket {} completed".format(bucket_name) + ) @staticmethod def _get_last_value_of_node_stats(content_list): """ :param content_list: - :return: last node value, if the list is defined. it the list is empty return 0 + :return: last node value, if the list is defined. it the list is empty + return 0 """ value = 0 if len(content_list) > 0: @@ -150,7 +326,11 @@ def __validate_bucket_name(name): @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, 'username': self.parameters.couchbase_admin} + env = { + "shell_path": self.repository.cb_shell_path, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env diff --git a/src/controller/couchbase_lib/_cb_backup.py b/src/controller/couchbase_lib/_cb_backup.py index 4abb7d2..6eab7b5 100644 --- a/src/controller/couchbase_lib/_cb_backup.py +++ b/src/controller/couchbase_lib/_cb_backup.py @@ -1,63 +1,159 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """This class contains methods for all cb backup manager. -This is child class of Resource and parent class of CouchbaseOperation +This is child class of Resource and parent class of CouchbaseOperation. 
""" -####################################################################################################################### +import json + +############################################################################## import logging -from utils import utilities +import os +from datetime import datetime + from controller import helper_lib from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource -from db_commands.constants import ENV_VAR_KEY -from db_commands.commands import CommandFactory +from dlpx.virtualization.platform.exceptions import UserError logger = logging.getLogger(__name__) class _CBBackupMixin(Resource, MixinInterface): - def __init__(self, builder): super(_CBBackupMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'base_path': helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), - 'hostname': self.connection.environment.host.name, 'port': self.parameters.couchbase_port, - 'username': self.parameters.couchbase_admin - } + env = { + "base_path": helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env - # Defined for future updates - def get_indexes_name(self, index_name): - logger.debug("Finding indexes....") - env = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - env = _CBBackupMixin.generate_environment_map(self) - cmd = CommandFactory.get_indexes_name(**env) - logger.debug("env detail is : ".format(env)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env) - logger.debug("Indexes are {}".format(command_output)) - return command_output - - # Defined for future updates - def build_index(self, index_name): - logger.debug("Building indexes....") - env = _CBBackupMixin.generate_environment_map(self) - cmd = CommandFactory.build_index(index_name=index_name, **env) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env) - logger.debug("command_output is ".format(command_output)) - return command_output + def check_and_update_archive_path(self, check_file=False): + folder_name = self.parameters.archive_name + if self.parameters.archive_name == "": + command_output, std_err, exit_code = self.run_os_command( + os_command="os_ls", dir_path=self.parameters.couchbase_bak_loc + ) + logger.debug(f"command_output={command_output}") + datetime_object_list = [] + for archive_name in command_output.split("\n"): + # archive name in format 20230810010001 + try: + datetime_archive_name = datetime.strptime( + archive_name, "%Y%m%d%H%M%S" + ) + datetime_object_list.append(datetime_archive_name) + except ValueError: + logger.debug( + f"Cannot convert {archive_name} into " + f"%Y%m%d%H%M%S format." + ) + if not datetime_object_list: + raise UserError( + f"No valid backups found in %Y%m%d%H%M%S " + f"format in directory " + f"{self.parameters.couchbase_bak_loc}." 
+ ) + else: + max_date = max(datetime_object_list) + folder_name = max_date.strftime("%Y%m%d%H%M%S") + logger.debug( + f"maximum date = {max_date}, " f"folder_name={folder_name}" + ) + file_data = "" + if check_file: + backup_restore_filename = os.path.join( + self.parameters.mount_path, + ".delphix/backup_restore.txt", + ) + check_file_stdout, _, exit_code = self.run_os_command( + os_command="check_file", + file_path=backup_restore_filename, + ) + + if exit_code == 0 and "Found" in check_file_stdout: + file_data, _, _ = self.run_os_command( + os_command="cat", path=backup_restore_filename + ) + file_data = file_data.strip() + + if file_data == folder_name: + raise UserError("No new backups found....exiting snapshot") + else: + self.parameters.archive_name = folder_name + return folder_name def cb_backup_full(self, csv_bucket): logger.debug("Starting Restore via Backup file...") logger.debug("csv_bucket_list: {}".format(csv_bucket)) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - env = _CBBackupMixin.generate_environment_map(self) - cmd = CommandFactory.cb_backup_full(backup_location=self.parameters.couchbase_bak_loc, - csv_bucket_list=csv_bucket, - backup_repo=self.parameters.couchbase_bak_repo, **env) - utilities.execute_bash(self.connection, cmd, **kwargs) + + skip = "--disable-analytics" + + if not self.parameters.fts_service: + skip = skip + " {} {} ".format( + "--disable-ft-indexes", "--disable-ft-alias" + ) + + if not self.parameters.eventing_service: + skip = skip + " {} ".format("--disable-eventing") + + logger.debug("skip backup is set to: {}".format(skip)) + map_data_list = [] + if int(self.repository.version.split(".")[0]) >= 7: + for bucket_name in csv_bucket.split(","): + logger.debug(f"bucket_name: {bucket_name}") + stdout, _, _ = self.run_couchbase_command( + couchbase_command="get_scope_list_expect", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + bucket_name=bucket_name, + ) + json_scope_data = json.loads(stdout) + for s in json_scope_data["scopes"]: + scope_name = s["name"] + if scope_name == "_default": + continue + collection_list = s["collections"] + for c in collection_list: + collection_name = c["name"] + if collection_name == "_default": + continue + map_data_list.append( + f"{bucket_name}.{scope_name}.{collection_name}=" + f"{bucket_name}.{scope_name}.{collection_name}" + ) + + stdout, stderr, exit_code = self.run_couchbase_command( + couchbase_command="cb_backup_full", + backup_location=os.path.join( + self.parameters.couchbase_bak_loc, self.parameters.archive_name + ), + csv_bucket_list=csv_bucket, + backup_repo=self.parameters.couchbase_bak_repo, + skip=skip, + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + map_data=",".join(map_data_list), + repo_version=self.repository.version, + ) + + if exit_code != 0: + raise UserError( + "Problem with restoring backup using cbbackupmgr", + "Check if repo and all privileges are correct", + "stdout: {}, stderr: {}, exit_code: {}".format( + stdout, stderr, exit_code + ), + ) diff --git a/src/controller/couchbase_lib/_cluster.py b/src/controller/couchbase_lib/_cluster.py index 89a8461..d12066a 100644 --- a/src/controller/couchbase_lib/_cluster.py +++ b/src/controller/couchbase_lib/_cluster.py @@ -1,42 +1,48 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. 
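check_and_update_archive_path above picks the archive to ingest by parsing directory names as %Y%m%d%H%M%S timestamps and keeping the newest one. A reduced sketch of that selection, with a hard-coded listing standing in for the run_os_command("os_ls", ...) output:

    from datetime import datetime

    def newest_archive(folder_names):
        """Return the newest folder name in %Y%m%d%H%M%S format, or None."""
        stamps = []
        for name in folder_names:
            try:
                stamps.append(datetime.strptime(name, "%Y%m%d%H%M%S"))
            except ValueError:
                continue  # skip anything that is not a timestamped archive
        return max(stamps).strftime("%Y%m%d%H%M%S") if stamps else None

    # "lost+found" is ignored; the 2023-08-11 archive wins.
    print(newest_archive(["20230810010001", "lost+found", "20230811010001"]))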
# -####################################################################################################################### +############################################################################## """ This class contains methods for cluster related operations -This is child class of Resource and parent class of CouchbaseOperation +This is child class of Resource and parent class of CouchbaseOperation. """ -####################################################################################################################### +############################################################################## import logging import re -from utils import utilities -from db_commands.commands import CommandFactory + from controller.couchbase_lib._mixin_interface import MixinInterface -from db_commands.constants import ENV_VAR_KEY +from controller.helper_lib import sleepForSecond from controller.resource_builder import Resource +from db_commands.commands import CommandFactory +from db_commands.constants import ENV_VAR_KEY +from utils import utilities logger = logging.getLogger(__name__) # Error string on which we have to skip without raising the Exception -ALREADY_CLUSTER_INIT = "Cluster is already initialized, use setting-cluster to change settings" +ALREADY_CLUSTER_INIT = ( + "Cluster is already initialized, use setting-cluster to change settings" +) class _ClusterMixin(Resource, MixinInterface): - def __init__(self, builder): super(_ClusterMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, 'username': self.parameters.couchbase_admin, - 'cluster_ramsize': self.parameters.cluster_ram_size, - 'cluster_index_ramsize': self.parameters.cluster_index_ram_size, - 'cluster_fts_ramsize': self.parameters.cluster_ftsram_size, - 'cluster_eventing_ramsize': self.parameters.cluster_eventing_ram_size, - 'cluster_analytics_ramsize': self.parameters.cluster_analytics_ram_size - } + env = { + "shell_path": self.repository.cb_shell_path, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + "cluster_ramsize": self.parameters.cluster_ram_size, + "cluster_index_ramsize": self.parameters.cluster_index_ram_size, + "cluster_fts_ramsize": self.parameters.cluster_ftsram_size, + "cluster_eventing_ramsize": self.parameters.cluster_eventing_ram_size, # noqa E501 + "cluster_analytics_ramsize": self.parameters.cluster_analytics_ram_size, # noqa E501 + } # MixinInterface.read_map(env) return env @@ -52,43 +58,84 @@ def cluster_init(self): # Cluster initialization logger.debug("Cluster Initialization started") fts_service = self.parameters.fts_service - analytics_service = self.parameters.analytics_service + # analytics_service = self.parameters.analytics_service eventing_service = self.parameters.eventing_service cluster_name = self._get_cluster_name() - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } additional_service = "query" if fts_service: additional_service = additional_service + ",fts" - if analytics_service: - additional_service = additional_service + ",analytics" + # if analytics_service: + # additional_service = additional_service + ",analytics" if eventing_service: additional_service = additional_service + 
",eventing" logger.debug("additional services : {}".format(additional_service)) - lambda_expr = lambda output: bool(re.search(ALREADY_CLUSTER_INIT, output)) + + def f(): + return lambda output: bool(re.search(ALREADY_CLUSTER_INIT, output)) + + lambda_expr = f env = _ClusterMixin.generate_environment_map(self) - env['additional_services'] = additional_service - cmd = CommandFactory.cluster_init(cluster_name=cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command_name=cmd, callback_func=lambda_expr, - **kwargs) + env["additional_services"] = additional_service + if int(self.repository.version.split(".")[0]) >= 7: + env.update(kwargs[ENV_VAR_KEY]) + if "(CE)" in self.repository.version: + env["cluster_eventing_ramsize"] = None + env["cluster_analytics_ramsize"] = None + env["indexerStorageMode"] = "forestdb" + else: + env["indexerStorageMode"] = "plasma" + cmd, env_vars = CommandFactory.cluster_init_rest_expect( + cluster_name=cluster_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) + else: + cmd = CommandFactory.cluster_init(cluster_name=cluster_name, **env) + logger.debug("Cluster init: {}".format(cmd)) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, + command_name=cmd, + callback_func=lambda_expr, + **kwargs + ) if re.search(r"ERROR", str(stdout)): if re.search(r"ERROR: Cluster is already initialized", stdout): - logger.debug("Performing cluster setting as cluster is already initialized") + logger.debug( + "Performing cluster setting as cluster is already " + "initialized" + ) self.cluster_setting() else: logger.error("Cluster init failed. Throwing exception") raise Exception(stdout) else: logger.debug("Cluster init succeeded") + + # here we should wait for indexer to start + sleepForSecond(10) return [stdout, stderr, exit_code] def cluster_setting(self): logger.debug("Cluster setting process has started") - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } cluster_name = self._get_cluster_name() env = _ClusterMixin.generate_environment_map(self) - cmd = CommandFactory.cluster_setting(cluster_name=cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) + env.update(kwargs[ENV_VAR_KEY]) + cmd, env_vars = CommandFactory.cluster_setting_expect( + cluster_name=cluster_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if re.search(r"ERROR", str(stdout)): logger.error("Cluster modification failed, killing the execution") raise Exception(stdout) diff --git a/src/controller/couchbase_lib/_mixin_interface.py b/src/controller/couchbase_lib/_mixin_interface.py index 2cee27e..6cc9a35 100644 --- a/src/controller/couchbase_lib/_mixin_interface.py +++ b/src/controller/couchbase_lib/_mixin_interface.py @@ -1,28 +1,30 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -# This is interface which must be included by all child class of Resource +# This is interface which must be included by all child class of Resource. 
# This is child class of Resource and parent class of CouchbaseOperation # Therefore child class has to implement the method generate_environment_map -# Mixin class(Class which is implementing this interface) created only in two cases: +# Mixin class(Class which is implementing this interface) created only in +# two cases: # 1-> Bunch of methods belonging to one group # 2-> Environment data is common for all the commands # For case #1, it's about practice we should follow in software development -# For case #2, if such kind of cases are there in which common env data is required in execution of multiple commands -# then we club them in one class. Implement 'generate_environment_map' method and let it used by all methods defined in +# For case #2, if such kind of cases are there in which common env data is +# required in execution of multiple commands +# then we club them in one class. Implement 'generate_environment_map' method +# and let it used by all methods defined in # class. -# Other benefits are: Can call read_map to read each env data, Handling of attribute error while generating the env data +# Other benefits are: Can call read_map to read each env data, Handling of +# attribute error while generating the env data import logging - logger = logging.getLogger(__name__) class MixinInterface(object): - def generate_environment_map(self): raise Exception("You need to implement this method in child class") @@ -36,6 +38,10 @@ def inner(*args, **kwargs): try: return function(*args, **kwargs) except AttributeError as AE: - logger.debug("Failed to read value from schema objects. Error: {}".format(AE.message)) + logger.debug( + "Failed to read value from schema objects. " + "Error: {}".format(str(AE)) + ) raise + return inner diff --git a/src/controller/couchbase_lib/_replication.py b/src/controller/couchbase_lib/_replication.py deleted file mode 100644 index f23a1aa..0000000 --- a/src/controller/couchbase_lib/_replication.py +++ /dev/null @@ -1,169 +0,0 @@ -# -# Copyright (c) 2020 by Delphix. All rights reserved. 
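check_attribute_error in _mixin_interface.py wraps schema accesses so a missing attribute is logged before the AttributeError is re-raised. A self-contained sketch of the same pattern; functools.wraps and the _Params stand-in are additions for illustration, not part of the plugin:

    import functools
    import logging

    logger = logging.getLogger(__name__)

    def check_attribute_error(function):
        """Log a readable message when a schema attribute is missing,
        then re-raise so the caller still sees the failure."""
        @functools.wraps(function)
        def inner(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except AttributeError as err:
                logger.debug(
                    "Failed to read value from schema objects. "
                    "Error: {}".format(err)
                )
                raise
        return inner

    @check_attribute_error
    def read_port(parameters):
        return parameters.couchbase_port  # raises and logs if the field is absent

    class _Params:  # stand-in for the generated schema object
        couchbase_port = 8091

    print(read_port(_Params()))  # -> 8091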
-# -####################################################################################################################### -""" -This class contains methods for replication related operations -This is child class of Resource and parent class of CouchbaseOperation -""" -####################################################################################################################### - -from utils import utilities -import re -import logging -from db_commands.commands import CommandFactory -from controller.couchbase_lib._mixin_interface import MixinInterface -from db_commands.constants import ENV_VAR_KEY -from controller.resource_builder import Resource -logger = logging.getLogger(__name__) - - -class _ReplicationMixin(Resource, MixinInterface): - - def __init__(self, builder): - super(_ReplicationMixin, self).__init__(builder) - - @MixinInterface.check_attribute_error - def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, 'source_hostname': self.source_config.couchbase_src_host, - 'source_port': self.source_config.couchbase_src_port, 'source_username': self.parameters.xdcr_admin} - # MixinInterface.read_map(env) - return env - - def get_replication_uuid(self): - # False for string - logger.debug("Finding the replication uuid through host name") - is_ip_or_string = False - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - cluster_name = self.parameters.stg_cluster_name - env = _ReplicationMixin.generate_environment_map(self) - cmd = CommandFactory.get_replication_uuid(**env) - try: - - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - if stdout is None or stdout == "": - logger.debug("No Replication ID identified") - return None, None - logger.debug("xdcr remote references : {}".format(stdout)) - hostname = self.connection.environment.host.name - logger.debug("Environment hostname {}".format(hostname)) - host_ip = "" - if not re.search(r"{}".format(hostname), stdout): - logger.debug("cluster for hostname {} doesn't exist".format(hostname)) - logger.debug("Finding the ip for this host") - host_ip = self.get_ip() - logger.debug("Finding the replication uuid through host ip") - if not re.search(r"{}".format(host_ip), stdout): - logger.debug("cluster for host_ip {} doesn't exist".format(hostname)) - return None, None - else: - is_ip_or_string = True - logger.debug("cluster for host_ip {} exist".format(host_ip)) - else: - logger.debug("cluster for hostname {} exist".format(host_ip)) - if is_ip_or_string == False: - uuid = re.search(r"uuid:.*(?=\s.*{})".format(hostname), stdout).group() - else: - uuid = re.search(r"uuid:.*(?=\s.*{})".format(host_ip), stdout).group() - - uuid = uuid.split(":")[1].strip() - cluster_name_staging = re.search(r"cluster name:.*(?=\s.*{})".format(uuid), stdout).group() - cluster_name_staging = cluster_name_staging.split(":")[1].strip() - logger.debug("uuid for {} cluster : {}".format(uuid, cluster_name_staging)) - if cluster_name_staging == cluster_name: - return uuid, cluster_name - else: - return uuid, cluster_name_staging - except Exception as err: - logger.warn("Error identified: {} ".format(err.message)) - logger.warn("UUID is None. 
Not able to find any cluster") - return None, None - - def get_stream_id(self): - logger.debug("Finding the stream id for provided cluster name") - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - uuid, cluster_name = self.get_replication_uuid() - if uuid is None: - return None, None - env = _ReplicationMixin.generate_environment_map(self) - cmd = CommandFactory.get_stream_id(cluster_name=cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - if stdout is None or stdout == "": - logger.debug("No stream ID identified") - return None, None - else: - stream_id = re.findall(r"(?<=stream id: ){}.*".format(uuid), stdout) - logger.debug("Stream id found: {}".format(stream_id)) - return stream_id, cluster_name - - def pause_replication(self): - logger.debug("Pausing replication ...") - stream_id, cluster_name = self.get_stream_id() - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - env = _ReplicationMixin.generate_environment_map(self) - for replication_id in stream_id: - cmd = CommandFactory.pause_replication(cluster_name=cluster_name, id=replication_id, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - logger.debug(stdout) - - def resume_replication(self): - logger.debug("Resuming replication ...") - stream_id, cluster_name = self.get_stream_id() - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - env = _ReplicationMixin.generate_environment_map(self) - for s_id in stream_id: - cmd = CommandFactory.resume_replication(cluster_name=cluster_name, id=s_id , **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - logger.debug(stdout) - - def delete_replication(self): - logger.debug("Deleting replication...") - stream_id, cluster_name = self.get_stream_id() - logger.debug("stream_id: {} and cluster_name : {}".format(stream_id, cluster_name)) - if stream_id is None or stream_id == "": - logger.debug("No Replication is found to delete.") - return False, cluster_name - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - env = _ReplicationMixin.generate_environment_map(self) - - for id in stream_id: - cmd = CommandFactory.delete_replication(cluster_name=cluster_name, id=id, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - if exit_code != 0: - logger.warn("stream_id: {} deletion failed".format(id)) - else: - logger.debug("stream_id: {} deletion succeeded".format(id)) - return True, cluster_name - - def get_ip(self): - cmd = CommandFactory.get_ip_of_hostname() - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd) - logger.debug("IP is {}".format(stdout)) - return stdout - - def check_duplicate_replication(self, cluster_name): - logger.debug("Searching cluster name") - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} - env = _ReplicationMixin.generate_environment_map(self) - cmd = CommandFactory.get_replication_uuid(**env) - try: - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) - all_clusters = re.findall(r'cluster name:.*', stdout) - stream_id, cluster = self.get_stream_id() - logger.debug("stream_id:{} and cluster s:{} ".format(stream_id, cluster)) - if stream_id: - # cluster is already set up between these nodes# No setup and no mis match - logger.debug("Already XDCR set up have been between source and staging 
server") - return True, False - logger.debug("No XDCR for staging host. Now validating the cluster name... ") - for each_cluster_pair in all_clusters: - each_cluster = each_cluster_pair.split(':')[1].strip() - logger.debug("Listed cluster: {} and input is:{} ".format(each_cluster, cluster_name)) - if each_cluster == cluster_name: - logger.debug("Duplicate cluster name issue identified ") - # no setup but mismatch - return False, True - return False, False - - except Exception as err: - logger.debug("Failed to verify the duplicate name: {} ".format(err.message)) diff --git a/src/controller/couchbase_lib/_xdcr.py b/src/controller/couchbase_lib/_xdcr.py index 6effcb2..1da19bb 100644 --- a/src/controller/couchbase_lib/_xdcr.py +++ b/src/controller/couchbase_lib/_xdcr.py @@ -1,51 +1,68 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This class contains methods for XDCR related operations +This class contains methods for XDCR related operations. This is child class of Resource and parent class of CouchbaseOperation """ -####################################################################################################################### +import json + +############################################################################## import logging -from utils import utilities import re + from controller import helper_lib -from db_commands.commands import CommandFactory from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource - +from db_commands.commands import CommandFactory from db_commands.constants import ENV_VAR_KEY +from dlpx.virtualization.platform.exceptions import UserError +from utils import utilities logger = logging.getLogger(__name__) class _XDCrMixin(Resource, MixinInterface): - def __init__(self, builder): super(_XDCrMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, - 'source_hostname': self.source_config.couchbase_src_host, - 'source_port': self.source_config.couchbase_src_port, - 'source_username': self.parameters.xdcr_admin, - 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, - 'username': self.parameters.couchbase_admin - } + env = { + "shell_path": self.repository.cb_shell_path, + "source_hostname": self.source_config.couchbase_src_host, + "source_port": self.source_config.couchbase_src_port, + "source_username": self.parameters.xdcr_admin, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env def xdcr_delete(self, cluster_name): - logger.debug("XDCR deletion for cluster_name {} has started ".format(cluster_name)) - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password, - 'password': self.parameters.couchbase_admin_password}} + logger.debug( + "XDCR deletion for cluster_name {} has started ".format( + cluster_name + ) + ) + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password, + "password": self.parameters.couchbase_admin_password, + } + } env = _XDCrMixin.generate_environment_map(self) - cmd = 
CommandFactory.xdcr_delete(cluster_name=cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) + env.update(kwargs[ENV_VAR_KEY]) + cmd, env_vars = CommandFactory.xdcr_delete_expect( + cluster_name=cluster_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if exit_code != 0: logger.error("XDCR Setup deletion failed") if stdout: @@ -58,25 +75,310 @@ def xdcr_delete(self, cluster_name): def xdcr_setup(self): logger.debug("Started XDCR set up ...") - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password, - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password, + "password": self.parameters.couchbase_admin_password, + } + } env = _XDCrMixin.generate_environment_map(self) - cmd = CommandFactory.xdcr_setup(cluster_name=self.parameters.stg_cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) + env.update(kwargs[ENV_VAR_KEY]) + cmd, env_vars = CommandFactory.xdcr_setup_expect( + cluster_name=self.parameters.stg_cluster_name, **env + ) + kwargs[ENV_VAR_KEY].update(env_vars) + utilities.execute_expect(self.connection, cmd, **kwargs) helper_lib.sleepForSecond(3) def xdcr_replicate(self, src, tgt): try: logger.debug("Started XDCR replication for bucket {}".format(src)) - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password + } + } env = _XDCrMixin.generate_environment_map(self) - cmd = CommandFactory.xdcr_replicate(source_bucket_name=src, target_bucket_name=tgt, - cluster_name=self.parameters.stg_cluster_name, **env) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) + env.update(kwargs[ENV_VAR_KEY]) + cmd, env_vars = CommandFactory.xdcr_replicate_expect( + source_bucket_name=src, + target_bucket_name=tgt, + cluster_name=self.parameters.stg_cluster_name, + **env, + ) + kwargs[ENV_VAR_KEY].update(env_vars) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if exit_code != 0: logger.debug("XDCR replication create failed") raise Exception(stdout) logger.debug("{} : XDCR replication create succeeded".format(tgt)) helper_lib.sleepForSecond(2) except Exception as e: - logger.debug("XDCR error {}".format(e.message)) + logger.debug("XDCR error {}".format(str(e))) + + def get_replication_uuid(self): + # False for string + logger.debug("Finding the replication uuid through host name") + cluster_name = self.parameters.stg_cluster_name + + stdout, stderr, exit_code = self.run_couchbase_command( + "get_replication_uuid", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + ) + + if exit_code != 0 or stdout is None or stdout == "": + logger.debug("No Replication ID identified") + return None + + try: + + logger.debug("xdcr remote references : {}".format(stdout)) + stg_hostname = self.connection.environment.host.name + logger.debug("Environment hostname {}".format(stg_hostname)) + # it can have more than single IP address + host_ips = self.get_ip() + # conver output into variables + clusters = {} + l = stdout.split("\n") + while l: + line = l.pop(0) + g = 
re.match(r"\s*cluster name:\s(\S*)", line) + if g: + xdrc_cluster_name = g.group(1) + uuid = re.match(r"\s*uuid:\s(\S*)", l.pop(0)).group(1) + hostname = re.match( + r"\s*host name:\s(\S*):(\d*)", l.pop(0) + ).group(1) + clusters[xdrc_cluster_name.lower()] = { + "hostname": hostname, + "uuid": uuid, + } + + # check if a cluster name is really connected to staging - + # just in case + + if cluster_name.lower() in clusters: + logger.debug( + "Cluster {} found in xdrc-setup output".format( + cluster_name + ) + ) + # check if hostname returned from source match hostname or + # IP's of staging server + + logger.debug(stg_hostname) + logger.debug(clusters[cluster_name.lower()]["hostname"]) + + if stg_hostname == clusters[cluster_name.lower()]["hostname"]: + # hostname matched + logger.debug( + "Cluster {} hostname {} is matching staging server " + "hostname".format(cluster_name, stg_hostname) + ) + uuid = clusters[cluster_name.lower()]["uuid"] + else: + # check for IP's + logger.debug("Checking for IP match") + + logger.debug(clusters[cluster_name.lower()]) + + if clusters[cluster_name.lower()]["hostname"] in host_ips: + # ip matched + logger.debug( + "Cluster {} IP {} is matching staging server IPs " + "{}".format( + cluster_name, + clusters[cluster_name.lower()]["hostname"], + host_ips, + ) + ) + uuid = clusters[cluster_name.lower()]["uuid"] + else: + logger.debug( + "Can't confirm that xdrc-setup is matching staging" + ) + raise UserError( + "XDRC Remote cluster {} on the source server " + "is not pointed to staging server".format( + cluster_name + ), + "Please check and delete remote cluster " + "definition", + clusters[cluster_name.lower()], + ) + + else: + logger.debug( + "Cluster {} configuration not found in XDCR of " + "source".format(cluster_name) + ) + return None + + logger.debug("uuid for {} cluster : {}".format(uuid, cluster_name)) + return uuid + + except UserError: + raise + except Exception as err: + logger.warn("Error identified: {} ".format(str(err))) + logger.warn("UUID is None. 
Not able to find any cluster") + return None + + def get_stream_id(self): + logger.debug("Finding the stream id for provided cluster name") + uuid = self.get_replication_uuid() + if uuid is None: + return None + cluster_name = self.parameters.stg_cluster_name + + stdout, stderr, exit_code = self.run_couchbase_command( + "get_stream_id", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + cluster_name=cluster_name, + ) + + logger.debug(stdout) + logger.debug(uuid) + if exit_code != 0 or stdout is None or stdout == "": + logger.debug("No stream ID identified") + return None + else: + stream_id = re.findall( + r"(?<=stream id:\s){}.*".format(uuid), stdout + ) + logger.debug("Stream id found: {}".format(stream_id)) + return stream_id + + def delete_replication(self): + logger.debug("Deleting replication...") + stream_id = self.get_stream_id() + cluster_name = self.parameters.stg_cluster_name + logger.debug( + "stream_id: {} and cluster_name : {}".format( + stream_id, cluster_name + ) + ) + if stream_id is None or stream_id == "": + logger.debug("No Replication is found to delete.") + return False, cluster_name + + for id in stream_id: + stdout, stderr, exit_code = self.run_couchbase_command( + "delete_replication", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + cluster_name=cluster_name, + id=id, + ) + + if exit_code != 0: + logger.warn("stream_id: {} deletion failed".format(id)) + else: + logger.debug("stream_id: {} deletion succeeded".format(id)) + return True, cluster_name + + def get_ip(self): + cmd = CommandFactory.get_ip_of_hostname() + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, cmd + ) + logger.debug("IP is {}".format(stdout)) + return stdout.split() + + def setup_replication(self): + uuid = self.get_replication_uuid() + + if uuid is None: + logger.info("Setting up XDRC remote cluster") + self.xdcr_setup() + + streams_id = self.get_stream_id() + + if streams_id is not None: + alredy_replicated_buckets = [ + m.group(1) + for m in (re.match(r"\S*/(\S*)/\S*", x) for x in streams_id) + if m + ] + else: + alredy_replicated_buckets = [] + + config_setting = self.staged_source.parameters.config_settings_prov + + if len(config_setting) > 0: + bucket_list = [ + config_bucket["bucketName"] for config_bucket in config_setting + ] + else: + bucket_details_source = self.source_bucket_list() + bucket_list = helper_lib.filter_bucket_name_from_json( + bucket_details_source + ) + + logger.debug("Bucket list to create replication for") + logger.debug(bucket_list) + logger.debug("Already replicated buckets") + logger.debug(alredy_replicated_buckets) + + for bkt_name in bucket_list: + if bkt_name not in alredy_replicated_buckets: + if int(self.repository.version.split(".")[0]) >= 7: + logger.debug(f"bucket_name: {bkt_name}") + stdout, _, _ = self.run_couchbase_command( + couchbase_command="get_scope_list_expect", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + hostname=self.source_config.couchbase_src_host, + port=self.source_config.couchbase_src_port, + username=self.staged_source.parameters.xdcr_admin, + password=self.staged_source.parameters.xdcr_admin_password, # noqa E501 + bucket_name=bkt_name, + ) + 
json_scope_data = json.loads(stdout) + logger.debug(f"json_scope_data={json_scope_data}") + for s in json_scope_data["scopes"]: + scope_name = s["name"] + if scope_name == "_default": + continue + # create scope + self.run_couchbase_command( + couchbase_command="create_scope_expect", + base_path=helper_lib.get_base_directory_of_given_path( # noqa E501 + self.repository.cb_shell_path + ), + scope_name=scope_name, + bucket_name=bkt_name, + ) + collection_list = s["collections"] + for c in collection_list: + collection_name = c["name"] + if collection_name == "_default": + continue + # create collection + self.run_couchbase_command( + couchbase_command="create_collection_expect", + base_path=helper_lib.get_base_directory_of_given_path( # noqa E501 + self.repository.cb_shell_path + ), + scope_name=scope_name, + bucket_name=bkt_name, + collection_name=collection_name, + ) + + logger.debug("Creating replication for {}".format(bkt_name)) + self.xdcr_replicate(bkt_name, bkt_name) + else: + logger.debug( + "Bucket {} replication already configured".format(bkt_name) + ) diff --git a/src/controller/couchbase_operation.py b/src/controller/couchbase_operation.py index c300ade..15842e2 100644 --- a/src/controller/couchbase_operation.py +++ b/src/controller/couchbase_operation.py @@ -1,139 +1,496 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2024 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This class defines methods for couchbase operations. Parent classes are: _BucketMixin, _ClusterMixin, _ReplicationMixin, - _XDCrMixin, _CBBackupMixin. Modules name is explaining about the operations for which module is created for. -The constructor of this class expects a `builder` on which each database operation will be performed -Commands are defined for each method in module commands.py. To perform any delphix operation we need to create -the object of this class. This class is single connector between other modules and `controller` package. +This class defines methods for couchbase operations. Parent classes are: +_BucketMixin, _ClusterMixin, + _XDCrMixin, _CBBackupMixin. Modules name is explaining about the operations + for which module is created for. +The constructor of this class expects a `builder` on which each database +operation will be performed +Commands are defined for each method in module commands.py. To perform any +delphix operation we need to create +the object of this class. 
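setup_replication above derives the buckets that already replicate by pulling the bucket name out of each XDCR stream id with re.match(r"\S*/(\S*)/\S*", ...). A tiny standalone version, assuming stream ids of the form <cluster-uuid>/<source-bucket>/<target-bucket>:

    import re

    def replicated_buckets(stream_ids):
        """Extract bucket names from XDCR stream ids shaped like
        '<cluster-uuid>/<source-bucket>/<target-bucket>' (assumed format)."""
        return [
            m.group(1)
            for m in (re.match(r"\S*/(\S*)/\S*", s) for s in stream_ids)
            if m
        ]

    ids = ["9a3c81f/beer-sample/beer-sample", "9a3c81f/travel/travel"]
    print(replicated_buckets(ids))  # ['beer-sample', 'travel']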
This class is single connector between other modules +and `controller` package """ -####################################################################################################################### +############################################################################## -import re +import json import logging import os -import sys - -from dlpx.virtualization.platform import Status +import re +import time -from internal_exceptions.database_exceptions import CouchbaseServicesError -from utils import utilities -from controller.resource_builder import Resource from controller import helper_lib from controller.couchbase_lib._bucket import _BucketMixin +from controller.couchbase_lib._cb_backup import _CBBackupMixin from controller.couchbase_lib._cluster import _ClusterMixin -from controller.couchbase_lib._replication import _ReplicationMixin from controller.couchbase_lib._xdcr import _XDCrMixin -from controller.couchbase_lib._cb_backup import _CBBackupMixin +from controller.helper_lib import remap_bucket_json +from controller.resource_builder import Resource +from db_commands import constants from db_commands.commands import CommandFactory -from db_commands.constants import ENV_VAR_KEY, StatusIsActive, DELPHIX_HIDDEN_FOLDER, CONFIG_FILE_NAME -import time +from db_commands.constants import CONFIG_FILE_NAME +from db_commands.constants import DELPHIX_HIDDEN_FOLDER +from db_commands.constants import StatusIsActive +from dlpx.virtualization.platform import Status +from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.database_exceptions import CouchbaseServicesError +from utils import utilities logger = logging.getLogger(__name__) -class CouchbaseOperation(_BucketMixin, _ClusterMixin, _ReplicationMixin, _XDCrMixin, _CBBackupMixin): - - def __init__(self, builder): +class CouchbaseOperation( + _BucketMixin, _ClusterMixin, _XDCrMixin, _CBBackupMixin +): + def __init__(self, builder, node_connection=None): """ - Main class through which other modules can run databases operations on provided parameters - :param builder: builder object which contains all necessary parameters on which db methods will be executed + Main class through which other modules can run databases operations on + provided parameters + :param builder: builder object which contains all necessary parameters + on which db methods will be executed + :param node_connection: connection to node, if this is not a default + one """ + logger.debug("Object initialization") # Initializing the parent class constructor super(CouchbaseOperation, self).__init__(builder) - def restart_couchbase(self): + if node_connection is not None: + self.connection = node_connection + + self.__need_sudo = helper_lib.need_sudo( + self.connection, self.repository.uid, self.repository.gid + ) + self.__uid = self.repository.uid + self.__gid = self.repository.gid + + @property + def need_sudo(self): + return self.__need_sudo + + @property + def uid(self): + return self.__uid + + @property + def gid(self): + return self.__gid + + def run_couchbase_command(self, couchbase_command, **kwargs): + logger.debug("run_couchbase_command") + logger.debug("couchbase_command: {}".format(couchbase_command)) + if "password" in kwargs: + password = kwargs.get("password") + else: + password = self.parameters.couchbase_admin_password + kwargs["password"] = password + + if "username" in kwargs: + username = kwargs.pop("username") + else: + username = self.parameters.couchbase_admin + + if "hostname" in kwargs: + hostname = 
kwargs.pop("hostname") + else: + hostname = self.connection.environment.host.name + + if "port" in kwargs: + port = kwargs.pop("port") + else: + port = self.parameters.couchbase_port + + env = {"password": password} + + if "newpass" in kwargs: + # for setting a new password + env["newpass"] = kwargs.get("newpass") + + if "source_password" in kwargs: + env["source_password"] = kwargs.get("source_password") + + autoparams = [ + "shell_path", + "install_path", + "username", + "port", + "sudo", + "uid", + "hostname", + ] + + new_kwargs = {k: v for k, v in kwargs.items() if k not in autoparams} + if couchbase_command not in [ + "get_server_list", + "couchbase_server_info", + "cb_backup_full", + "build_index", + "check_index_build", + "get_source_bucket_list", + "get_replication_uuid", + "get_stream_id", + "delete_replication", + "node_init", + "get_indexes_name", + "rename_cluster", + "server_add", + "rebalance", + "get_scope_list_expect", + "change_cluster_password", + "create_scope_expect", + "create_collection_expect", + ]: + method_to_call = getattr(CommandFactory, couchbase_command) + command = method_to_call( + shell_path=self.repository.cb_shell_path, + install_path=self.repository.cb_install_path, + username=username, + port=port, + sudo=self.need_sudo, + uid=self.uid, + hostname=hostname, + **new_kwargs, + ) + + logger.debug("couchbase command to run: {}".format(command)) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, command, environment_vars=env + ) + else: + couchbase_command = ( + couchbase_command + "_expect" + if not couchbase_command.endswith("_expect") + else couchbase_command + ) + logger.debug("new_couchbase_command: {}".format(couchbase_command)) + method_to_call = getattr(CommandFactory, couchbase_command) + command, env_vars = method_to_call( + shell_path=self.repository.cb_shell_path, + install_path=self.repository.cb_install_path, + username=username, + port=port, + sudo=self.need_sudo, + uid=self.uid, + hostname=hostname, + **new_kwargs, + ) + env.update(env_vars) + logger.debug("couchbase command to run: {}".format(command)) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, environment_vars=env + ) + return [stdout, stderr, exit_code] + + def run_os_command(self, os_command, **kwargs): + + method_to_call = getattr(CommandFactory, os_command) + command = method_to_call(sudo=self.need_sudo, uid=self.uid, **kwargs) + + logger.debug("os command to run: {}".format(command)) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, command + ) + logger.debug(f"os_command stdout: {stdout}") + logger.debug(f"os_command stderr: {stderr}") + logger.debug(f"os_command exit_code: {exit_code}") + return [stdout, stderr, exit_code] + + def restart_couchbase(self, provision=False): """stop the couchbase service and then start again""" self.stop_couchbase() - self.start_couchbase() + self.start_couchbase(provision) - def start_couchbase(self): - """ start the couchbase service""" + def start_couchbase(self, provision=False, no_wait=False): + """start the couchbase service""" logger.debug("Starting couchbase services") - command = CommandFactory.start_couchbase(self.repository.cb_install_path) - utilities.execute_bash(self.connection, command) + + self.run_couchbase_command("start_couchbase") server_status = Status.INACTIVE - #Waiting for one minute to start the server - end_time = time.time() + 60 + helper_lib.sleepForSecond(10) - #break the loop either end_time is exceeding from 1 minute or server is 
successfully started + if no_wait: + logger.debug("no wait - leaving start procedure") + return + + # Waiting for one minute to start the server + # for prox to investigate + end_time = time.time() + 3660 + + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started while time.time() < end_time and server_status == Status.INACTIVE: - helper_lib.sleepForSecond(1) # waiting for 1 second - server_status = self.status() # fetching status + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = self.status(provision) # fetching status + logger.debug("server status {}".format(server_status)) - # if the server is not running even in 60 seconds, then stop the further execution + # if the server is not running even in 60 seconds, then stop the + # further execution if server_status == Status.INACTIVE: - raise CouchbaseServicesError("Have failed to start couchbase server") - + raise CouchbaseServicesError( + "Have failed to start couchbase server" + ) def stop_couchbase(self): - """ stop the couchbase service""" + """stop the couchbase service""" try: logger.debug("Stopping couchbase services") - command = CommandFactory.stop_couchbase(self.repository.cb_install_path) - utilities.execute_bash(self.connection, command) + self.run_couchbase_command("stop_couchbase") + end_time = time.time() + 60 server_status = Status.ACTIVE while time.time() < end_time and server_status == Status.ACTIVE: helper_lib.sleepForSecond(1) # waiting for 1 second server_status = self.status() # fetching status + + logger.debug("Leaving stop loop") if server_status == Status.ACTIVE: - raise CouchbaseServicesError("Have failed to stop couchbase server") + logger.debug("Have failed to stop couchbase server") + raise CouchbaseServicesError( + "Have failed to stop couchbase server" + ) except CouchbaseServicesError as err: + logger.debug("Error: {}".format(err)) raise err except Exception as err: + logger.debug("Exception Error: {}".format(err)) if self.status() == Status.INACTIVE: - logger.debug("Seems like couchbase service is not running. {}".format(err.message)) + logger.debug( + "Seems like couchbase service is not running. {}".format( + str(err) + ) + ) else: - raise CouchbaseServicesError(err.message) + raise CouchbaseServicesError(str(err)) + + def ip_file_name(self): + + ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + check_ip_file, check_ip_file_err, exit_code = self.run_os_command( + os_command="check_file", file_path=ip_file + ) + if not (exit_code == 0 and "Found" in check_ip_file): + ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + logger.debug("IP file is {}".format(ip_file)) + return ip_file + + def staging_bootstrap_status(self): + logger.debug("staging_bootstrap_status") - def status(self): - """Check the server status. 
Healthy or Warmup could be one status if the server is running""" try: - command = CommandFactory.server_info(self.repository.cb_shell_path, self.connection.environment.host.name, - self.parameters.couchbase_port, self.parameters.couchbase_admin) - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - server_info, std_err, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + server_info_out, std_err, exit_code = self.run_couchbase_command( + couchbase_command="couchbase_server_info", hostname="127.0.0.1" + ) - status = helper_lib.get_value_of_key_from_json(server_info, 'status') + # logger.debug("Status output: {}".format(server_info)) + + status = helper_lib.get_value_of_key_from_json( + server_info_out, "status" + ) if status.strip() == StatusIsActive: logger.debug("Server status is: {}".format("ACTIVE")) return Status.ACTIVE else: logger.debug("Server status is: {}".format("INACTIVE")) return Status.INACTIVE + + except Exception as error: + # TODO + # rewrite it + logger.debug("Exception: {}".format(str(error))) + if re.search("Unable to connect to host at", str(error)): + logger.debug("Couchbase service is not running") + return Status.INACTIVE + + def status(self, provision=False): + """Check the server status. Healthy or Warmup could be one status + if the server is running""" + + logger.debug("checking status") + logger.debug(self.connection) + try: + + if provision: + username = self.snapshot.couchbase_admin + password = self.snapshot.couchbase_admin_password + + else: + password = self.parameters.couchbase_admin_password + username = self.parameters.couchbase_admin + + # TODO + # Check if there is a mount point - even a started Couchbase + # without mountpoint means VDB + # is down or corrupted + # Couchbase with config file can start and recreate empty buckets + # if there is no mount point + # for future version - maybe whole /opt/couchbase/var directory + # should be virtualized like for Docker + # to avoid problems + + logger.debug("Checking for mount points") + mount_point_state = helper_lib.check_server_is_used( + self.connection, self.parameters.mount_path + ) + logger.debug("Status of mount point {}".format(mount_point_state)) + + if mount_point_state == Status.INACTIVE: + logger.error( + "There is no mount point VDB is down regardless " + "Couchbase status" + ) + return Status.INACTIVE + + ip_file = self.ip_file_name() + + read_ip_file, std_err, exit_code = self.run_os_command( + os_command="cat", path=ip_file + ) + + server_info, std_err, exit_code = self.run_couchbase_command( + couchbase_command="get_server_list", + hostname="127.0.0.1", + username=username, + password=password, + ) + + if ( + not self.dSource + and self.parameters.node_list is not None + and len(self.parameters.node_list) > 0 + ): + multinode = True + else: + multinode = False + + for line in server_info.split("\n"): + logger.debug("Checking line: {}".format(line)) + if read_ip_file in line: + logger.debug("Checking IP: {}".format(read_ip_file)) + if "unhealthy" in line: + logger.error("We have unhealthy active node") + return Status.INACTIVE + if "healthy" in line: + logger.debug("We have healthy active node") + return Status.ACTIVE + + if multinode and "warmup" in line: + logger.debug( + "We have starting mode in multinode cluster" + ) + return Status.ACTIVE + + return Status.INACTIVE + except Exception as error: - if re.search("Unable to connect to host at", error.message): + # TODO + # rewrite it + logger.debug("Exception: {}".format(str(error))) + if 
re.search("Unable to connect to host at", str(error)): logger.debug("Couchbase service is not running") return Status.INACTIVE - def make_directory(self, directory_path): + def make_directory(self, directory_path, force_env_user=False): """ Create a directory and set the permission level 775 :param directory_path: The directory path :return: None """ + + # TODO + # add error handling for OS errors + logger.debug("Creating Directory {} ".format(directory_path)) - env = {'directory_path': directory_path} - command = CommandFactory.make_directory(directory_path) - utilities.execute_bash(self.connection, command) - logger.debug("Changing permission of directory path {}".format(directory_path)) - command = CommandFactory.change_permission(directory_path) - utilities.execute_bash(self.connection, command) + + command_output, std_err, exit_code = self.run_os_command( + os_command="make_directory", directory_path=directory_path + ) + + logger.debug( + "Changing permission of directory path {}".format(directory_path) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="change_permission", path=directory_path + ) + logger.debug("Changed the permission of directory") + def check_stale_mountpoint(self, path): + + output, stderr, exit_code = self.run_os_command( + os_command="df", path=path + ) + if exit_code != 0: + if "No such file or directory" in stderr: + # this is actually OK + return False + else: + logger.error( + "df retured error - stale mount point or other error" + ) + logger.error( + "stdout: {} stderr: {} exit_code: {}".format( + output, stderr, exit_code + ) + ) + return True + else: + return False + + def get_db_size(self, path: str) -> str: + """ + Get the size of the dataset. + + :param connection: Staging connection. + :param path: Mount location corresponding to dataset + + :return: du command output. + + """ + logger.debug("Started db sizing") + du_std, du_stderr, du_exit_code = self.run_os_command( + os_command="du", mount_path=path + ) + if du_exit_code != 0: + logger.error("Unable to calculate the dataset size") + logger.error(f"stderr: {du_stderr}") + raise UserError( + "Problem with measuring mounted file system", + "Ask OS admin to check mount", + du_stderr, + ) + logger.debug(f"Completed db sizing {du_std}") + return du_std + def create_config_dir(self): """create and return the hidden folder directory with name 'delphix'""" + + # TODO + # clean up error handling + logger.debug("Finding toolkit Path...") - command = CommandFactory.get_dlpx_bin() - bin_directory, std_err, exit_code = utilities.execute_bash(self.connection, command) + bin_directory, std_err, exit_code = self.run_os_command( + os_command="get_dlpx_bin" + ) + if bin_directory is None or bin_directory == "": raise Exception("Failed to find the toolkit directory") # Toolkit directory tested on linux x86_64Bit is 6 level below jq path @@ -141,69 +498,142 @@ def create_config_dir(self): while loop_var: bin_directory = os.path.dirname(bin_directory) loop_var = loop_var - 1 + logger.debug(f"bin_directory={bin_directory}") + logger.debug(f"DELPHIX_HIDDEN_FOLDER={DELPHIX_HIDDEN_FOLDER}") dir_name = bin_directory + "/" + DELPHIX_HIDDEN_FOLDER if not helper_lib.check_dir_present(self.connection, dir_name): - self.make_directory(dir_name) + self.make_directory(dir_name, force_env_user=True) return dir_name def source_bucket_list(self): """ - return all buckets exist on source server. Also contains the information bucketType, ramQuota, ramUsed, + return all buckets exist on source server. 
Also contains the + information bucketType, ramQuota, ramUsed, numReplicas :return: """ # See the bucket list on source server - logger.debug("Collecting bucket list information present on source server ") - env = {ENV_VAR_KEY: {'password': self.staged_source.parameters.xdcr_admin_password}} - command = CommandFactory.get_source_bucket_list(self.repository.cb_shell_path, - self.source_config.couchbase_src_host, - self.source_config.couchbase_src_port, - self.staged_source.parameters.xdcr_admin) - bucket_list, error, exit_code = utilities.execute_bash(self.connection, command_name=command, **env) - if bucket_list == "" or bucket_list is None: + logger.debug( + "Collecting bucket list information present on source server " + ) + + bucket_list, error, exit_code = self.run_couchbase_command( + couchbase_command="get_source_bucket_list", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.staged_source.parameters.xdcr_admin, + password=self.staged_source.parameters.xdcr_admin_password, + ) + + if bucket_list == "[]" or bucket_list is None: return [] - bucket_list = bucket_list.split("\n") - logger.debug("Source Bucket Information {}".format(bucket_list)) - return bucket_list + else: + logger.debug("clean up json") + bucket_list = bucket_list.replace("u'", "'") + bucket_list = bucket_list.replace("'", '"') + bucket_list = bucket_list.replace("True", '"True"') + bucket_list = bucket_list.replace("False", '"False"') + logger.debug("parse json") + bucket_list_dict = json.loads(bucket_list) + bucket_list_dict = list( + map(helper_lib.remap_bucket_json, bucket_list_dict) + ) + + logger.debug("Source Bucket Information {}".format(bucket_list_dict)) + return bucket_list_dict - def source_bucket_list_offline(self, filename): + def get_backup_date(self, x): + w = x.replace( + "{}/{}/{}".format( + self.parameters.couchbase_bak_loc, + self.parameters.archive_name, + self.parameters.couchbase_bak_repo, + ), + "", + ) + g = re.match(r"/(.+?)/.*", w) + if g: + return g.group(1) + else: + return "" + + def source_bucket_list_offline(self): """ - This function will be used in CB backup manager. It will return the same output as by - source_bucket_list method. To avoid source/production server dependency this function will be used. - In a file, put all the bucket related information of source server. This function will cat and return the - contents of that file. It is useful for cb backup manager ingestion mechanism + This function will be used in CB backup manager. It will return the + same output as by + source_bucket_list method. To avoid source/production server dependency + this function will be used. + In a file, put all the bucket related information of source server. + This function will cat and return the + contents of that file. It is useful for cb backup manager ingestion + mechanism FilePath : /couchbase_src_bucket_info In this file add output of below command: - /opt/couchbase/bin/couchbase-cli bucket-list --cluster :8091 --username $username --password $pass - From here all source bucket list information we can fetch and other related data of this bucket should be placed + /opt/couchbase/bin/couchbase-cli bucket-list --cluster + :8091 --username $username --password $pass + From here all source bucket list information we can fetch and other + related data of this bucket should be placed at backup location. - :param filename: filename(couchbase_src_bucket_info.cfg) where bucket information is kept. 
+ :param filename: filename(couchbase_src_bucket_info.cfg) where bucket + information is kept. :return: bucket list information """ - logger.debug( - "Reading bucket list information of source server from {} ".format(filename)) - command = CommandFactory.read_file(filename) - bucket_list, error, exit_code = utilities.execute_bash(self.connection, command) - if bucket_list == "" or bucket_list is None: - return [] - bucket_list = bucket_list.split("\n") - return bucket_list - def node_init(self): + logger.debug(self.parameters.couchbase_bak_loc) + logger.debug(self.parameters.couchbase_bak_repo) + + bucket_list, error, exit_code = self.run_os_command( + os_command="get_backup_bucket_list", + path=os.path.join( + self.parameters.couchbase_bak_loc, + self.parameters.archive_name, + self.parameters.couchbase_bak_repo, + ), + ) + + backup_list = bucket_list.split("\n") + logger.debug("Bucket search output: {}".format(backup_list)) + date_list = list(map(self.get_backup_date, backup_list)) + date_list.sort() + logger.debug("date list: {}".format(date_list)) + files_to_process = [x for x in backup_list if date_list[-1] in x] + + logger.debug(files_to_process) + + bucket_list_dict = [] + + for f in files_to_process: + + bucket_file_content, error, exit_code = self.run_os_command( + os_command="cat", path=f + ) + + logger.debug(bucket_file_content) + bucket_json = json.loads(bucket_file_content) + bucket_list_dict.append(remap_bucket_json(bucket_json)) + + logger.debug("Bucket search output: {}".format(bucket_list_dict)) + return bucket_list_dict + + def node_init(self, nodeno=1): """ - This method initializes couchbase server node. Where user sets different required paths + This method initializes couchbase server node. Where user sets + different required paths :return: None """ logger.debug("Initializing the NODE") - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - command = CommandFactory.node_init(self.repository.cb_shell_path, self.parameters.couchbase_port, - self.parameters.couchbase_admin, self.parameters.mount_path) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="node_init", + data_path="{}/data_{}".format(self.parameters.mount_path, nodeno), + ) + logger.debug("Command Output {} ".format(command_output)) def get_config_directory(self): """ - Hidden directory path inside mount directory will be returned. which is created in method create_config_dir + Hidden directory path inside mount directory will be returned. 
which + is created in method create_config_dir :return: Return the config directory """ @@ -219,8 +649,867 @@ def get_config_file_path(self): logger.debug("Config filepath is: {}".format(config_file_path)) return config_file_path + # Defined for future updates + def get_indexes_definition(self): + # by default take from staging but later take from source + logger.debug("Finding indexes....") + password = self.parameters.couchbase_admin_password + user = self.parameters.couchbase_admin + port = self.parameters.couchbase_port + if self.dSource: + if self.parameters.d_source_type == constants.CBBKPMGR: + hostname = self.parameters.couchbase_host + else: + port = self.source_config.couchbase_src_port + user = self.staged_source.parameters.xdcr_admin + password = self.staged_source.parameters.xdcr_admin_password + hostname = self.source_config.couchbase_src_host + else: + hostname = self.connection.environment.host.name + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="get_indexes_name", + hostname=hostname, + username=user, + port=port, + password=password, + ) + + logger.debug("Indexes are {}".format(command_output)) + indexes_raw = json.loads(command_output) + indexes = [] + + logger.debug( + "dSource type for indexes: {}".format( + self.parameters.d_source_type + ) + ) + + if self.parameters.d_source_type == constants.CBBKPMGR: + logger.debug("Only build for backup ingestion") + buckets = {} + for i in indexes_raw["indexes"]: + bucket_name = i["bucket"] + index_name = i["indexName"] + scope_name = i["scope"] if "scope" in i.keys() else "_default" + collection_name = ( + i["collection"] if "collection" in i.keys() else "_default" + ) + + if bucket_name not in buckets: + buckets[bucket_name] = {} + if scope_name not in buckets[bucket_name].keys(): + buckets[bucket_name][scope_name] = {} + if ( + collection_name + not in buckets[bucket_name][scope_name].keys() + ): + buckets[bucket_name][scope_name][collection_name] = [] + + buckets[bucket_name][scope_name][collection_name].append( + index_name + ) + + for bucket_name in buckets.keys(): + for scope_name in buckets[bucket_name].keys(): + for collection_name in buckets[bucket_name][ + scope_name + ].keys(): + ind = buckets[bucket_name][scope_name][collection_name] + if ( + collection_name == "_default" + and scope_name == "_default" + ): + ind_def = ( + f"build index on `{bucket_name}` " + f'(`{"`,`".join(ind)}`)' + ) + else: + ind_def = ( + f"build index on `{bucket_name}`." 
+ f"{scope_name}.{collection_name} " + f'(`{"`,`".join(ind)}`)' + ) + indexes.append(ind_def) + + else: + # full definition for replication + + for i in indexes_raw["indexes"]: + indexes.append( + i["definition"].replace( + 'defer_build":true', 'defer_build":false' + ) + ) + return indexes + + # Defined for future updates + def build_index(self, index_def): + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="build_index", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + index_def=index_def, + ) + + logger.debug("command_output is {}".format(command_output)) + return command_output + + def check_index_build(self): + # set timeout to 12 hours + end_time = time.time() + 3660 * 12 + + tobuild = 1 + + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started + while time.time() < end_time and tobuild != 0: + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="check_index_build", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + ) + + logger.debug("command_output is {}".format(command_output)) + logger.debug("std_err is {}".format(std_err)) + logger.debug("exit_code is {}".format(exit_code)) + try: + command_output_dict = json.loads(command_output) + logger.debug("dict {}".format(command_output_dict)) + tobuild = command_output_dict["results"][0]["unbuilt"] + logger.debug("to_build is {}".format(tobuild)) + helper_lib.sleepForSecond(30) # waiting for 1 second + except Exception as e: + logger.debug(str(e)) + + def save_config(self, what, nodeno=1): + + # TODO + # Error handling + + logger.debug("start save_config") + + targetdir = self.get_config_directory() + target_config_filename = os.path.join( + targetdir, "config.dat_{}".format(nodeno) + ) + target_local_filename = os.path.join( + targetdir, "local.ini_{}".format(nodeno) + ) + target_encryption_filename = os.path.join( + targetdir, "encrypted_data_keys_{}".format(nodeno) + ) + + if nodeno == 1 or int(self.repository.version.split(".")[0]) >= 7: + ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_{}".format(nodeno) + ) + output, err, exit_code = self.run_os_command( + os_command="check_file", file_path=ip_file + ) + if exit_code != 0 and "Found" not in output: + ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_start_{}".format(nodeno) + ) + else: + ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_start_{}".format(nodeno) + ) + + filename = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", + srcname=filename, + trgname=target_config_filename, + ) + + logger.debug( + "save config.dat cp - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + if exit_code != 0: + raise UserError( + "Error saving configuration file: config.dat", + "Check sudo or user privileges to read Couchbase config.dat " + "file", + std_err, + 
) + + # encryption data keys may not exist on Community edition + + filename = "{}/../var/lib/couchbase/config/encrypted_data_keys".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + ( + check_encrypted_data_keys, + check_ip_file_err, + exit_code, + ) = self.run_os_command(os_command="check_file", file_path=filename) + + if exit_code == 0 and "Found" in check_encrypted_data_keys: + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", + srcname=filename, + trgname=target_encryption_filename, + ) + + logger.debug( + "save encrypted_data_keys.dat cp - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + if exit_code != 0: + raise UserError( + "Error saving configuration file: encrypted_data_keys", + "Check sudo or user privileges to read Couchbase " + "encrypted_data_keys file", + std_err, + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", srcname=ip_file, trgname=target_ip_filename + ) + + logger.debug( + "save {} - exit_code: {} stdout: {} std_err: {}".format( + ip_file, exit_code, command_output, std_err + ) + ) + + if exit_code != 0: + raise UserError( + "Error saving configuration file: {}".format(ip_file), + "Check sudo or user privileges to read Couchbase " + "{} file".format(ip_file), + std_err, + ) + + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", srcname=filename, trgname=target_local_filename + ) + + logger.debug( + "save local.ini cp - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + if exit_code != 0: + raise UserError( + "Error saving configuration file: local.ini", + "Check sudo or user privileges to read Couchbase local.ini " + "file", + std_err, + ) + + if int(self.repository.version.split(".")[0]) >= 7: + chronicle_target_dir = os.path.join( + targetdir, f"chronicle_{nodeno}" + ) + ( + chronicle_target_dir_command_output, + _, + chronicle_target_dir_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=chronicle_target_dir + ) + if ( + chronicle_target_dir_exit_code == 0 + and "Found" in chronicle_target_dir_command_output + ): + self.run_os_command( + os_command="delete_dir", dirname=chronicle_target_dir + ) + self.run_os_command( + os_command="os_cpr", + srcname="{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ), + trgname=chronicle_target_dir, + ) + if ( + hasattr(self.parameters, "d_source_type") + and self.parameters.d_source_type == constants.CBBKPMGR + ): + self.run_os_command( + os_command="write_file", + filename=os.path.join( + self.parameters.mount_path, ".delphix/backup_restore.txt" + ), + data=self.parameters.archive_name, + ) + + def check_cluster_notconfigured(self): + + logger.debug("check_cluster") + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="get_server_list", + hostname=self.connection.environment.host.name, + ) + + if "unknown pool" in command_output: + return True + else: + return False + + def check_cluster_configured(self): + + logger.debug("check_cluster configured") + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="get_server_list", + hostname=self.connection.environment.host.name, + ) + + if 
"healthy active" in command_output: + return True + else: + return False + + def check_config(self): + + filename = os.path.join(self.get_config_directory(), "config.dat") + cmd = CommandFactory.check_file(filename) + logger.debug("check file cmd: {}".format(cmd)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd, callback_func=self.ignore_err + ) + + if exit_code == 0 and "Found" in command_output: + return True + else: + return False + + def delete_data_folder(self, nodeno=1): + data_folder = "{}/data_{}".format(self.parameters.mount_path, nodeno) + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=data_folder + ) + logger.debug( + f"check data directory >> command_output=={command_output}" + f" , command_stderr=={command_stderr} , " + f"command_exit_code=={command_exit_code}" + ) + if command_output == "Found": + self.run_os_command(os_command="delete_dir", dirname=data_folder) + + def delete_config_folder(self): + if int(self.repository.version.split(".")[0]) >= 6: + config_directory_path = "{}/../var/lib/couchbase/config".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=config_directory_path + ) + logger.debug( + f"check directory >> command_output=={command_output}" + f" , command_stderr=={command_stderr} , " + f"command_exit_code=={command_exit_code}" + ) + if command_output == "Found": + target_folder = f"{config_directory_path}_bkp" + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=target_folder + ) + if command_output == "Found": + self.run_os_command( + os_command="delete_dir", dirname=target_folder + ) + self.run_os_command( + os_command="os_mv", + srcname=config_directory_path, + trgname=target_folder, + ) + # logger.debug( + # f"mv directory >> command_output=={command_output}" + # f" , command_stderr=={command_stderr} , " + # f"command_exit_code=={command_exit_code}" + # ) + + def delete_xdcr_config(self): + if self.parameters.d_source_type == "XDCR": + is_xdcr_setup, cluster_name = self.delete_replication() + if is_xdcr_setup: + logger.info("Deleting XDCR") + self.xdcr_delete(cluster_name) + + def restore_config(self, what, nodeno=1): + + # TODO + # Error handling + + logger.debug("start restore_config") + + sourcedir = self.get_config_directory() + + source_config_file = os.path.join( + sourcedir, "config.dat_{}".format(nodeno) + ) + source_local_filename = os.path.join( + sourcedir, "local.ini_{}".format(nodeno) + ) + source_encryption_keys = os.path.join( + sourcedir, "encrypted_data_keys_{}".format(nodeno) + ) + + source_ip_file = os.path.join(sourcedir, "ip_{}".format(nodeno)) + target_ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + delete_ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + check_ip_file, check_ip_file_err, exit_code = self.run_os_command( + os_command="check_file", file_path=source_ip_file + ) + + if not (exit_code == 0 and "Found" in check_ip_file): + source_ip_file = os.path.join( + sourcedir, "ip_start_{}".format(nodeno) + ) + target_ip_file = "{}/../var/lib/couchbase/ip_start".format( + 
helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + delete_ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + logger.debug("IP file is {}".format(source_ip_file)) + + targetfile = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", srcname=source_config_file, trgname=targetfile + ) + + logger.debug( + "config.dat restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + ( + check_encrypted_data_keys, + check_ip_file_err, + exit_code, + ) = self.run_os_command( + os_command="check_file", file_path=source_encryption_keys + ) + + logger.debug( + "Check check_encrypted_data_keys - exit_code: {} " + "stdout: {}".format(exit_code, check_encrypted_data_keys) + ) + + if exit_code == 0 and "Found" in check_encrypted_data_keys: + targetfile = ( + "{}/../var/lib/couchbase/config/encrypted_data_keys".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + ) + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", + srcname=source_encryption_keys, + trgname=targetfile, + ) + + logger.debug( + "encrypted_data_keys restore - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + ( + check_ip_delete_file, + check_ip_delete_file, + check_ip_exit_code, + ) = self.run_os_command( + os_command="check_file", file_path=delete_ip_file + ) + + logger.debug( + "Check delete old ip_file - exit_code: {} stdout: {}".format( + check_ip_exit_code, check_ip_delete_file + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_mv", + srcname=delete_ip_file, + trgname="{}.bak".format(delete_ip_file), + ) + + logger.debug( + "ipfile delete - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", srcname=source_ip_file, trgname=target_ip_file + ) + + logger.debug( + "ipfile restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + targetfile = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cp", + srcname=source_local_filename, + trgname=targetfile, + ) + + logger.debug( + "local.ini restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + if int(self.repository.version.split(".")[0]) >= 7: + source_chronicle_dirname = os.path.join( + sourcedir, "chronicle_{}".format(nodeno) + ) + target_chronicle_dirname = ( + "{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + ) + command_output, std_err, exit_code = self.run_os_command( + os_command="check_directory", dir_path=target_chronicle_dirname + ) + if exit_code == 0 and "Found" in command_output: + self.run_os_command( + os_command="delete_dir", dirname=target_chronicle_dirname + ) + command_output, std_err, exit_code = self.run_os_command( + os_command="os_cpr", + srcname=source_chronicle_dirname, + trgname=target_chronicle_dirname, + ) + + logger.debug( + 
"chronicle restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + if what == "parent": + # local.ini needs to have a proper entry + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + newpath = "{}/data_{}".format(self.parameters.mount_path, nodeno) + cmd = CommandFactory.sed( + filename, + "s|view_index_dir.*|view_index_dir={}|".format(newpath), + self.need_sudo, + self.uid, + ) + logger.debug("sed config cmd: {}".format(cmd)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "setting index paths - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + cmd = CommandFactory.sed( + filename, + "s|database_dir.*|database_dir={}|".format(newpath), + self.need_sudo, + self.uid, + ) + logger.debug("sed config cmd: {}".format(cmd)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "setting data paths - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + def delete_config(self): + + # TODO: + # error handling + + logger.debug("start delete_config") + + filename = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + + cmd = CommandFactory.check_file(filename, self.need_sudo, self.uid) + logger.debug("check file cmd: {}".format(cmd)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd, callback_func=self.ignore_err + ) + + if exit_code == 0 and "Found" in command_output: + cmd = CommandFactory.os_mv( + filename, "{}.bak".format(filename), self.need_sudo, self.uid + ) + logger.debug("rename config cmd: {}".format(cmd)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "rename config.dat to bak - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + command_output, std_err, exit_code = self.run_os_command( + os_command="sed", filename=filename, regex="s/view_index_dir.*//" + ) + + logger.debug( + "clean local.ini index - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + command_output, std_err, exit_code = self.run_os_command( + os_command="sed", filename=filename, regex="s/database_dir.*//" + ) + + logger.debug( + "clean local.ini data - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + command_output, std_err, exit_code = self.run_os_command( + os_command="change_permission", path=filename + ) + + logger.debug( + "fix local.ini permission - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) + + chronicle_dir_name = "{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + self.run_os_command( + os_command="delete_dir", dirname=chronicle_dir_name + ) + + def ignore_err(self, input): + return True + + def rename_cluster(self): + """Rename cluster based on user entries""" + + logger.debug("start rename_cluster") + self.run_couchbase_command( + 
couchbase_command="rename_cluster", + username=self.snapshot.couchbase_admin, + password=self.snapshot.couchbase_admin_password, + newname=self.parameters.tgt_cluster_name, + ) + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="change_cluster_password", + username=self.snapshot.couchbase_admin, + password=self.snapshot.couchbase_admin_password, + newuser=self.parameters.couchbase_admin, + newpass=self.parameters.couchbase_admin_password, + ) + + logger.debug( + "rename cluster - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) + + def start_node_bootstrap(self): + logger.debug("start start_node_bootstrap") + self.start_couchbase(no_wait=True) + end_time = time.time() + 3660 + server_status = Status.INACTIVE + + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started + while time.time() < end_time and server_status != Status.ACTIVE: + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = self.staging_bootstrap_status() # fetching status + logger.debug("server status {}".format(server_status)) + + def addnode(self, nodeno, node_def): + logger.debug("start addnode") + + self.delete_config() + + self.start_node_bootstrap() + + self.node_init(nodeno) + + helper_lib.sleepForSecond(10) + + services = ["data", "index", "query"] + + if "fts_service" in node_def and node_def["fts_service"]: + services.append("fts") + + if "eventing_service" in node_def and node_def["eventing_service"]: + services.append("eventing") + + if "analytics_service" in node_def and node_def["analytics_service"]: + services.append("analytics") + + logger.debug("services to add: {}".format(services)) + + logger.debug("node host name / IP: {}".format(node_def["node_addr"])) + + resolve_name_command = CommandFactory.resolve_name( + hostname=node_def["node_addr"] + ) + logger.debug( + "resolve_name_command command: {}".format(resolve_name_command) + ) + resolve_name_output, std_err, exit_code = utilities.execute_bash( + self.connection, resolve_name_command + ) + logger.debug( + "resolve_name_command Output {} ".format(resolve_name_output) + ) + + if int(self.repository.version.split(".")[0]) >= 7: + if "(CE)" in self.repository.version: + new_port = "8091" + else: + new_port = "18091" + else: + new_port = "18091" + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="server_add", + hostname=self.connection.environment.host.name, + newhost=resolve_name_output, + services=",".join(services), + new_port=new_port, + ) + + logger.debug( + "Add node Output {} stderr: {} exit_code: {} ".format( + command_output, std_err, exit_code + ) + ) + + if exit_code != 0: + logger.debug("Adding node error") + raise UserError( + "Problem with adding node", + "Check an output and fix problem before retrying to provision " + "a VDB", + "stdout: {} stderr:{}".format(command_output, std_err), + ) + + command_output, std_err, exit_code = self.run_couchbase_command( + couchbase_command="rebalance", + hostname=self.connection.environment.host.name, + ) + + logger.debug( + "Rebalance Output {} stderr: {} exit_code: {} ".format( + command_output, std_err, exit_code + ) + ) + + if exit_code != 0: + logger.debug("Rebalancing error") + raise UserError( + "Problem with rebalancing cluster", + "Check an output and fix problem before retrying to provision " + "a VDB", + "stdout: {} stderr:{}".format(command_output, std_err), + ) + if __name__ == "__main__": - print "Checking Couchbase Class" 
- test_object = CouchbaseOperation(Resource.ObjectBuilder.set_dsource(True).build()) - print (test_object.get_config_file_path.__doc__) + # print "Checking Couchbase Class" + test_object = CouchbaseOperation( + Resource.ObjectBuilder.set_dsource(True).build() + ) + print(test_object.get_config_file_path.__doc__) diff --git a/src/controller/db_exception_handler.py b/src/controller/db_exception_handler.py index ee73936..a7ce39b 100644 --- a/src/controller/db_exception_handler.py +++ b/src/controller/db_exception_handler.py @@ -1,28 +1,34 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -import types -import re import logging +import re +import sys +import traceback +import types -from db_commands.constants import CLUSTER_ALREADY_PRESENT, BUCKET_NAME_ALREADY_EXIST, MULTIPLE_VDB_ERROR, \ - SHUTDOWN_FAILED, ALREADY_CLUSTER_INIT, ALREADY_CLUSTER_FOR_BUCKET -from controller import helper_lib +from db_commands.constants import ALREADY_CLUSTER_FOR_BUCKET +from db_commands.constants import ALREADY_CLUSTER_INIT +from db_commands.constants import BUCKET_NAME_ALREADY_EXIST +from db_commands.constants import CLUSTER_ALREADY_PRESENT +from db_commands.constants import MULTIPLE_VDB_ERROR +from db_commands.constants import SHUTDOWN_FAILED +from dlpx.virtualization.platform.exceptions import UserError from internal_exceptions.base_exceptions import GenericUserError from internal_exceptions.plugin_exceptions import ERR_RESPONSE_DATA logger = logging.getLogger(__name__) -# This is meta class which decorates the each functions of child class with below things: +# This is meta class which decorates the each functions of child +# class with below things: # Ignore common exceptions # Enable logging in more intuitive way class DatabaseExceptionHandlerMeta(type): - - def __new__(mcs, caller_name, caller_base_name, attributes_in_caller): + def __new__(mcs, caller_name, caller_base_name, attr_in_caller): """ :param caller_name: :type caller_name: Class type @@ -34,55 +40,88 @@ def __new__(mcs, caller_name, caller_base_name, attributes_in_caller): """ # iteration for each method of a caller class - for attribute_name, attribute_value in attributes_in_caller.iteritems(): + for attribute_name, attribute_value in attr_in_caller.iteritems(): if isinstance(attribute_value, types.FunctionType): - if attribute_name == "__init__" or attribute_name == "status" or attribute_name == "check_attribute_error": + if ( + attribute_name == "__init__" + or attribute_name == "status" + or attribute_name == "check_attribute_error" + ): continue - attributes_in_caller[attribute_name] = mcs.handle_exception_decorator(attribute_value) + a = mcs.handle_exception_decorator(attribute_value) + attr_in_caller[attribute_name] = a try: - return super(DatabaseExceptionHandlerMeta, mcs).__new__(mcs, caller_name, caller_base_name, - attributes_in_caller) + return super(DatabaseExceptionHandlerMeta, mcs).__new__( + mcs, caller_name, caller_base_name, attr_in_caller + ) except Exception as err: - logger.debug("Exception occurred in metaclass: {}".format(err.message)) + logger.debug( + "Exception occurred in metaclass: {}".format( + str(err), + ) + ) raise @classmethod def _exception_generator_factory(mcs, err_string): """ :param err_string: - :raises: Exceptions based on the output. It matches the error string with predefined strings. - In some cases we need to kill the program and in some cases it is not. This is distinguished by the + :raises: Exceptions based on the output. 
It matches the error string + with predefined strings. + In some cases we need to kill the program and in some cases it + is not. This is distinguished by the error string. """ - if (re.search(CLUSTER_ALREADY_PRESENT, err_string) or - re.search(BUCKET_NAME_ALREADY_EXIST, err_string) or - re.search(MULTIPLE_VDB_ERROR, err_string) or - re.search(SHUTDOWN_FAILED, err_string) or - re.search(ALREADY_CLUSTER_FOR_BUCKET, err_string) or - re.search(ALREADY_CLUSTER_INIT, err_string)): + if ( + re.search(CLUSTER_ALREADY_PRESENT, err_string) + or re.search(BUCKET_NAME_ALREADY_EXIST, err_string) + or re.search(MULTIPLE_VDB_ERROR, err_string) + or re.search(SHUTDOWN_FAILED, err_string) + or re.search(ALREADY_CLUSTER_FOR_BUCKET, err_string) + or re.search(ALREADY_CLUSTER_INIT, err_string) + ): logger.debug("Gracefully accepted the last exception") return logger.debug("Searching predefined exception for this error") err_code = get_err_code(err_string) - raise GenericUserError(ERR_RESPONSE_DATA[err_code]['MESSAGE'], ERR_RESPONSE_DATA[err_code]['ACTION'], err_string) + raise GenericUserError( + ERR_RESPONSE_DATA[err_code]["MESSAGE"], + ERR_RESPONSE_DATA[err_code]["ACTION"], + err_string, + ) @classmethod def handle_exception_decorator(mcs, function_name): """ - Decorating function with exception handling. Also we can control the output of each couchbase + Decorating function with exception handling. Also we can control the + output of each couchbase command at single place. :param function_name: Method of a class which is not static and class :type : function :return : None - """ + """ def wrapper_function(*args, **kwargs): try: output_list = function_name(*args, **kwargs) return output_list + + except UserError: + logger.debug("User Error found") + ttype, value, traceb = sys.exc_info() + logger.debug("type: {}, value: {}".format(ttype, value)) + logger.debug("trackback") + logger.debug(traceback.format_exc()) + raise + except Exception as error: - logger.debug("Caught Exception : {}".format(error.message)) - mcs._exception_generator_factory(error.message) + logger.debug("Caught Exception : {}".format(str(error))) + logger.debug("pioro") + ttype, value, traceb = sys.exc_info() + logger.debug("type: {}, value: {}".format(ttype, value)) + logger.debug("trackback") + logger.debug(traceback.format_exc()) + mcs._exception_generator_factory(str(error)) return wrapper_function @@ -95,4 +134,4 @@ def get_err_code(error_string): search_string = ERR_RESPONSE_DATA[each_err_code]["ERR_STRING"] if re.search(search_string, error_string): return each_err_code - return 'DEFAULT_ERR' + return "DEFAULT_ERR" diff --git a/src/controller/helper_lib.py b/src/controller/helper_lib.py index dfe0f19..b3d5462 100644 --- a/src/controller/helper_lib.py +++ b/src/controller/helper_lib.py @@ -1,16 +1,19 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2024 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ - This module contains common functionality that is being used across plugin. Like bucket size calculation, read file, - write data into file and also operations required in discovery. Moreover it helps in colorful logging in debug log. 
- Recommending to view the logs using the tail command then easily segregate the running command/output/exception/debug - messages + This module contains common functionality that is being used across plugin. + Like bucket size calculation, read file, write data into file and also + operations required in discovery. + Moreover it helps in colorful logging in debug log. Recommending to view the + logs using the tail command then easily segregate the + running command/output/exception/debug + messages. """ -####################################################################################################################### +############################################################################## import json import logging @@ -21,12 +24,15 @@ import time from datetime import datetime -import db_commands.constants +import db_commands from db_commands.commands import CommandFactory from db_commands.constants import DEFAULT_CB_BIN_PATH - -from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError, SourceConfigDiscoveryError, FileIOError, \ - UnmountFileSystemError +from dlpx.virtualization.platform import Status +from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.plugin_exceptions import FileIOError +from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError +from internal_exceptions.plugin_exceptions import SourceConfigDiscoveryError +from internal_exceptions.plugin_exceptions import UnmountFileSystemError from utils import utilities # Global logger object for this file @@ -36,15 +42,25 @@ def find_binary_path(source_connection): """ :param source_connection: Connection for the source environment - :return: Bin path defined in environment variable '$COUCHBASE_PATH'. If it is not defined then "/opt/couchbase/bin" + :return: Bin path defined in environment variable '$COUCHBASE_PATH'. + If it is not defined then "/opt/couchbase/bin" """ logger.debug("Finding Binary Path...") - binary_paths, std_err, exit_code = utilities.execute_bash(source_connection, CommandFactory.find_binary_path()) + binary_paths, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.find_binary_path() + ) if binary_paths == "": - logger.debug("Please verify COUCHBASE_PATH is defined. Checking at default location {}".format(DEFAULT_CB_BIN_PATH)) + logger.debug( + "Please verify COUCHBASE_PATH is defined. 
Checking at default " + "location {}".format(DEFAULT_CB_BIN_PATH) + ) binary_paths = DEFAULT_CB_BIN_PATH else: - logger.debug("List of couchbase path found are {}".format(binary_paths.split(';'))) + logger.debug( + "List of couchbase path found are {}".format( + binary_paths.split(";") + ) + ) logger.debug("Finding Binary: {}".format(binary_paths)) return binary_paths @@ -56,8 +72,9 @@ def find_shell_path(source_connection, binary_path): :return:path of cluster management utility: {couchbase-cli} """ logger.debug("Finding Shell Path...") - shell_path, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.find_shell_path(binary_path)) + shell_path, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.find_shell_path(binary_path) + ) if shell_path == "": message = "Shell path {}/couchbase-cli not found".format(binary_path) raise RepositoryDiscoveryError(message) @@ -69,31 +86,82 @@ def find_install_path(source_connection, binary_path): :param source_connection:Connection for the source environment :param binary_path: Couchbase binary path - :return: path of couchbase-server, through which daemon processes can start in background + :return: path of couchbase-server, through which daemon processes can + start in background """ logger.debug("Finding install Path...") - install_path, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.find_install_path(binary_path)) + install_path, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.find_install_path(binary_path) + ) if install_path == "": - message = "Install path {}/couchbase-server not found".format(binary_path) + message = "Install path {}/couchbase-server not found".format( + binary_path + ) raise RepositoryDiscoveryError(message) else: - logger.debug("couchbase-server found in directory {}".format(install_path)) + logger.debug( + "couchbase-server found in directory {}".format(install_path) + ) return install_path def find_version(source_connection, install_path): - """ return the couchbase version installed on the host""" - cb_version, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.get_version(install_path)) + """return the couchbase version installed on the host""" + cb_version, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_version(install_path) + ) version = re.search(r"\d.*$", cb_version).group() logger.debug("Couchbase version installed {}".format(version)) return version +def find_ids(source_connection, install_path): + """return the couchbase uid and gid""" + std_out, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_ids(install_path) + ) + logger.debug("find ids output: {}".format(std_out)) + ids = re.search(r"[-rwx.]+\s\d\s([\d]+)\s([\d]+).*", std_out) + if ids: + uid = int(ids.group(1)) + gid = int(ids.group(2)) + else: + uid = -1 + gid = -1 + logger.debug("Couchbase user uid {} gid {}".format(uid, gid)) + return (uid, gid) + + +def find_whoami(source_connection): + """return the user env id""" + std_out, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.whoami() + ) + logger.debug("find whoami output: {}".format(std_out)) + ids = re.search(r"uid=([\d]+).*gid=([\d]+)", std_out) + if ids: + uid = int(ids.group(1)) + gid = int(ids.group(2)) + else: + uid = -1 + gid = -1 + logger.debug("Delphix user uid {} gid {}".format(uid, gid)) + return (uid, gid) + + +def need_sudo(source_connection, 
couchbase_uid, couchbase_gid): + (uid, gid) = find_whoami(source_connection) + if uid != couchbase_uid or gid != couchbase_gid: + return True + else: + return False + + def is_instance_present_of_gosecrets(source_connection): - """ check couchbase server is running or not""" - instance, stderr, exit_code = utilities.execute_bash(source_connection, CommandFactory.get_process()) + """check couchbase server is running or not""" + instance, stderr, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_process() + ) # return true if 'gosecrets' string is present in output of get_process return "gosecrets" in instance @@ -104,36 +172,64 @@ def get_data_directory(source_connection, repository): couchbase_base_dir = os.path.dirname(couchbase_binary_path) filename = "{}/etc/couchbase/static_config".format(couchbase_base_dir) static_config, stderr, exit_code = read_file(source_connection, filename) - if not re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config): + if not re.search( + r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config + ): message = "Cannot find data directory" logger.debug(message) raise SourceConfigDiscoveryError(message) - data_directory = re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config).group() + data_directory = re.search( + r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config + ).group() logger.debug("data_directory is {} ".format(data_directory)) return data_directory def get_base_directory_of_given_path(binary_path): - """ Return the base directory of given path """ + """Return the base directory of given path""" path = os.path.split(binary_path)[0] return path -def get_all_bucket_list_with_size(bucket_output, bucket=None): - """ Return bucket name with ramUsed( adjust ramused value ) from bucket_output""" - logger.debug("bucket_output: {}".format(bucket_output)) +def remap_bucket_json(bucket): + output = {} + if "bucketType" in bucket: + output["bucketType"] = bucket["bucketType"] + if "name" in bucket: + output["name"] = bucket["name"] + if "quota" in bucket and "ram" in bucket["quota"]: + output["ram"] = bucket["quota"]["ram"] + elif "ramQuota" in bucket: + # this is in MB + output["ram"] = int(bucket["ramQuota"]) * 1024 * 1024 + else: + logger.debug("No memory in bucket - setting to default") + output["ram"] = 1024000 + if "compressionMode" in bucket: + output["compressionMode"] = bucket["compressionMode"] + else: + output["compressionMode"] = None + return output + + +def get_all_bucket_list_with_size(bucket_output): + """ + Return bucket name with ramUsed( adjust ramused value ) + from bucket_output + """ + additional_buffer = 10 min_size = 104857600 all_bucket_list = "" for line in bucket_output: - bucket_name = None - ram_size = 0 - if line.find(':') == -1: # find the bucket name + if line.find(":") == -1: # find the bucket name all_bucket_list = all_bucket_list + line + "," elif line.find("ramUsed") != -1: # find ramUsed row in output - ram_size = int(line.split(':')[1].strip()) + ram_size = int(line.split(":")[1].strip()) # Formula used used bucketsize/2 + 10% additional memory - ram_size = (ram_size) / 2 + ((ram_size / 2) * additional_buffer // 100) + ram_size = (ram_size) / 2 + ( + (ram_size / 2) * additional_buffer // 100 + ) if ram_size < min_size: ram_size = min_size all_bucket_list = all_bucket_list + str(ram_size) + ":" @@ -143,33 +239,71 @@ def get_all_bucket_list_with_size(bucket_output, bucket=None): def get_stg_all_bucket_list_with_ramquota_size(bucket_output): - """ Return bucket name 
with ramQuota from bucket_output. It will help in VDB creation as a reference value for - bucket + """Return bucket name with ramQuota from bucket_output. It will help in + VDB creation as a reference value for + bucket """ logger.debug("bucket_output: {}".format(bucket_output)) all_bucket_list = "" for line in bucket_output: - bucket_name = None - if line.find(':') == -1: # find the bucket name + if line.find(":") == -1: # find the bucket name all_bucket_list = all_bucket_list + line + "," - elif line.find("ramQuota") != -1: # find ramQuota row in output - ram_quota = int(line.split(':')[1].strip()) + elif line.find("ramQuota") != -1: # find ramQuota row in output + ram_quota = int(line.split(":")[1].strip()) all_bucket_list = all_bucket_list + str(ram_quota) + ":" all_bucket_list = all_bucket_list.strip(":") logger.debug("All bucket list is: {}".format(all_bucket_list)) return all_bucket_list.split(":") +def filter_bucket_name_from_json(bucket_output): + """Filter bucket name from bucket_output. Return list of + bucket names present in bucket_output""" + output = [x["name"] for x in bucket_output if x["ram"] > 0] + logger.debug("Bucket list: {}".format(output)) + return output + + def filter_bucket_name_from_output(bucket_output): - """ Filter bucket name from bucket_output. Return list of bucket names present in bucket_output""" - output = filter(lambda bucket: bucket.find(":") == -1, bucket_output) + """ + Filter bucket name from bucket_output. + Return list of bucket names present in bucket_output + """ + output = [] + logger.debug("filter input: {}".format(bucket_output)) + logger.debug("filter input: {}".format(len(bucket_output))) + if bucket_output != []: + output = list(map(lambda x: x["name"], bucket_output)) logger.debug("Bucket list: {}".format(output)) return output +def get_bucket_object(bucket_output, bucket): + """ + Return bucket dict + from bucket_output string for bucket(passed in argument) + """ + output = filter(lambda x: x["name"] == bucket, bucket_output) + if len(output) != 1: + ret = None + else: + ret = output[-1] + logger.debug("For Bucket {} detail is : {}".format(bucket, ret)) + return ret + + def get_bucket_name_with_size(bucket_output, bucket): - """ Return `bucket_name:ramUsed` as output from bucket_output string for bucket(passed in argument) """ - output = get_all_bucket_list_with_size(bucket_output, bucket) + """ + Return `bucket_name:ramUsed` + as output from bucket_output string for bucket(passed in argument) + """ + + logger.debug("HUHU") + logger.debug(bucket_output) + + output = get_all_bucket_list_with_size(bucket_output) + logger.debug("HAHA") + logger.debug(output) output = ":".join(output) bucket_info = re.search(r"{},\d+".format(bucket), output).group() logger.debug("For Bucket {} detail is : {}".format(bucket, bucket_info)) @@ -177,26 +311,28 @@ def get_bucket_name_with_size(bucket_output, bucket): def get_bucketlist_to_namesize_list(bucket_output, bucket_list): - """ Return `bucket_name:ramUsed` as output from bucket_output string for each bucket(passed in bucket_list) """ + """Return `bucket_name:ramUsed` as output from bucket_output + string for each bucket(passed in bucket_list)""" bucket_details = [] for name in bucket_list: bucket_details.append(get_bucket_name_with_size(bucket_output, name)) - logger.debug("Buckets: {} \n details : {}".format(bucket_list, bucket_details)) + logger.debug( + "Buckets: {} \n details : {}".format(bucket_list, bucket_details) + ) return bucket_details - def sleepForSecond(sec): # Sleep/Pause the execution 
for given seconds + logger.debug("sleeping for {}".format(sec)) time.sleep(sec) - + logger.debug("sleeping is over") def current_time(): - """ Return current time in format of %Y%m%d%H%M%S'""" + """Return current time in format of %Y%m%d%H%M%S'""" curr_time = datetime.now() - return curr_time.strftime('%Y%m%d%H%M%S') - + return curr_time.strftime("%Y%m%d%H%M%S") def get_value_of_key_from_json(json_obj, key): @@ -207,40 +343,45 @@ def get_value_of_key_from_json(json_obj, key): def write_file(connection, content, filename): """Add given data into passed filename""" - logger.debug("writing data {} in file {}".format(content,filename)) + logger.debug("writing data {} in file {}".format(content, filename)) try: - utilities.execute_bash(connection, CommandFactory.write_file(data=content, filename=filename)) - except Exception as e: + utilities.execute_bash( + connection, + CommandFactory.write_file(data=content, filename=filename), + ) + except Exception: logger.debug("Failed to Write into file") raise FileIOError("Failed to Write into file ") - def check_file_present(connection, config_file_path): - """ return True if file is present else return False""" + """return True if file is present else return False""" try: - stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_file(config_file_path)) + stdout, stderr, exit_code = utilities.execute_bash( + connection, CommandFactory.check_file(config_file_path) + ) if stdout == "Found": logger.debug("file path exist {}".format(config_file_path)) return True - except Exception as e: + except Exception: logger.debug("File path not exist {}".format(config_file_path)) return False def check_dir_present(connection, dir): - """ return True if directory is present else return False""" + """return True if directory is present else return False""" try: - stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_directory(dir)) + stdout, stderr, exit_code = utilities.execute_bash( + connection, CommandFactory.check_directory(dir) + ) if stdout == "Found": logger.debug("dir path found {} ".format(dir)) return True except Exception as err: - logger.debug("directory path is absent: {}".format(err.message)) + logger.debug("directory path is absent: {}".format(str(err))) return False - def read_file(connection, filename): """read the file content and return the content""" logger.debug("Reading file {}".format(filename)) @@ -252,7 +393,9 @@ def read_file(connection, filename): # delete file def delete_file(connection, filename): logger.debug("Deleting file {}".format(filename)) - stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.delete_file(filename)) + stdout, stderr, exit_code = utilities.execute_bash( + connection, CommandFactory.delete_file(filename) + ) return [stdout, stderr, exit_code] @@ -262,17 +405,26 @@ def get_snapshot_id(): def unmount_file_system(rx_connection, path): - """ unmount the file system which will use in cbbackup manager after post snapshot""" + """unmount the file system which will use in cbbackup manager + after post snapshot""" try: - utilities.execute_bash(rx_connection, CommandFactory.unmount_file_system(path)) + utilities.execute_bash( + rx_connection, CommandFactory.unmount_file_system(path) + ) except Exception as err: - logger.debug("error here {}".format(err.message)) - raise UnmountFileSystemError(err.message) + logger.debug("error here {}".format(str(err))) + raise UnmountFileSystemError(str(err)) -def get_bucket_size_in_MB(bucket_size, 
bkt_name_size): - """ convert bkt size into MB if current bucket_size is zero""" - bkt_size_mb = 0 +def get_bucket_size_in_MB(bucket_size_list, bkt_name_size, bucket_name): + """convert bkt size into MB if current bucket_size is zero""" + bucket_size = 0 + for bucket_dict in bucket_size_list: + bname = bucket_dict["bname"] + bsize = bucket_dict["bsize"] + if bname == "*" or bname == bucket_name: + bucket_size = bsize + break if bucket_size > 0: bkt_size_mb = bucket_size @@ -288,3 +440,84 @@ def get_sync_lock_file_name(dsource_type, dsource_name): striped_dsource_name = dsource_name.replace(" ", "") sync_filename = str(striped_dsource_name) + str(sync_filename) return sync_filename + + +def check_stale_mountpoint(connection, path): + + output, stderr, exit_code = utilities.execute_bash( + connection, CommandFactory.df(path) + ) + if exit_code != 0: + if "No such file or directory" in stderr: + # this is actually OK + return False + else: + logger.error("df retured error - stale mount point or other error") + logger.error( + "stdout: {} stderr: {} exit_code: {}".format( + output, stderr, exit_code + ) + ) + return True + else: + return False + + +def check_server_is_used(connection, path): + + ret = Status.INACTIVE + + output, stderr, exit_code = utilities.execute_bash( + connection, CommandFactory.mount() + ) + if exit_code != 0: + logger.error("mount retured error") + logger.error( + "stdout: {} stderr: {} exit_code: {}".format( + output, stderr, exit_code + ) + ) + raise UserError( + "Problem with reading mounted file systems", + "Ask OS admin to check mount", + stderr, + ) + else: + # parse a mount output to find another Delphix mount points + fs_re = re.compile(r"(\S*)\son\s(\S*)\stype\s(\S*)") + for i in output.split("\n"): + match = re.search(fs_re, i) + if match is not None: + groups = match.groups() + if groups[2] and str(groups[2]).startswith("nfs"): + if path == groups[1]: + # this is our mount point - skip it + ret = Status.ACTIVE + continue + if "domain0" in groups[0] and "timeflow" in groups[0]: + # this is a delphix mount point but it's not ours + # raise an exception + raise UserError( + "Another database (VDB or staging) is using " + "this server.", + "Disable another one to provision or " + "enable this one", + "{} {}".format(groups[0], groups[1]), + ) + + return ret + + +def clean_stale_mountpoint(connection, path): + umount_std, umount_stderr, umount_exit_code = utilities.execute_bash( + connection, + CommandFactory.unmount_file_system(mount_path=path, options="-lf"), + ) + if umount_exit_code != 0: + logger.error("Problem with cleaning mount path") + logger.error("stderr {}".format(umount_stderr)) + raise UserError( + "Problem with cleaning mount path", + "Ask OS admin to check mount points", + umount_stderr, + ) diff --git a/src/controller/resource_builder.py b/src/controller/resource_builder.py index e03770b..6525a9b 100644 --- a/src/controller/resource_builder.py +++ b/src/controller/resource_builder.py @@ -1,40 +1,55 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ There are two purposes which this module is created for: Purpose1: - This class is being used by child classes to initialize their attributes. 
Child classes of this are : - _bucket.py,_cb_backup.py, _cluster.py, _replication.py, _xdcr.py. To add any new feature let say 'X', create a class + This class is being used by child classes to initialize their attributes. + Child classes of this are : + _bucket.py,_cb_backup.py, _cluster.py, _replication.py, _xdcr.py. To add any + new feature let say 'X', create a class for that 'X' feature in x module and make the Resource class as parent for X. Here we are using builder design pattern to initialize the properties. Reason of using this approach: - 1: No need fixed combinations of objects - There could be multiple attributes combinations based on their availability. Possible combinations are like objects of - ('repository' + 'virtual_source' )or (' repository' +'staged_source'). Instead of creating multiple constructors, - followed this approach in which whatever the parameters available to object creator, pass only those. + 1: No need fixed combinations of objects. + There could be multiple attributes combinations based on their availability. + Possible combinations are like objects of + ('repository' + 'virtual_source' )or (' repository' +'staged_source'). + Instead of creating multiple constructors, + followed this approach in which whatever the parameters available to object + creator, pass only those. Remaining class attributes will be set as 'None'. - To create object use below format, type of obj is Resource. `Example`: - obj=Resource.ObjectBuilder().set_snapshot_parameters("SnapshotParams").set_snapshot("Snapshot").set_dsource(False).build() - Also we must end the object creation with build(), after which only ObjectBuilder will get to know about no more + To create object use below format, type of obj is Resource. + `Example`: + obj=Resource.ObjectBuilder().set_snapshot_parameters("SnapshotParams"). + set_snapshot("Snapshot").set_dsource(False).build() + Also we must end the object creation with build(), after which only + ObjectBuilder will get to know about no more attributes to set. - 2: No need to remember which constructor should be called for any particular purpose + 2: No need to remember which constructor should be called for any particular + purpose 3: No need to remember the order of parameters - 4: If you want to add other parameters in this class, refactoring will be easier in this approach + 4: If you want to add other parameters in this class, refactoring will be + easier in this approach Part2: - __metaclass__ of this class is DatabaseExceptionHandlerMeta. All child classes of Resource will automatically - inherit this property. Child classes will be decorated with small features for now, which we can scale. + __metaclass__ of this class is DatabaseExceptionHandlerMeta. All child classes + of Resource will automatically + inherit this property. Child classes will be decorated with small features for + now, which we can scale. Current usage: more readable logs and handling of ignorable exceptions. - Basically there is a decorator(inside metaclass) which is being applied on all methods defined inside the child class. - Through this design, no need to write decorators on top of each function manually. + Basically there is a decorator(inside metaclass) which is being applied on all + methods defined inside the child class. + Through this design, no need to write decorators on top of each function + manually. 
""" -####################################################################################################################### +############################################################################## import logging -from db_exception_handler import DatabaseExceptionHandlerMeta + +from controller.db_exception_handler import DatabaseExceptionHandlerMeta logger = logging.getLogger(__name__) @@ -44,14 +59,20 @@ class Resource(object): def __init__(self, builder): """ - It requires the builder object to initialize the parameters of this class. + It requires the builder object to initialize the parameters of this + class. builder is object of inner class: ObjectBuilder :param builder: :return Object of Resource """ - # Validating the type of builder. It must be of two type (type or Resource). Else it will raise an Exception for + # Validating the type of builder. It must be of two type + # (type or Resource). Else it will raise an Exception for # other cases like string, int or object of any other class. - if isinstance(builder, type) or isinstance(builder, Resource) or builder.__class__.__name__ == 'Resource': + if ( + isinstance(builder, type) + or isinstance(builder, Resource) + or builder.__class__.__name__ == "Resource" + ): self.connection = builder.connection self.repository = builder.repository self.source_config = builder.source_config @@ -62,13 +83,20 @@ def __init__(self, builder): self.dSource = builder.dSource self.parameters = builder.parameters else: - logger.debug("Error, Expected builder object, Found: {} ".format(type(builder))) + logger.debug( + "Error, Expected builder object, Found: {} ".format( + type(builder) + ) + ) raise Exception( - "Failed to initialize the Resource object. Expected: ObjectBuilder, Found: {} ".format(type(builder))) + "Failed to initialize the Resource object. Expected: " + "ObjectBuilder, Found: {} ".format(type(builder)) + ) class ObjectBuilder(object): # Below are the same parameters which is required in Resource class - # All setters must be decorated with classmethod, because there will not be any instance of ObjectBuilder + # All setters must be decorated with classmethod, because there will + # not be any instance of ObjectBuilder connection = None repository = None source_config = None @@ -127,21 +155,32 @@ def set_dsource(cls, is_dSource=True): cls.dSource = is_dSource return cls - # it must be last step in order to provide the outer class object(Resource) + # it must be last step in order to provide the outer class + # object(Resource) @classmethod def build(cls): if cls.dSource is None: - raise Exception("If this object is for dSource then set True else set it False") + raise Exception( + "If this object is for dSource then set True else set " + "it False" + ) return Resource(cls) def __repr__(self): """ - overriding the __repr__ method. To print contents of Resource object, use print(obj) + overriding the __repr__ method. 
To print contents of Resource object, + use print(obj) :return:None """ - return "\nObjectBuilder(connection: {0.connection!r}, repository: {0.repository!r}, \n source_config: {0.source_config!r}, snapshot_parameters:{0.snapshot_parameters!r},\ - staged_source: {0.staged_source!r}, virtual_source:{0.virtual_source!r}, snapshot: {0.snapshot!r}, parameters:{0.parameters!r},dSource: {0.dSource!r})".format( - self) + return ( + "\nObjectBuilder(connection: {0.connection!r}, repository: " + "{0.repository!r}, \n source_config: {0.source_config!r}, " + "snapshot_parameters:{0.snapshot_parameters!r},\ + staged_source: {0.staged_source!r}, " + "virtual_source:{0.virtual_source!r}, snapshot: " + "{0.snapshot!r}, parameters:{0.parameters!r},dSource: " + "{0.dSource!r})".format(self) + ) def __str__(self): - return repr(self) \ No newline at end of file + return repr(self) diff --git a/src/db_commands/commands.py b/src/db_commands/commands.py index 1a4b707..96626ec 100644 --- a/src/db_commands/commands.py +++ b/src/db_commands/commands.py @@ -1,17 +1,29 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2024 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -CommandFactory class contains all commands required to perform couchbase and OS related operations -These are a list of commands which are being used in this project. Have segregated both types of commands into two -classes DatabaseCommand and OSCommand. CommandFactory is the actual class through which the command string will be -returned. In the last section of this file, we have created small tests for all these commands with dummy values. -Through which we can see the actual command is going to execute. All methods are decorated to @staticmethod, -so no need to create an object of the class, we can use the direct class name to use any command method +CommandFactory class contains all commands required to perform couchbase and +OS related operations +These are a list of commands which are being used in this project. Have +segregated both types of commands into two +classes DatabaseCommand and OSCommand. CommandFactory is the actual class +through which the command string will be +returned. In the last section of this file, we have created small tests for +all these commands with dummy values. +Through which we can see the actual command is going to execute. All methods +are decorated to @staticmethod, +so no need to create an object of the class, we can use the direct class name +to use any command method. 
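A short usage sketch of what the docstring above describes. The import path is an assumption based on the src/db_commands/commands.py location in this diff, and the mount path and uid are invented example values.

```python
# Hedged usage sketch: the import path mirrors src/db_commands/commands.py and
# is an assumption; the directory path and uid below are invented examples.
from db_commands.commands import CommandFactory

# Every method is a @staticmethod, so no instance is created; each call simply
# returns the command string to be executed elsewhere.
cmd = CommandFactory.make_directory("/mnt/provision/data", sudo=True, uid=1000)
print(cmd)  # sudo -u \#1000 mkdir -p /mnt/provision/data

print(CommandFactory.read_file("/mnt/provision/.delphix/config.txt"))
# cat /mnt/provision/.delphix/config.txt
```

Because the factories only build strings, the caller (the plugin's operation code) decides where and how each command actually runs.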
""" -####################################################################################################################### +############################################################################## + + +import logging +import urllib.parse + +logger = logging.getLogger(__name__) class OSCommand(object): @@ -19,64 +31,209 @@ def __init__(self): pass @staticmethod - def find_binary_path(): + def find_binary_path(**kwargs): return "echo $COUCHBASE_PATH" @staticmethod - def find_install_path(binary_path): - return "find {binary_path} -name couchbase-server".format(binary_path=binary_path) + def find_install_path(binary_path, **kwargs): + return "find {binary_path} -name couchbase-server".format( + binary_path=binary_path + ) @staticmethod - def find_shell_path(binary_path): - return "find {binary_path} -name couchbase-cli".format(binary_path=binary_path) + def find_shell_path(binary_path, **kwargs): + return "find {binary_path} -name couchbase-cli".format( + binary_path=binary_path + ) @staticmethod def get_process(): return "ps -ef" @staticmethod - def make_directory(directory_path): - return "mkdir -p {directory_path}".format(directory_path=directory_path) + def make_directory(directory_path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} mkdir -p {directory_path}".format( + uid=uid, directory_path=directory_path + ) + else: + return "mkdir -p {directory_path}".format( + directory_path=directory_path + ) @staticmethod - def change_permission(directory_path): - return "chmod -R 775 {directory_path}".format(directory_path=directory_path) + def change_permission(path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} chmod -R 775 {path}".format( + uid=uid, path=path + ) + else: + return "chmod -R 775 {path}".format(path=path) @staticmethod - def get_config_directory(mount_path): + def get_config_directory(mount_path, **kwargs): return "{mount_path}/.delphix".format(mount_path=mount_path) @staticmethod - def read_file(filename): + def read_file(filename, **kwargs): return "cat {filename}".format(filename=filename) @staticmethod - def check_file(file_path): - return "[ -f {file_path} ] && echo 'Found'".format(file_path=file_path) + def check_file(file_path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} [ -f {file_path} ] && echo 'Found'".format( + file_path=file_path, uid=uid + ) + else: + return "[ -f {file_path} ] && echo 'Found'".format( + file_path=file_path + ) @staticmethod - def write_file(filename, data): - return "echo {data} > {filename}".format(filename=filename, data=data) + def write_file(filename, data, sudo=False, uid=None, **kwargs): + if sudo: + return f"sudo -u \#{uid} echo {data} > {filename}" + else: + return "echo {data} > {filename}".format( + filename=filename, data=data + ) @staticmethod - def get_ip_of_hostname(): - return "hostname -i" + def get_ip_of_hostname(**kwargs): + return "hostname -I" @staticmethod - def check_directory(dir_path): - return "[ -d {dir_path} ] && echo 'Found'".format(dir_path=dir_path) + def check_directory(dir_path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} [ -d {dir_path} ] && echo 'Found'".format( + dir_path=dir_path, uid=uid + ) + else: + return "[ -d {dir_path} ] && echo 'Found'".format( + dir_path=dir_path + ) @staticmethod - def delete_file(filename): + def delete_file(filename, **kwargs): return "rm -f {filename}".format(filename=filename) @staticmethod - def get_dlpx_bin(): + def delete_dir(dirname, sudo=False, uid=None, **kwargs): + if 
sudo: + return "sudo -u \#{uid} rm -rf {dirname}".format( + dirname=dirname, uid=uid + ) + else: + return "rm -rf {dirname}".format(dirname=dirname) + + @staticmethod + def os_mv(srcname, trgname, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} mv {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) + else: + return "mv {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) + + @staticmethod + def os_cp(srcname, trgname, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} cp {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) + else: + return "cp {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) + + @staticmethod + def os_cpr(srcname, trgname, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} cp -r {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) + else: + return "cp -r {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) + + @staticmethod + def os_ls(dir_path, sudo=False, uid=None, **kwargs): + if sudo: + return f"sudo -u \#{uid} ls {dir_path}" + else: + return f"ls {dir_path}" + + @staticmethod + def get_dlpx_bin(**kwargs): return "echo $DLPX_BIN_JQ" @staticmethod - def unmount_file_system(mount_path): - return "sudo /bin/umount {mount_path}".format(mount_path=mount_path) + def unmount_file_system(mount_path, **kwargs): + if "options" in kwargs: + options = kwargs.pop("options") + else: + options = "" + return "sudo /bin/umount {options} {mount_path}".format( + mount_path=mount_path, options=options + ) + + @staticmethod + def whoami(**kwargs): + return "id" + + @staticmethod + def sed(filename, regex, sudo=False, uid=None, **kwargs): + if sudo: + return 'sudo -u \#{uid} sed -i -e "{regex}" {filename}'.format( + regex=regex, filename=filename, uid=uid + ) + else: + return 'sed -i -e "{}" {}'.format(regex, filename) + + @staticmethod + def cat(path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{uid} cat {path}".format(path=path, uid=uid) + else: + return "cat {path}".format(path=path) + + @staticmethod + def df(path, sudo=False, uid=None, **kwargs): + if sudo: + return f"sudo -u \#{uid} df -h {path}" + else: + return f"df -h {path}" + + @staticmethod + def mount(sudo=False, uid=None, **kwargs): + return "mount" + + @staticmethod + def resolve_name(hostname, **kwargs): + return ( + "getent ahostsv4 {hostname} | grep STREAM | head -n 1 | " + "cut -d ' ' -f 1".format(hostname=hostname) + ) + + @staticmethod + def du(mount_path: str, sudo=False, uid=None, **kwargs) -> str: + """ + Returns command string to get size of dataset. 
+ + :param mount_path: The path whose size is to be calculated + + :return: The du command string + """ + if sudo: + return ( + f"sudo -u \#{uid} du -s --block-size=1 --apparent-size " + f"{mount_path}" + ) + else: + return f"du -s --block-size=1 --apparent-size {mount_path}" class DatabaseCommand(object): @@ -84,260 +241,1751 @@ def __init__(self): pass @staticmethod - def start_couchbase(install_path): - return "{install_path} \-- -noinput -detached .".format(install_path=install_path) + def get_parent_expect_block(): + exp_block = """ + set timeout 10 + match_max -d 5000000 + log_user 0 + {command_specific_operations} + lassign [wait] pid spawnid os_error_flag value + + if {{$os_error_flag == 0}} {{ + puts "DLPX_EXPECT_EXIT_CODE: $value" + }} else {{ + puts "errno: $value" + }} + set output $expect_out(buffer) + puts $output + """ + return exp_block + + @staticmethod + def start_couchbase(install_path, sudo=False, uid=None, **kwargs): + if sudo: + return ( + "sudo -u \#{} {install_path} \-- -noinput -detached .".format( + uid, install_path=install_path + ) + ) + else: + return "{install_path} \-- -noinput -detached .".format( + install_path=install_path + ) @staticmethod def get_version(install_path): return "{install_path} --version".format(install_path=install_path) + @staticmethod + def get_ids(install_path): + return "ls -n {install_path}".format(install_path=install_path) + @staticmethod def get_data_directory(couchbase_base_dir): - return "cat {couchbase_base_dir}/etc/couchbase/static_config|grep path_config_datadir".format( - couchbase_base_dir=couchbase_base_dir) - - @staticmethod - def stop_couchbase(install_path): - return "{install_path} -k".format(install_path=install_path) - - @staticmethod - def cluster_init(shell_path, - hostname, - port, - username, - cluster_ramsize, - cluster_name, - cluster_index_ramsize, - cluster_fts_ramsize, - cluster_eventing_ramsize, - cluster_analytics_ramsize, - additional_services - ): - return "{shell_path} cluster-init --cluster {hostname}:{port} --cluster-username {username} --cluster-password $password --cluster-ramsize {cluster_ramsize} --cluster-name {cluster_name} --cluster-index-ramsize {cluster_index_ramsize} --cluster-fts-ramsize {cluster_fts_ramsize} --cluster-eventing-ramsize {cluster_eventing_ramsize} --cluster-analytics-ramsize {cluster_analytics_ramsize} --services data,index,{additional_services}".format( - shell_path=shell_path, - hostname=hostname, - username=username, - port=port, - cluster_ramsize=cluster_ramsize, - cluster_name=cluster_name, - cluster_index_ramsize=cluster_index_ramsize, - cluster_fts_ramsize=cluster_fts_ramsize, - cluster_eventing_ramsize=cluster_eventing_ramsize, - cluster_analytics_ramsize=cluster_analytics_ramsize, - additional_services=additional_services - ) - - @staticmethod - def cluster_setting(shell_path, hostname, port, username, cluster_ramsize, cluster_name, cluster_index_ramsize, - cluster_fts_ramsize, cluster_eventing_ramsize, cluster_analytics_ramsize): - return "{shell_path} setting-cluster -c {hostname}:{port} -u {username} -p $password --cluster-ramsize {cluster_ramsize} --cluster-name {cluster_name} --cluster-index-ramsize {cluster_index_ramsize} --cluster-fts-ramsize {cluster_fts_ramsize} --cluster-eventing-ramsize {cluster_eventing_ramsize} --cluster-analytics-ramsize {cluster_analytics_ramsize}".format( - shell_path=shell_path, - hostname=hostname, - port=port, - username=username, - cluster_ramsize=cluster_ramsize, - cluster_name=cluster_name, - 
cluster_index_ramsize=cluster_index_ramsize, - cluster_fts_ramsize=cluster_fts_ramsize, - cluster_eventing_ramsize=cluster_eventing_ramsize, - cluster_analytics_ramsize=cluster_analytics_ramsize - ) + return ( + "cat {couchbase_base_dir}/etc/couchbase/static_config|grep " + "path_config_datadir".format(couchbase_base_dir=couchbase_base_dir) + ) + + @staticmethod + def stop_couchbase(install_path, sudo=False, uid=None, **kwargs): + if sudo: + return "sudo -u \#{} {install_path} -k".format( + uid, install_path=install_path + ) + else: + return "{install_path} -k".format(install_path=install_path) + + @staticmethod + def cluster_init( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + additional_services, + **kwargs, + ): + return ( + "{shell_path} cluster-init --cluster {hostname}:{port} " + "--cluster-username {username} --cluster-password $password " + "--cluster-ramsize {cluster_ramsize} --cluster-name " + "{cluster_name} --cluster-index-ramsize {cluster_index_ramsize}" + " --cluster-fts-ramsize {cluster_fts_ramsize} " + "--cluster-eventing-ramsize {cluster_eventing_ramsize} " + "--cluster-analytics-ramsize {cluster_analytics_ramsize} " + "--services data,index,{additional_services}".format( + shell_path=shell_path, + hostname=hostname, + username=username, + port=port, + cluster_ramsize=cluster_ramsize, + cluster_name=cluster_name, + cluster_index_ramsize=cluster_index_ramsize, + cluster_fts_ramsize=cluster_fts_ramsize, + cluster_eventing_ramsize=cluster_eventing_ramsize, + cluster_analytics_ramsize=cluster_analytics_ramsize, + additional_services=additional_services, + ) + ) + + @staticmethod + def cluster_init_rest_expect( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + additional_services, + **kwargs, + ): + payload_data = { + "hostname": "127.0.0.1", + "username": username, + "password": kwargs.get("password"), + "port": "SAME", + "memoryQuota": cluster_ramsize, + "clusterName": cluster_name, + "indexMemoryQuota": cluster_index_ramsize, + "ftsMemoryQuota": cluster_fts_ramsize, + "services": f"data,index,{additional_services}", + "indexerStorageMode": kwargs.get("indexerStorageMode"), + "afamily": "ipv4", + "afamilyOnly": "false", + "nodeEncryption": "off", + } + if cluster_eventing_ramsize is not None: + payload_data["eventingMemoryQuota"] = cluster_eventing_ramsize + if cluster_analytics_ramsize is not None: + payload_data["cbasMemoryQuota"] = cluster_analytics_ramsize + + payload_data["services"] = ( + payload_data["services"] + .replace("data", "kv") + .replace("query", "n1ql") + ) + payload_string = urllib.parse.urlencode(payload_data) + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://127.0.0.1:{port}/clusterInit -u {username}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": "/tmp/run_shell.sh", + "SHELL_DATA": command, + "PAYLOAD_SECRET": payload_string, + } + return expect_block, env_vars + + @staticmethod + def 
cluster_setting( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + **kwargs, + ): + return ( + "{shell_path} setting-cluster -c {hostname}:{port} -u " + "{username} -p $password --cluster-ramsize {cluster_ramsize} " + "--cluster-name {cluster_name} " + "--cluster-index-ramsize {cluster_index_ramsize} " + "--cluster-fts-ramsize {cluster_fts_ramsize} " + "--cluster-eventing-ramsize {cluster_eventing_ramsize} " + "--cluster-analytics-ramsize " + "{cluster_analytics_ramsize}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + cluster_ramsize=cluster_ramsize, + cluster_name=cluster_name, + cluster_index_ramsize=cluster_index_ramsize, + cluster_fts_ramsize=cluster_fts_ramsize, + cluster_eventing_ramsize=cluster_eventing_ramsize, + cluster_analytics_ramsize=cluster_analytics_ramsize, + ) + ) + + @staticmethod + def cluster_setting_expect( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + **kwargs, + ): + command = ( + f"{shell_path} setting-cluster -c {hostname}:{port} -u " + f"{username} --password --cluster-ramsize " + f"{cluster_ramsize} --cluster-name {cluster_name} " + f"--cluster-index-ramsize {cluster_index_ramsize} " + f"--cluster-fts-ramsize {cluster_fts_ramsize} " + f"--cluster-eventing-ramsize {cluster_eventing_ramsize} " + f"--cluster-analytics-ramsize {cluster_analytics_ramsize}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def xdcr_setup( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --create --xdcr-hostname {hostname}:{port} " + "--xdcr-username {username} --xdcr-password $password " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + hostname=hostname, + port=port, + username=username, + cluster_name=cluster_name, + ) + ) + + @staticmethod + def xdcr_setup_expect( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + payload_data = { + "username": username, + "password": kwargs.get("password"), + "hostname": f"{hostname}:{port}", + "name": cluster_name, + "demandEncryption": 0, + } + payload_string = urllib.parse.urlencode(payload_data) + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://{source_hostname}:{source_port}/pools/default/" + f"remoteClusters -u {source_username}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT 
SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = { + "CB_PWD": kwargs.get("source_password"), + "CB_CMD": "/tmp/run_shell.sh", + "SHELL_DATA": command, + "PAYLOAD_SECRET": payload_string, + } + return expect_block, env_vars + + @staticmethod + def xdcr_replicate( + shell_path, + source_hostname, + source_port, + source_username, + source_bucket_name, + target_bucket_name, + cluster_name, + hostname, + port, + username, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --create --xdcr-from-bucket " + "{source_bucket_name} --xdcr-to-bucket {target_bucket_name} " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + source_bucket_name=source_bucket_name, + target_bucket_name=target_bucket_name, + cluster_name=cluster_name, + ) + ) + + @staticmethod + def xdcr_replicate_expect( + shell_path, + source_hostname, + source_port, + source_username, + source_bucket_name, + target_bucket_name, + cluster_name, + hostname, + port, + username, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--create --xdcr-from-bucket {source_bucket_name} " + f"--xdcr-to-bucket {target_bucket_name} " + f"--xdcr-cluster-name {cluster_name}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def get_replication_uuid( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --list".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + ) + ) + + @staticmethod + def get_replication_uuid_expect( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + command = ( + f"{shell_path} xdcr-setup --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--list" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def get_stream_id( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--list".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + ) + ) 
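To make the command/_expect pairing concrete, here is a hedged sketch of how a caller might consume one of the *_expect variants defined in this class. The hostnames, paths, and credentials are invented, and the run_expect helper named in the final comment is an assumption about the execution layer, not something defined in this file.

```python
# Hedged consumption sketch for the *_expect variants; values are invented.
from db_commands.commands import CommandFactory

expect_block, env_vars = CommandFactory.get_replication_uuid_expect(
    shell_path="/opt/couchbase/bin/couchbase-cli",
    source_hostname="source-host.example.com",
    source_port=8091,
    source_username="Administrator",
    source_password="s3cret",  # picked up from **kwargs and exported as CB_PWD
)

# expect_block is a Tcl/expect script rendered from get_parent_expect_block(),
# and env_vars ({"CB_PWD": ..., "CB_CMD": ...}) keeps the password and command
# off the command line. A caller would hand both to the remote expect runner,
# e.g. something like:
#     libs.run_expect(connection, expect_block, variables=env_vars)
```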
+ + @staticmethod + def get_stream_id_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} " + f"--password --xdcr-cluster-name {cluster_name} --list" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def pause_replication( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--pause --xdcr-replicator={id}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + id=id, + ) + ) + + @staticmethod + def pause_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} " + f"--password --xdcr-cluster-name {cluster_name} " + f"--pause --xdcr-replicator={id}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def resume_replication( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--resume --xdcr-replicator={id}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + id=id, + ) + ) + + @staticmethod + def resume_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--xdcr-cluster-name {cluster_name} --resume " + f"--xdcr-replicator={id}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def delete_replication( + shell_path, + source_hostname, + source_port, + source_username, + 
id, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --delete --xdcr-replicator {id} " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + id=id, + cluster_name=cluster_name, + ) + ) + + @staticmethod + def delete_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + id, + cluster_name, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--delete --xdcr-replicator {id} --xdcr-cluster-name " + f"{cluster_name}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def xdcr_setup(shell_path, source_hostname, source_port, source_username, hostname, port, username, cluster_name): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --create --xdcr-hostname {hostname}:{port} --xdcr-username {username} --xdcr-password $password --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - hostname=hostname, - port=port, - username=username, - cluster_name=cluster_name + def xdcr_delete( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --delete --xdcr-hostname {hostname}:{port} " + "--xdcr-username {username} --xdcr-password $password " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + hostname=hostname, + port=port, + username=username, + cluster_name=cluster_name, + ) ) @staticmethod - def xdcr_replicate(shell_path, source_hostname, source_port, source_username, source_bucket_name, target_bucket_name, cluster_name, hostname, port, username): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --create --xdcr-from-bucket {source_bucket_name} --xdcr-to-bucket {target_bucket_name} --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - source_bucket_name=source_bucket_name, - target_bucket_name=target_bucket_name, - cluster_name=cluster_name + def xdcr_delete_expect( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + command = ( + f"curl -X DELETE http://{source_hostname}:{source_port}/" + f"pools/default/remoteClusters/{cluster_name} -u " + f"{source_username}" + ) + expect_block = 
DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def get_replication_uuid(shell_path, source_hostname, source_port, source_username): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --list".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, + def get_source_bucket_list( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + return ( + "{shell_path} bucket-list --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$password -o json".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + ) ) @staticmethod - def get_stream_id(shell_path, source_hostname, source_port, source_username, cluster_name): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --list".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - cluster_name=cluster_name + def get_source_bucket_list_expect( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + command = ( + f"{shell_path} bucket-list --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"-o json" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def pause_replication(shell_path, source_hostname, source_port, source_username, cluster_name, id): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --pause --xdcr-replicator={id}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - cluster_name=cluster_name, - id=id + def get_server_list(shell_path, hostname, port, username, **kwargs): + return ( + "{shell_path} server-list --cluster {hostname}:{port} " + "--username {username} --password $password".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod - def resume_replication(shell_path, source_hostname, source_port, source_username, cluster_name, id): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --resume --xdcr-replicator={id}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - 
cluster_name=cluster_name, - id=id + def get_server_list_expect(shell_path, hostname, port, username, **kwargs): + command = ( + f"{shell_path} server-list --cluster {hostname}:{port} " + f"--username {username} --password" ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def delete_replication(shell_path, source_hostname, source_port, source_username, id, cluster_name): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --delete --xdcr-replicator {id} --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - id=id, - cluster_name=cluster_name + def node_init(shell_path, port, username, data_path, **kwargs): + return ( + "{shell_path} node-init --cluster 127.0.0.1:{port} " + "--username {username} --password $password " + "--node-init-data-path {data_path} --node-init-index-path " + "{data_path} --node-init-analytics-path {data_path} " + "--node-init-hostname 127.0.0.1".format( + shell_path=shell_path, + port=port, + username=username, + data_path=data_path, + ) ) @staticmethod - def xdcr_delete(shell_path, source_hostname, source_port, source_username, hostname, port, username, cluster_name): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --delete --xdcr-hostname {hostname}:{port} --xdcr-username {username} --xdcr-password $password --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - hostname=hostname, - port=port, - username=username, - cluster_name=cluster_name + def node_init_expect(shell_path, port, username, data_path, **kwargs): + command = ( + f"{shell_path} node-init --cluster 127.0.0.1:{port} " + f"--username {username} --password --node-init-data-path " + f"{data_path} --node-init-index-path {data_path} " + f"--node-init-analytics-path {data_path} " + f"--node-init-hostname 127.0.0.1" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def get_source_bucket_list(shell_path, source_hostname, source_port, source_username): - return "{shell_path} bucket-list --cluster {source_hostname}:{source_port} --username {source_username} --password $password".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, + def bucket_edit( + shell_path, + hostname, + port, + username, + bucket_name, + flush_value, + **kwargs, + ): + return ( + "{shell_path} bucket-edit --cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name} --enable-flush 
{flush_value}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + flush_value=flush_value, + ) ) @staticmethod - def get_status(shell_path, hostname, port, username): - return "{shell_path} server-info --cluster {hostname}:{port} --username {username} --password $password".format( - shell_path=shell_path, hostname=hostname, port=port, username=username + def bucket_edit_expect( + shell_path, + hostname, + port, + username, + bucket_name, + flush_value, + **kwargs, + ): + command = ( + f"{shell_path} bucket-edit --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name} " + f"--enable-flush {flush_value}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def node_init(shell_path, port, username, mount_path): - return "{shell_path} node-init --cluster 127.0.0.1:{port} --username {username} --password $password --node-init-data-path {mount_path}/data --node-init-index-path {mount_path}/data --node-init-analytics-path {mount_path}/data --node-init-hostname 127.0.0.1".format( - shell_path=shell_path, port=port, username=username, mount_path=mount_path + def bucket_edit_ramquota( + shell_path, hostname, port, username, bucket_name, ramsize, **kwargs + ): + return ( + "{shell_path} bucket-edit --cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name} --bucket-ramsize {ramsize}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ramsize=ramsize, + ) ) @staticmethod - def bucket_edit(shell_path, hostname, port, username, bucket_name, flush_value): - return "{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name} --enable-flush {flush_value}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name, - flush_value=flush_value + def bucket_edit_ramquota_expect( + shell_path, hostname, port, username, bucket_name, ramsize, **kwargs + ): + command = ( + f"{shell_path} bucket-edit --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name} " + f"--bucket-ramsize {ramsize}" ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def bucket_edit_ramquota(shell_path, hostname, port, username, bucket_name, ramsize): - return "{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name} --bucket-ramsize {ramsize}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name, - ramsize=ramsize + def bucket_delete( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + return ( + "{shell_path} bucket-delete 
--cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ) ) @staticmethod - def bucket_delete(shell_path, hostname, port, username, bucket_name): - return "{shell_path} bucket-delete --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name + def bucket_delete_expect( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + command = ( + f"{shell_path} bucket-delete --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def bucket_flush(shell_path, hostname, port, username, bucket_name): - return "echo 'Yes' | {shell_path} bucket-flush --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name + def bucket_flush( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + return ( + "echo 'Yes' | {shell_path} bucket-flush --cluster " + "{hostname}:{port} --username {username} --password $password " + "--bucket={bucket_name}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ) ) @staticmethod - def bucket_create(shell_path, hostname, port, username, bucket_name, ramsize, evictionpolicy): - return "{shell_path} bucket-create --cluster 127.0.0.1:{port} --username {username} --password $password --bucket {bucket_name} --bucket-type couchbase --bucket-ramsize {ramsize} --bucket-replica 0 --bucket-eviction-policy {evictionpolicy} --compression-mode passive --conflict-resolution sequence --wait".format( - shell_path=shell_path, port=port, username=username, - bucket_name=bucket_name, ramsize=ramsize, evictionpolicy=evictionpolicy + def bucket_flush_expect( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + command = ( + f"{shell_path} bucket-flush --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re "Running this command will totally PURGE database data from disk. Do you really want to do it? 
(Yes/No).*" { + send "Yes\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" # noqa ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def bucket_list(shell_path, hostname, port, username): - return "{shell_path} bucket-list --cluster {hostname}:{port} --username {username} --password $password".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, + def bucket_create( + shell_path, + hostname, + port, + username, + bucket_name, + ramsize, + evictionpolicy, + bucket_type, + bucket_compression, + **kwargs, + ): + return ( + "{shell_path} bucket-create --cluster 127.0.0.1:{port} " + "--username {username} --password $password --bucket " + "{bucket_name} --bucket-type {bucket_type} --bucket-ramsize " + "{ramsize} --bucket-replica 0 --bucket-eviction-policy " + "{evictionpolicy} {bucket_compression} --conflict-resolution " + "sequence --wait".format( + shell_path=shell_path, + port=port, + username=username, + bucket_name=bucket_name, + ramsize=ramsize, + evictionpolicy=evictionpolicy, + bucket_type=bucket_type, + bucket_compression=bucket_compression, + ) ) @staticmethod - def get_indexes_name(base_path, hostname, port, username, index): - return "{base_path}/cbq -e {hostname}:{port} -u {username} -p $password -q=true -s=\"SELECT name FROM system:indexes where keyspace_id = {index} AND state = 'deferred'\"".format( - base_path=base_path, hostname=hostname, port=port, username=username, index=index + def bucket_create_expect( + shell_path, + hostname, + port, + username, + bucket_name, + ramsize, + evictionpolicy, + bucket_type, + bucket_compression, + **kwargs, + ): + command = ( + f"{shell_path} bucket-create --cluster 127.0.0.1:{port} " + f"--username {username} --password --bucket {bucket_name} " + f"--bucket-type {bucket_type} --bucket-ramsize {ramsize} " + f"--bucket-replica 0 --bucket-eviction-policy " + f"{evictionpolicy} {bucket_compression} " + f"--conflict-resolution sequence --wait" ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def build_index(base_path, hostname, port, username, index_name): - return "{base_path}/cbq -e {hostname}:{port} -u {username} -p $password -q=true -s=echo \"BUILD INDEX ON {index_name}\" {index_name}".format( - base_path=base_path, hostname=hostname, port=port, username=username, index_name=index_name + def bucket_list(shell_path, hostname, port, username, **kwargs): + return ( + "{shell_path} bucket-list --cluster {hostname}:{port} " + "--username {username} --password $password -o json".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod - def is_build_completed(base_path, hostname, port, username, index): - return "{base_path}/cbq -e {hostname}:{port} -u {username} -p $password -q=true -s=\"SELECT COUNT(*) as unbuilt FROM system:indexes WHERE keyspace_id ={index} AND state <> 'online'".format( - base_path=base_path, hostname=hostname, port=port, username=username, index=index + def bucket_list_expect(shell_path, hostname, port, username, 
**kwargs): + command = ( + f"{shell_path} bucket-list --cluster {hostname}:{port}" + f" --username {username} --password -o json" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def cb_backup_full(base_path, backup_location, backup_repo, hostname, port, username, csv_bucket_list): - return "{base_path}/cbbackupmgr restore --archive {backup_location} --repo {backup_repo} --cluster couchbase://{hostname}:{port} --username {username} --password $password --force-updates --no-progress-bar --include-buckets {csv_bucket_list}".format( - base_path=base_path, - backup_location=backup_location, - backup_repo=backup_repo, - hostname=hostname, - port=port, - username=username, - csv_bucket_list=csv_bucket_list + def get_indexes_name(hostname, port, username, **kwargs): + return ( + "curl {username}:$password@{hostname}:{port}/indexStatus".format( + hostname=hostname, port=port, username=username + ) ) @staticmethod - def monitor_replication(source_username, source_hostname, source_port, bucket_name, uuid): - return "curl --silent -u {source_username}:$password http://{source_hostname}:{source_port}/pools/default/buckets/{bucket_name}/stats/replications%2F{uuid}%2F{bucket_name}%2F{bucket_name}%2Fchanges_left".format( - source_username=source_username, - source_hostname=source_hostname, - source_port=source_port, - bucket_name=bucket_name, - uuid=uuid, + def get_indexes_name_expect(hostname, port, username, **kwargs): + command = f"curl -u {username} {hostname}:{port}/indexStatus" + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars @staticmethod - def server_info(shell_path, hostname, port, username): - return "{shell_path} server-info --cluster {hostname}:{port} --username {username} --password $password ".format( - shell_path=shell_path, hostname=hostname, port=port, username=username + def get_scope_list_expect(hostname, port, username, **kwargs): + command = ( + f"curl -u {username} {hostname}:{port}/pools/default/" + f"buckets/{kwargs.get('bucket_name')}/scopes" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def create_scope_expect(base_path, hostname, port, username, **kwargs): + command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" + cb_query = ( + f"CREATE SCOPE `{kwargs.get('bucket_name')}`." 
+ f"{kwargs.get('scope_name')};" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + logger.debug(f"cb_query: {cb_query}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": command, + "CB_QUERY": cb_query, + } + return expect_block, env_vars + + @staticmethod + def create_collection_expect( + base_path, hostname, port, username, **kwargs + ): + command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" + cb_query = ( + f"CREATE COLLECTION `{kwargs.get('bucket_name')}`." + f"{kwargs.get('scope_name')}." + f"{kwargs.get('collection_name')};" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + logger.debug(f"cb_query: {cb_query}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": command, + "CB_QUERY": cb_query, + } + return expect_block, env_vars + + @staticmethod + def get_backup_bucket_list(path, sudo=False, uid=None, **kwargs): + if sudo: + return ( + "sudo -u \#{uid} find {path} -name bucket-config.json".format( + path=path, uid=uid + ) + ) + else: + return "find {path} -name bucket-config.json".format(path=path) + + @staticmethod + def build_index(base_path, hostname, port, username, index_def, **kwargs): + return ( + "{base_path}/cbq -e {hostname}:{port} -u {username} " + "-p $password -q=true -s='{index_def}'".format( + base_path=base_path, + hostname=hostname, + port=port, + username=username, + index_def=index_def, + ) + ) + + @staticmethod + def build_index_expect( + base_path, hostname, port, username, index_def, **kwargs + ): + command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + logger.debug(f"cb_query: {index_def}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": command, + "CB_QUERY": index_def, + } + return expect_block, env_vars + + @staticmethod + def check_index_build(base_path, hostname, port, username, **kwargs): + return ( + "{base_path}/cbq -e {hostname}:{port} -u {username} " + '-p $password -q=true -s="SELECT COUNT(*) as unbuilt ' + "FROM system:indexes WHERE state <> 'online'\"".format( + base_path=base_path, + hostname=hostname, + port=port, + username=username, + ) + ) + + @staticmethod + def check_index_build_expect( + 
base_path, hostname, port, username, **kwargs + ): + command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" + cb_query = ( + "SELECT COUNT(*) as unbuilt FROM system:indexes WHERE " + "state <> 'online'" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + logger.debug(f"cb_query: {cb_query}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": command, + "CB_QUERY": cb_query, + } + return expect_block, env_vars + + @staticmethod + def cb_backup_full( + base_path, + backup_location, + backup_repo, + hostname, + port, + username, + csv_bucket_list, + sudo, + uid, + skip, + **kwargs, + ): + if sudo: + return ( + "sudo -u \#{uid} {base_path}/cbbackupmgr restore " + "--archive {backup_location} --repo {backup_repo} " + "--cluster couchbase://{hostname}:{port} --username " + "{username} --password $password --force-updates {skip} " + "--no-progress-bar --include-buckets " + "{csv_bucket_list}".format( + base_path=base_path, + backup_location=backup_location, + backup_repo=backup_repo, + hostname=hostname, + port=port, + username=username, + csv_bucket_list=csv_bucket_list, + uid=uid, + skip=skip, + ) + ) + else: + return ( + "{base_path}/cbbackupmgr restore --archive " + "{backup_location} --repo {backup_repo} --cluster " + "couchbase://{hostname}:{port} --username {username} " + "--password $password --force-updates {skip} " + "--no-progress-bar --include-buckets " + "{csv_bucket_list}".format( + base_path=base_path, + backup_location=backup_location, + backup_repo=backup_repo, + hostname=hostname, + port=port, + username=username, + csv_bucket_list=csv_bucket_list, + skip=skip, + ) + ) + + @staticmethod + def cb_backup_full_expect( + base_path, + backup_location, + backup_repo, + hostname, + port, + username, + csv_bucket_list, + sudo, + uid, + skip, + **kwargs, + ): + if sudo: + command = ( + f"sudo -u \#{uid} {base_path}/cbbackupmgr restore " + f"--archive {backup_location} --repo {backup_repo} " + f"--cluster couchbase://{hostname}:{port} --username " + f"{username} --password --force-updates {skip} " + f"--no-progress-bar --include-buckets {csv_bucket_list}" + ) + else: + command = ( + f"{base_path}/cbbackupmgr restore --archive " + f"{backup_location} --repo {backup_repo} --cluster " + f"couchbase://{hostname}:{port} --username {username} " + f"--password --force-updates {skip} --no-progress-bar " + f"--include-buckets {csv_bucket_list}" + ) + if int(kwargs.get("repo_version").split(".")[0]) >= 7: + command = f"{command} --purge" + if kwargs.get("map_data") != "": + command = f"{command} --map-data {kwargs.get('map_data')}" + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Password:.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + -re "Password for --password:.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return 
expect_block, env_vars + + @staticmethod + def monitor_replication( + source_username, + source_hostname, + source_port, + bucket_name, + uuid, + **kwargs, + ): + return ( + "curl --silent -u {source_username}:$password " + "http://{source_hostname}:{source_port}/pools/default/buckets" + "/{bucket_name}/stats/replications%2F{uuid}%2F{bucket_name}" + "%2F{bucket_name}%2Fchanges_left".format( + source_username=source_username, + source_hostname=source_hostname, + source_port=source_port, + bucket_name=bucket_name, + uuid=uuid, + ) + ) + + @staticmethod + def monitor_replication_expect( + source_username, + source_hostname, + source_port, + bucket_name, + uuid, + **kwargs, + ): + command = ( + f"curl --silent -u {source_username} " + f"http://{source_hostname}:{source_port}/pools/default/" + f"buckets/{bucket_name}/stats/replications%2F{uuid}%2F" + f"{bucket_name}%2F{bucket_name}%2Fchanges_left" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def couchbase_server_info(shell_path, hostname, username, port, **kwargs): + return ( + "{shell_path} server-info --cluster {hostname}:{port} " + "--username {username} --password $password".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) + ) + # return("{shell_path}".format(shell_path=shell_path)) + + @staticmethod + def couchbase_server_info_expect( + shell_path, hostname, username, port, **kwargs + ): + command = ( + f"{shell_path} server-info --cluster {hostname}:{port} " + f"--username {username} --password" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def rename_cluster( + shell_path, hostname, port, username, newuser, newname, **kwargs + ): + return ( + "{shell_path} setting-cluster --cluster {hostname}:{port} " + "--username {username} --password $password " + "--cluster-username {newuser} --cluster-password $newpass " + "--cluster-name {newname}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + newuser=newuser, + newname=newname, + ) + ) + + @staticmethod + def rename_cluster_expect( + shell_path, hostname, port, username, newname, **kwargs + ): + command = ( + f"curl -X POST http://{hostname}:{port}/pools/default " + f"-d clusterName={newname} -u {username}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars + + @staticmethod + def change_cluster_password_expect( + shell_path, hostname, port, username, 
newuser, **kwargs + ): + payload_string = ( + f"password={kwargs.get('newpass')}&username={newuser}&port=SAME" + ) + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://{hostname}:{port}/settings/web -u {username}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": "/tmp/run_shell.sh", + "SHELL_DATA": command, + "PAYLOAD_SECRET": payload_string, + } + return expect_block, env_vars + + @staticmethod + def server_add( + shell_path, hostname, port, username, newhost, services, **kwargs + ): + return ( + "{shell_path} server-add --cluster {hostname}:{port} " + "--username {username} --password $password \ + --server-add https://{newhost}:18091 --server-add-username " + "{username} --server-add-password $password \ + --services {services} --no-ssl-verify".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + services=services, + newhost=newhost, + ) + ) + + @staticmethod + def server_add_expect( + shell_path, hostname, port, username, newhost, services, **kwargs + ): + if kwargs.get("new_port") == "8091": + hostname_prefix = "http" + else: + hostname_prefix = "https" + payload_data = { + "hostname": f"{hostname_prefix}://{newhost}:" + f"{kwargs.get('new_port')}", + "user": username, + "password": kwargs.get("password"), + "services": services, + } + payload_data["services"] = ( + payload_data["services"] + .replace("data", "kv") + .replace("query", "n1ql") + ) + payload_string = urllib.parse.urlencode(payload_data) + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"{hostname}:8091/controller/addNode -u {username}" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" + ) + logger.debug(f"command: {command}") + env_vars = { + "CB_PWD": kwargs.get("password"), + "CB_CMD": "/tmp/run_shell.sh", + "SHELL_DATA": command, + "PAYLOAD_SECRET": payload_string, + } + return expect_block, env_vars + + @staticmethod + def rebalance(shell_path, hostname, port, username, **kwargs): + return ( + "{shell_path} rebalance --cluster {hostname}:{port} " + "--username {username} --password $password \ + --no-progress-bar".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) + ) + + @staticmethod + def rebalance_expect(shell_path, hostname, port, username, **kwargs): + command = ( + f"{shell_path} rebalance --cluster {hostname}:{port} " + f"--username {username} --password --no-progress-bar" + ) + expect_block = DatabaseCommand.get_parent_expect_block().format( + command_specific_operations="""eval spawn ${env(CB_CMD)} + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) + logger.debug(f"command: {command}") + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} + return expect_block, env_vars class CommandFactory(DatabaseCommand, OSCommand): @@ -385,74 +2033,3 @@ def 
__init__(self): hostname = "192.168.1.14" dir_path = "/var/tmp" DLPX_BIN_JQ = "/var/tmp" - print "find_install_path: ", CommandFactory.find_install_path(binary_path), "\n" - print "find_binary_path: ", CommandFactory.find_binary_path(), "\n" - print "find_install_path: ", CommandFactory.find_install_path(binary_path), "\n" - print "get_process: ", CommandFactory.get_process(), "\n" - print "get_version: ", CommandFactory.get_version(install_path), "\n" - print "start_couchbase: ", CommandFactory.start_couchbase(install_path), "\n" - print "get_data_directory: ", CommandFactory.get_data_directory("couchbase_base_dir"), "\n" - print "install_path: ", CommandFactory.stop_couchbase(install_path), "\n" - print "cluster_init: ", CommandFactory.cluster_init(shell_path, hostname, port, username, cluster_ramsize, - cluster_name, cluster_index_ramsize, cluster_fts_ramsize, - cluster_eventing_ramsize, cluster_analytics_ramsize, - additional_services), "\n" - print "cluster_setting: ", CommandFactory.cluster_setting(shell_path, hostname, port, username, cluster_ramsize, - cluster_name, cluster_index_ramsize, cluster_fts_ramsize, - cluster_eventing_ramsize, cluster_analytics_ramsize), "\n" - print "xdcr_setup: ", CommandFactory.xdcr_setup(shell_path, source_hostname, source_port, source_username, hostname, - port, username, cluster_name), "\n" - print "xdcr_replicate: ", CommandFactory.xdcr_replicate(shell_path, source_hostname, source_port, source_username, - source_bucket_name, target_bucket_name, cluster_name), "\n" - print "get_replication_uuid :", CommandFactory.get_replication_uuid(shell_path, source_hostname, source_port, - source_username), "\n" - print "get_stream_id:", CommandFactory.get_stream_id(shell_path, source_hostname, source_port, source_username, - cluster_name), "\n" - print "pause_replication:", CommandFactory.pause_replication(shell_path, source_hostname, source_port, - source_username, cluster_name, uuid), "\n" - print "resume_replication:", CommandFactory.resume_replication(shell_path, source_hostname, source_port, - source_username, cluster_name, uuid), "\n" - print "delete_replication:", CommandFactory.delete_replication(shell_path, source_hostname, source_port, - source_username, uuid, cluster_name), "\n" - print "xdcr_delete:", CommandFactory.xdcr_delete(shell_path, source_hostname, source_port, source_username, - hostname, port, username, cluster_name), "\n" - print "get_source_bucket_list:", CommandFactory.get_source_bucket_list(shell_path, source_hostname, source_port, - source_username), "\n" - print "get_status: ", CommandFactory.get_status(shell_path, hostname, port, username), "\n" - print "change_permission: ", CommandFactory.change_permission(directory_path), "\n" - print "make_directory: ", CommandFactory.make_directory(directory_path), "\n" - print "get_config_directory: ", CommandFactory.get_config_directory(mount_path), "\n" - print "node_init:", CommandFactory.node_init(shell_path, port, username, mount_path), "\n" - print "bucket_edit: ", CommandFactory.bucket_edit(shell_path, hostname, port, username, bucket_name, - flush_value), "\n" - print "bucket_edit_ramquota: ", CommandFactory.bucket_edit_ramquota(shell_path, hostname, port, username, - bucket_name, ramsize), "\n" - print "bucket_delete: ", CommandFactory.bucket_delete(shell_path, hostname, port, username, bucket_name), "\n" - print "bucket_flush: ", CommandFactory.bucket_flush(shell_path, hostname, port, username, bucket_name), "\n" - print "bucket_create: ", CommandFactory.bucket_create(shell_path, 
hostname, port, username, bucket_name, ramsize, - evictionpolicy), "\n" - print "bucket_list: ", CommandFactory.bucket_list(shell_path, hostname, port, username), "\n" - print "get_indexes_name: ", CommandFactory.get_indexes_name(base_path, hostname, port, username, index), "\n" - print "build_index: ", CommandFactory.build_index(base_path, hostname, port, username, index_name), "\n" - print "is_build_completed: ", CommandFactory.is_build_completed(base_path, hostname, port, username, index), "\n" - print "cb_backup_full: ", CommandFactory.cb_backup_full(base_path, backup_location, backup_repo, hostname, port, - username, csv_bucket_list), "\n" - print "monitor_replication: ", CommandFactory.monitor_replication(source_username, source_hostname, source_port, - bucket_name, uuid), "\n" - print "server_info: ", CommandFactory.server_info(shell_path, hostname, port, username), "\n" - - print "read_file: ", CommandFactory.read_file(filename), "\n" - - print "write_file: ", CommandFactory.write_file(filename, data), "\n" - - print "check_file: ", CommandFactory.check_file(file_path), "\n" - - print "get_ip_of_hostname: ", CommandFactory.get_ip_of_hostname(hostname), "\n" - - print "check_directory: ", CommandFactory.check_directory(dir_path), "\n" - - print "delete_file: ", CommandFactory.delete_file(filename), "\n" - - print "get_dlpx_bin: ", CommandFactory.get_dlpx_bin(), "\n" - - print "unmount_file_system: ", CommandFactory.unmount_file_system(mount_path), "\n" diff --git a/src/db_commands/constants.py b/src/db_commands/constants.py index a227242..1343b06 100644 --- a/src/db_commands/constants.py +++ b/src/db_commands/constants.py @@ -1,18 +1,20 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# This module is created to define constants values which are being used in this plugin -####################################################################################################################### +############################################################################## +# This module is created to define constants values which are being used in +# this plugin. +############################################################################## # Constants LOCK_SYNC_OPERATION = "DO_NOT_DELETE_DELPHIX_sync.lck" LOCK_SNAPSYNC_OPERATION = "DO_NOT_DELETE_DELPHIX_snapsync.lck" SRC_BUCKET_INFO_FILENAME = "couchbase_src_bucket_info.cfg" -ENV_VAR_KEY = 'environment_vars' +ENV_VAR_KEY = "environment_vars" StatusIsActive = "healthy" # it shows the status of server is good -DELPHIX_HIDDEN_FOLDER = ".delphix" # Folder inside which config file will create +# Folder inside which config file will create +DELPHIX_HIDDEN_FOLDER = ".delphix" CONFIG_FILE_NAME = "config.txt" EVICTION_POLICY = "valueOnly" DEFAULT_CB_BIN_PATH = "/opt/couchbase/bin" @@ -20,15 +22,31 @@ XDCR = "XDCR" -# String literals to match and throw particular type of exceptions. used by db_exception_handler.py -ALREADY_CLUSTER_INIT = "Cluster is already initialized, use setting-cluster to change settings" +# String literals to match and throw particular type of exceptions. 
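The `*_expect` factory methods above all follow one pattern: they return a rendered expect script (built from `DatabaseCommand.get_parent_expect_block()`) together with a dictionary of environment variables such as `CB_CMD`, `CB_PWD`, and `CB_QUERY`, so the spawned command and the Couchbase password reach the session through `${env(...)}` lookups rather than being interpolated into a shell command line. A minimal sketch of how a caller could execute such a pair on a host that has the `expect` binary; `run_expect_pair` is an illustrative helper and not part of this plugin:

    import os
    import subprocess
    import tempfile


    def run_expect_pair(expect_block, env_vars):
        """Run an (expect_block, env_vars) pair from a *_expect method.

        The script is written to a temporary file and executed with the
        supplied variables in the environment, so the password travels
        only through CB_PWD and never through argv.
        """
        with tempfile.NamedTemporaryFile(
            "w", suffix=".exp", delete=False
        ) as script:
            script.write(expect_block)
            script_path = script.name
        try:
            env = dict(os.environ)
            env.update(env_vars)
            result = subprocess.run(
                ["expect", script_path],
                env=env,
                capture_output=True,
                text=True,
            )
            return result.returncode, result.stdout
        finally:
            os.remove(script_path)

With the pair returned by `build_index_expect`, for example, this would spawn `cbq`, answer the password prompt from `CB_PWD`, submit the index definition held in `CB_QUERY`, and exit.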
+# used by db_exception_handler.py +ALREADY_CLUSTER_INIT = ( + "Cluster is already initialized, use setting-cluster to change settings" +) SHUTDOWN_FAILED = "shutdown failed" BUCKET_NAME_ALREADY_EXIST = "Bucket with given name already exists" -MULTIPLE_VDB_ERROR = "Changing data of nodes that are part of provisioned cluster is not supported" -CLUSTER_ALREADY_PRESENT = "Cluster reference to the same cluster already exists under the name" -ALREADY_CLUSTER_FOR_BUCKET= "Replication to the same remote cluster and bucket already exists" +MULTIPLE_VDB_ERROR = ( + "Changing data of nodes that are part of provisioned " + "cluster is not supported" +) +CLUSTER_ALREADY_PRESENT = ( + "Cluster reference to the same cluster already exists under the name" +) +ALREADY_CLUSTER_FOR_BUCKET = ( + "Replication to the same remote cluster and bucket already exists" +) # used by linked.py -ALREADY_SYNC_FILE_PRESENT_ON_HOST = "Not cleaning lock files as not created by this job. Also check, is there any XDCR set up on this host. If yes " \ - "then sync file should not be deleted " -RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS = "dSource Creation / Snapsync for dSource {} is in progress. Same staging server {} cannot be used for other operations" +ALREADY_SYNC_FILE_PRESENT_ON_HOST = ( + "Not cleaning lock files as not created by this job. " + "Also check, is there any XDCR set up on this host. If yes " + "then sync file should not be deleted " +) +RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS = ( + "dSource Creation / Snapsync for dSource {} is in progress. " + "Same staging server {} cannot be used for other operations" +) diff --git a/src/internal_exceptions/base_exceptions.py b/src/internal_exceptions/base_exceptions.py index c5a2d85..eb51667 100644 --- a/src/internal_exceptions/base_exceptions.py +++ b/src/internal_exceptions/base_exceptions.py @@ -1,15 +1,18 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -We are defining two base classes for two types of exceptions: one is related to database & the other one is for -run-time errors in the plugin. Both classes are child class of Exception which is defined inside python -The purpose of segregation of these two kinds of exceptions is to get a more accurate message at runtime error. -All the exceptions created for the database will inherit the DatabaseException and these are defined in the current package +We are defining two base classes for two types of exceptions: one is related +to database & the other one is for run-time errors in the plugin. Both classes +are child class of Exception which is defined inside python +The purpose of segregation of these two kinds of exceptions is to get a more +accurate message at runtime error. +All the exceptions created for the database will inherit the DatabaseException +and these are defined in the current package. 
""" -####################################################################################################################### +############################################################################## from dlpx.virtualization.platform.exceptions import UserError @@ -28,8 +31,10 @@ def __init__(self, message, action, error_string): super(DatabaseException, self).__init__(message, action, error_string) -# Exceptions related to plugin operation like discovery, linking, virtualization are being handled using this. -# plugin_exceptions.py is responsible to catch and throw specific error message for each kind of delphix operation. +# Exceptions related to plugin operation like discovery, linking, +# virtualization are being handled using this. +# plugin_exceptions.py is responsible to catch and throw specific error +# message for each kind of delphix operation. class PluginException(UserConvertibleException): def __init__(self, message, action, error_string): super(PluginException, self).__init__(message, action, error_string) diff --git a/src/internal_exceptions/database_exceptions.py b/src/internal_exceptions/database_exceptions.py index ba44204..c339c1d 100644 --- a/src/internal_exceptions/database_exceptions.py +++ b/src/internal_exceptions/database_exceptions.py @@ -1,21 +1,23 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -from base_exceptions import DatabaseException +from internal_exceptions.base_exceptions import DatabaseException - -# Some of below defined exceptions are not being used currently but designed for future updates. +# Some of below defined exceptions are not being used currently but designed +# for future updates class DuplicateClusterError(DatabaseException): def __init__(self, message=""): message = "Duplicate cluster name found, " + message - super(DuplicateClusterError, self).__init__(message, - "Delete existing staging cluster configuration on source or use different staging cluster name ", - "Duplicate cluster names are not allowed" - ) + super(DuplicateClusterError, self).__init__( + message, + "Delete existing staging cluster configuration on source or use " + "different staging cluster name ", + "Duplicate cluster names are not allowed", + ) # When bucket list in snapshot is empty @@ -24,7 +26,8 @@ def __init__(self, message=""): message = "Please check configurations and try again, " + message super(FailedToReadBucketDataFromSnapshot, self).__init__( message, - "Bucket list is empty. Please verify if the bucket exist at source", + "Bucket list is empty. 
Please verify if the bucket exist at " + "source", "bucket list empty", ) @@ -32,15 +35,22 @@ def __init__(self, message=""): # Failed To start or stop the server class CouchbaseServicesError(DatabaseException): def __init__(self, message=""): - message = "Any of start/stop operation for couchbase service fails: " + message - super(CouchbaseServicesError, self).__init__(message, - "Please check the user permission and try again", - "Not able to stop the couchbase server") + message = ( + "Any of start/stop operation for couchbase service fails: " + + message + ) + super(CouchbaseServicesError, self).__init__( + message, + "Please check the user permission and try again", + "Not able to stop the couchbase server", + ) class BucketOperationError(DatabaseException): def __init__(self, message=""): message = "Bucket operation failed: " + message - super(BucketOperationError, self).__init__(message, - "Bucket related issue is observed ", - "Please see logs for more details") + super(BucketOperationError, self).__init__( + message, + "Bucket related issue is observed ", + "Please see logs for more details", + ) diff --git a/src/internal_exceptions/plugin_exceptions.py b/src/internal_exceptions/plugin_exceptions.py index 6e1018e..1258973 100644 --- a/src/internal_exceptions/plugin_exceptions.py +++ b/src/internal_exceptions/plugin_exceptions.py @@ -1,167 +1,215 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -Adding exceptions related to plugin. +Adding exceptions related to plugin """ -####################################################################################################################### +############################################################################## + +import logging from internal_exceptions.base_exceptions import PluginException +logger = logging.getLogger(__name__) + class RepositoryDiscoveryError(PluginException): def __init__(self, message=""): message = "Not able to search repository information, " + message - super(RepositoryDiscoveryError, self).__init__(message, - "Check the COUCHBASE_PATH & couchbase installation", - "Failed to search repository information") + super(RepositoryDiscoveryError, self).__init__( + message, + "Check the COUCHBASE_PATH & couchbase installation", + "Failed to search repository information", + ) # This exception will be raised when failed to find source config class SourceConfigDiscoveryError(PluginException): def __init__(self, message=""): message = "Failed to find source config, " + message - super(SourceConfigDiscoveryError, self).__init__(message, - "Stop the couchbase service if it is running", - "Not able to find source") + super(SourceConfigDiscoveryError, self).__init__( + message, + "Stop the couchbase service if it is running", + "Not able to find source", + ) class MultipleSyncError(PluginException): def __init__(self, message=""): - message = "Resynchronization is in progress for other dSource, " + message - super(MultipleSyncError, self).__init__(message, - "Please wait while the other resync operation completes and try again ", - "Staging host already in use. 
Only Serial operations supported for couchbase") + message = ( + "Resynchronization is in progress for other dSource, " + message + ) + super(MultipleSyncError, self).__init__( + message, + "Please wait while the other resync operation completes and try " + "again ", + "Staging host already in use. Only Serial operations supported " + "for couchbase", + ) class MultipleXDCRSyncError(PluginException): def __init__(self, message=""): message = "XDCR setup found on staging host " + message - super(MultipleXDCRSyncError, self).__init__(message, - "Please use different staging host", - "Multiple XDCR is not supported on single staging host") + super(MultipleXDCRSyncError, self).__init__( + message, + "Please use different staging host", + "Multiple XDCR is not supported on single staging host", + ) class MultipleSnapSyncError(PluginException): - def __init__(self, message=""): + def __init__(self, message="", filename=""): + logger.debug( + "Exception MultipleSnapSyncError file: {}".format(filename) + ) message = "SnapSync is running for any other dSource " + message - super(MultipleSnapSyncError, self).__init__(message, - "Please wait while the other operation completes and try again ", - "Staging host already in use for SNAP-SYNC. Only Serial operations supported for couchbase") + super(MultipleSnapSyncError, self).__init__( + message, + "Please wait while the other operation completes and try again " + "or delete a lock file {}".format(filename), + "Staging host already in use for SNAP-SYNC. Only Serial " + "operations supported for couchbase", + ) class FileIOError(PluginException): def __init__(self, message=""): message = "Failed to read/write operation from a file " + message - super(FileIOError, self).__init__(message, - "Verify the permission", - "Please check the logs for more details") + super(FileIOError, self).__init__( + message, + "Verify the permission", + "Please check the logs for more details", + ) class MountPathError(PluginException): def __init__(self, message=""): - message = "Failed to create mount path because another file system is already mounted " + message - super(MountPathError, self).__init__(message, - "Please re-try after the previous operation is completed", - "Please check the logs for more details") + message = ( + "Failed to create mount path because another file system is " + "already mounted " + message + ) + super(MountPathError, self).__init__( + message, + "Please re-try after the previous operation is completed", + "Please check the logs for more details", + ) class UnmountFileSystemError(PluginException): def __init__(self, message=""): - message = "Failed to unmount the file system from host in resync operation " + message - super(UnmountFileSystemError, self).__init__(message, - "Please try again", - "Please check the logs for more details") + message = ( + "Failed to unmount the file system from host in resync operation " + + message + ) + super(UnmountFileSystemError, self).__init__( + message, + "Please try again", + "Please check the logs for more details", + ) + + +class MountPathStaleError(PluginException): + def __init__(self, message=""): + message = "Failed to get the stale mount path information " + message + super(MountPathStaleError, self).__init__( + message, + "Please clean the stale mount point.", + "Please check the logs for more details", + ) ERR_RESPONSE_DATA = { - 'ERR_INSUFFICIENT_RAMQUOTA': { - 'MESSAGE': "Provided bucket size is not suffice to proceed", - 'ACTION': "Please change the bucket size and try again", - 'ERR_STRING': 
"RAM quota cannot be less than 100 MB" - }, - 'ERR_CBBKP_MGR1': { - 'MESSAGE': "Internal server error", - 'ACTION': "Please try again to run the previous operation", - 'ERR_STRING': "Internal server error while executing" - }, - - 'ERR_RESTORE_CLUSTER': { - 'MESSAGE': "Internal server error", - 'ACTION': "Please try again to run the previous operation", - 'ERR_STRING': "Error restoring cluster" - }, - 'ERR_BUCKET_LIST_EMPTY': { - 'MESSAGE': "Please check configurations and try again", - 'ACTION': "Bucket list is empty. Please verify if the bucket exist at source", - 'ERR_STRING': "bucket list empty", - }, - 'ERR_UNABLE_TO_CONNECT': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Unable to connect to host", - }, - 'ERR_UNRECOGNIZED_ARGS': { - 'MESSAGE': "Argument(s) mismatch. Please check logs for more details", - 'ACTION': "Please provide correct configuration details and try again", - 'ERR_STRING': "unrecognized arguments", - }, - 'ERR_INCORRECT_CREDENTIAL': { - 'MESSAGE': "Invalid credentials", - 'ACTION': "Try again with correct credentials", - 'ERR_STRING': "please check your username", - }, - 'ERR_REPLICATION_ALREADY_PRESENT': { - 'MESSAGE': "Duplicate cluster name found", - 'ACTION': "Delete existing staging cluster configuration on source or use different staging cluster name", - 'ERR_STRING': "Replication to the same remote cluster and bucket already exists", - }, - 'ERR_DUPLICATE_CLUSTER_NAME': { - 'MESSAGE': "Duplicate cluster name found", - 'ACTION': "Delete existing staging cluster configuration on source or use different staging cluster name ", - 'ERR_STRING': "Duplicate cluster names are not allowed", - }, - 'ERR_INTERNAL_SERVER_ERROR': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Internal server error, please retry your request", - }, - 'ERR_INTERNAL_SERVER_ERROR1': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Unable to connect to host", - }, - 'ERR_XDCR_OPERATION_ERROR': { - 'MESSAGE': "Unable to set up XDCR", - 'ACTION': "Please correct parameters and try again", - 'ERR_STRING': "Replication Error", - }, - - 'ERR_CB_BACKUP_MANGER_FAILED': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please verify the provided archive path and try again", - 'ERR_STRING': "Error restoring cluster: Bucket Backup", - }, - 'ERR_SERVICE_UNAVAILABLE_ERROR': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please try again ", - 'ERR_STRING': "is not available on target", - }, - 'ERR_UNEXPECTED_ERROR1': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please try again ", - 'ERR_STRING': "Running this command will totally PURGE database data from disk. Do you really want to do", - }, - 'ERR_INVALID_BACKUP_DIR': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Try again with correct archive location. 
", - 'ERR_STRING': "Archive directory .* doesn't exist", - }, - 'DEFAULT_ERR': { - 'MESSAGE': "Internal error occurred, retry again", - 'ACTION': "Please check logs for more details", - 'ERR_STRING': "Default error string", + "ERR_INSUFFICIENT_RAMQUOTA": { + "MESSAGE": "Provided bucket size is not suffice to proceed", + "ACTION": "Please change the bucket size and try again", + "ERR_STRING": "RAM quota cannot be less than 100 MB", + }, + "ERR_CBBKP_MGR1": { + "MESSAGE": "Internal server error", + "ACTION": "Please try again to run the previous operation", + "ERR_STRING": "Internal server error while executing", + }, + "ERR_RESTORE_CLUSTER": { + "MESSAGE": "Internal server error", + "ACTION": "Please try again to run the previous operation", + "ERR_STRING": "Error restoring cluster", + }, + "ERR_BUCKET_LIST_EMPTY": { + "MESSAGE": "Please check configurations and try again", + "ACTION": "Bucket list is empty. Please verify if the bucket exist " + "at source", + "ERR_STRING": "bucket list empty", + }, + "ERR_UNABLE_TO_CONNECT": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Unable to connect to host", + }, + "ERR_UNRECOGNIZED_ARGS": { + "MESSAGE": "Argument(s) mismatch. Please check logs for more details", + "ACTION": "Please provide correct configuration details and try again", + "ERR_STRING": "unrecognized arguments", + }, + "ERR_INCORRECT_CREDENTIAL": { + "MESSAGE": "Invalid credentials", + "ACTION": "Try again with correct credentials", + "ERR_STRING": "please check your username", + }, + "ERR_REPLICATION_ALREADY_PRESENT": { + "MESSAGE": "Duplicate cluster name found", + "ACTION": "Delete existing staging cluster configuration on source " + "or use different staging cluster name", + "ERR_STRING": "Replication to the same remote cluster and bucket " + "already exists", + }, + "ERR_DUPLICATE_CLUSTER_NAME": { + "MESSAGE": "Duplicate cluster name found", + "ACTION": "Delete existing staging cluster configuration on source " + "or use different staging cluster name ", + "ERR_STRING": "Duplicate cluster names are not allowed", + }, + "ERR_INTERNAL_SERVER_ERROR": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Internal server error, please retry your request", + }, + "ERR_INTERNAL_SERVER_ERROR1": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Unable to connect to host", + }, + "ERR_XDCR_OPERATION_ERROR": { + "MESSAGE": "Unable to set up XDCR", + "ACTION": "Please correct parameters and try again", + "ERR_STRING": "Replication Error", + }, + "ERR_CB_BACKUP_MANGER_FAILED": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please verify the provided archive path and try again", + "ERR_STRING": "Error restoring cluster: Bucket Backup", + }, + "ERR_SERVICE_UNAVAILABLE_ERROR": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please try again ", + "ERR_STRING": "is not available on target", + }, + "ERR_UNEXPECTED_ERROR1": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please try again ", + "ERR_STRING": "Running this command will totally PURGE database " + "data from disk. Do you really want to do", + }, + "ERR_INVALID_BACKUP_DIR": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Try again with correct archive location. 
", + "ERR_STRING": "Archive directory .* doesn't exist", + }, + "DEFAULT_ERR": { + "MESSAGE": "Internal error occurred, retry again", + "ACTION": "Please check logs for more details", + "ERR_STRING": "Default error string", }, } diff --git a/src/operations/config.py b/src/operations/config.py index 11910d6..700a506 100644 --- a/src/operations/config.py +++ b/src/operations/config.py @@ -1,14 +1,16 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This file contains global variables. There are some cases when we need to pass the parameters from one module to another -without using the function, then use global variables. We should try to avoid this approach. Although in some cases this -approach saves a good number of code lines. We should use this file only for that purpose. +This file contains global variables. There are some cases when we need to pass +the parameters from one module to another without using the function, then use +global variables. We should try to avoid this approach. +Although in some cases this approach saves a good number of code lines. +We should use this file only for that purpose """ -####################################################################################################################### +############################################################################## SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = True diff --git a/src/operations/discovery.py b/src/operations/discovery.py index 01bb4eb..36de2ab 100644 --- a/src/operations/discovery.py +++ b/src/operations/discovery.py @@ -1,19 +1,18 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This module contains the methods responsible for discovery operations. 
+This module contains the methods responsible for discovery operations """ -####################################################################################################################### +############################################################################## import logging -import sys from controller import helper_lib from generated.definitions import RepositoryDefinition -from internal_exceptions.base_exceptions import GenericUserError -from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError, SourceConfigDiscoveryError +from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError +from internal_exceptions.plugin_exceptions import SourceConfigDiscoveryError logger = logging.getLogger(__name__) @@ -21,35 +20,56 @@ def find_repos(source_connection): """ Args: - source_connection (RemoteConnection): The connection associated with the remote environment to run repository discovery + source_connection (RemoteConnection): The connection associated with + the remote environment to run repository discovery Returns: Object of RepositoryDefinition class """ try: binary_paths = helper_lib.find_binary_path(source_connection) repositories = [] - for binary_path in binary_paths.split(';'): + for binary_path in binary_paths.split(";"): if helper_lib.check_dir_present(source_connection, binary_path): - install_path = helper_lib.find_install_path(source_connection, binary_path) - shell_path = helper_lib.find_shell_path(source_connection, binary_path) - version = helper_lib.find_version(source_connection, install_path) + install_path = helper_lib.find_install_path( + source_connection, binary_path + ) + shell_path = helper_lib.find_shell_path( + source_connection=source_connection, + binary_path=binary_path, + ) + version = helper_lib.find_version( + source_connection=source_connection, + install_path=install_path, + ) + (uid, gid) = helper_lib.find_ids( + source_connection=source_connection, + install_path=install_path, + ) pretty_name = "Couchbase ({})".format(version) - repository_definition = RepositoryDefinition(cb_install_path=install_path, cb_shell_path=shell_path, - version=version, pretty_name=pretty_name) + repository_definition = RepositoryDefinition( + cb_install_path=install_path, + cb_shell_path=shell_path, + version=version, + pretty_name=pretty_name, + uid=uid, + gid=gid, + ) repositories.append(repository_definition) return repositories except RepositoryDiscoveryError as err: - err.to_user_error(), None, sys.exc_info()[2] + logger.exception(err) + raise err.to_user_error() except Exception as err: - logger.debug("find_repos: Caught unexpected exception!" + err.message) + logger.debug("find_repos: Caught unexpected exception!" 
+ str(err)) raise def find_source(source_connection, repository): """ Args: - source_connection (RemoteConnection): The connection associated with the remote environment to run repository discovery + source_connection (RemoteConnection): The connection associated with + the remote environment to run repository discovery repository: Object of RepositoryDefinition Returns: @@ -57,28 +77,33 @@ def find_source(source_connection, repository): """ logger.debug("Finding source config...") try: - instance = helper_lib.is_instance_present_of_gosecrets(source_connection) + instance = helper_lib.is_instance_present_of_gosecrets( + source_connection, + ) if not instance: logger.debug("No Couchbase instance found on host") - logger.debug("Hostname: {}".format(source_connection.environment.host.name)) + logger.debug( + "Hostname: {}".format( + source_connection.environment.host.name, + ) + ) return [] else: logger.debug("Couchbase instance found on host") - logger.debug("Hostname: {}".format(source_connection.environment.host.name)) + logger.debug( + "Hostname: " + "{}".format( + source_connection.environment.host.name, + ) + ) return [] - # # We don't want to run code beyond this point to avoid showing existing couchbase instance. - # # Couchbase supports only 1 instance on server so that instance on host should be managed by delphix - # source_configs = [] - # PORT = 8091 - # pretty_name = "Couchbase:{}".format(PORT) - # hostname = source_connection.environment.host.name - # data_path = helper_lib.get_data_directory(source_connection,repository) - # data_path = os.path.join(data_path, "data") - # source_config = SourceConfigDefinition(couchbase_src_port=PORT, couchbase_src_host=hostname, pretty_name=pretty_name, db_path=data_path) - # source_configs.append(source_config) - # return source_configs + # # We don't want to run code beyond this point to avoid showing + # existing couchbase instance. + # # Couchbase supports only 1 instance on server so that instance + # on host should be managed by delphix except SourceConfigDiscoveryError as err: - raise err.to_user_error(), None, sys.exc_info()[2] + logger.exception(err) + raise err.to_user_error() except Exception as err: - logger.debug("find_source: Caught unexpected exception!" + err.message) + logger.debug("find_source: Caught unexpected exception!" + str(err)) raise diff --git a/src/operations/link_cbbkpmgr.py b/src/operations/link_cbbkpmgr.py index 88e5ef3..3c2fe2c 100644 --- a/src/operations/link_cbbkpmgr.py +++ b/src/operations/link_cbbkpmgr.py @@ -1,267 +1,222 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, functions defined for couchbase backup manager ingestion mechanism. 
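Both ingestion paths guard the staging host with lock files (`DO_NOT_DELETE_DELPHIX_sync.lck` and `DO_NOT_DELETE_DELPHIX_snapsync.lck` from `src/db_commands/constants.py`) so that only one resync or snapsync job runs at a time. A simplified sketch of that guard, using the `helper_lib` calls that appear in these modules; the wrapper functions themselves are illustrative rather than the plugin's exact implementation:

    from controller import helper_lib
    from internal_exceptions.plugin_exceptions import MultipleSyncError


    def acquire_sync_lock(connection, lock_path, message):
        """Fail fast if another job holds the lock, otherwise create it."""
        if helper_lib.check_file_present(connection, lock_path):
            # Another dSource operation on this staging host owns the lock.
            raise MultipleSyncError(
                "Sync file is already created by other process"
            )
        helper_lib.write_file(connection, message, lock_path)


    def release_sync_lock(connection, lock_path):
        """Remove the lock once the operation finishes or fails."""
        helper_lib.delete_file(connection, lock_path)

The post-snapshot functions below release both locks in this way once a snapshot has been taken.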
-####################################################################################################################### +############################################################################## +# In this module, functions defined for couchbase backup manager ingestion +# mechanism +############################################################################## +import copy +import json import logging -import os -from dlpx.virtualization.platform import Status - -import db_commands.constants from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -from controller.helper_lib import get_bucket_size_in_MB, get_sync_lock_file_name from controller.resource_builder import Resource from generated.definitions import SnapshotDefinition -from internal_exceptions.plugin_exceptions import MultipleSyncError, MultipleSnapSyncError from operations import config +from operations import linking logger = logging.getLogger(__name__) -def resync_cbbkpmgr(staged_source, repository, source_config, input_parameters): +def resync_cbbkpmgr( + staged_source, repository, source_config, input_parameters +): dsource_type = input_parameters.d_source_type - bucket_size = staged_source.parameters.bucket_size - rx_connection = staged_source.staged_connection + dsource_name = source_config.pretty_name + couchbase_host = input_parameters.couchbase_host resync_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - - config_dir = resync_process.create_config_dir() - config.SYNC_FILE_NAME = config_dir + "/" + get_sync_lock_file_name(dsource_type, source_config.pretty_name) - src_bucket_info_filename = db_commands.constants.SRC_BUCKET_INFO_FILENAME - src_bucket_info_filename = os.path.dirname(config_dir) + "/" + src_bucket_info_filename - logger.debug("src_bucket_info_filename = {}".format(src_bucket_info_filename)) - - if helper_lib.check_file_present(rx_connection, config.SYNC_FILE_NAME): - logger.debug("Sync file is already created by other process") - config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False - raise MultipleSyncError("Sync file is already created by other process") - else: - # creating sync file - msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(source_config.pretty_name, - input_parameters.couchbase_host) - helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME) - - resync_process.restart_couchbase() - resync_process.node_init() - resync_process.cluster_init() + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + resync_process.check_and_update_archive_path() + + linking.check_for_concurrent( + resync_process, dsource_type, dsource_name, couchbase_host + ) + + # validate if this works as well for backup + linking.configure_cluster(resync_process) + logger.debug("Finding source and staging bucket list") - bucket_details_source = resync_process.source_bucket_list_offline(filename=src_bucket_info_filename) - bucket_details_staged = resync_process.bucket_list() - - config_setting = staged_source.parameters.config_settings_prov - logger.debug("Bucket names passed for configuration: {}".format(config_setting)) - - bucket_configured_staged = [] - if len(config_setting) > 0: - logger.debug("Getting bucket information from config") - for config_bucket in config_setting: - 
bucket_configured_staged.append(config_bucket["bucketName"]) - logger.debug("Filtering bucket name with size only from above output") - bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source, config_bucket["bucketName"]) - bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1]) - - if config_bucket["bucketName"] not in bucket_details_staged: - resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - else: - logger.debug("Bucket {} already present in staged environment. Recreating bucket ".format( - config_bucket["bucketName"])) - resync_process.bucket_remove(config_bucket["bucketName"]) - resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - - logger.debug("Finding buckets present at staged server") - bucket_details_staged = resync_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged)) - - logger.debug("Extra bucket found to delete:{} ".format(extra_bucket)) - for bucket in extra_bucket: - resync_process.bucket_remove(bucket) - else: - logger.debug("Finding buckets present at staged server with size") - all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source) - logger.debug("Filtering bucket name with size only from above output") - filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source) - for items in all_bkt_list_with_size: - if items: - logger.debug("Running bucket operations for {}".format(items)) - bkt_name, bkt_size = items.split(',') - - bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_size) - if bkt_name not in bucket_details_staged: - resync_process.bucket_create(bkt_name, bkt_size_mb) - else: - logger.debug( - "Bucket {} already present in staged environment. 
Recreating bucket ".format(bkt_name)) - resync_process.bucket_remove(bkt_name) - resync_process.bucket_create(bkt_name, bkt_size_mb) - - bucket_details_staged = resync_process.bucket_list() - filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket)) - logger.info("Extra bucket found to delete:{}".format(extra_bucket)) - for bucket in extra_bucket: - resync_process.bucket_remove(bucket) - - bucket_details_staged = resync_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - csv_bucket_list = ",".join(filter_bucket_list) + bucket_details_source = resync_process.source_bucket_list_offline() + bucket_details_staged = helper_lib.filter_bucket_name_from_output( + resync_process.bucket_list() + ) + + buckets_toprocess = linking.buckets_precreation( + resync_process, bucket_details_source, bucket_details_staged + ) + + csv_bucket_list = ",".join(buckets_toprocess) logger.debug("Started CB backup manager") + helper_lib.sleepForSecond(30) resync_process.cb_backup_full(csv_bucket_list) + helper_lib.sleepForSecond(30) + linking.build_indexes(resync_process) + logger.info("Stopping Couchbase") + resync_process.stop_couchbase() + resync_process.save_config("parent") -def pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parameters): - pre_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - bucket_size = input_parameters.bucket_size - rx_connection = staged_source.staged_connection - config_dir = pre_snapshot_process.create_config_dir() - config.SNAP_SYNC_FILE_NAME = config_dir + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION - src_bucket_info_filename = db_commands.constants.SRC_BUCKET_INFO_FILENAME - src_bucket_info_filename = os.path.dirname(config_dir) + "/" + src_bucket_info_filename - - if helper_lib.check_file_present(rx_connection, config.SNAP_SYNC_FILE_NAME): - logger.debug("File path is already created {}".format(config.SNAP_SYNC_FILE_NAME)) - config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False - raise MultipleSnapSyncError("SnapSync file is already created by other process") - else: - logger.debug("Creating lock file...") - msg = "dSource Creation / Snapsync for dSource {} is in progress. 
Same staging server {} cannot be used for other operations".format( - source_config.pretty_name, input_parameters.couchbase_host) - helper_lib.write_file(rx_connection, msg, config.SNAP_SYNC_FILE_NAME) - logger.debug("Re-ingesting from latest backup...") - pre_snapshot_process.start_couchbase() - pre_snapshot_process.node_init() - pre_snapshot_process.cluster_init() - bucket_details_source = pre_snapshot_process.source_bucket_list_offline( - filename=src_bucket_info_filename) - bucket_details_staged = pre_snapshot_process.bucket_list() - config_setting = staged_source.parameters.config_settings_prov - logger.debug("Buckets name passed for configuration: {}".format(config_setting)) - bucket_configured_staged = [] - if len(config_setting) != 0: - logger.debug("Inside config") - for config_bucket in config_setting: - logger.debug("Adding bucket names provided in config settings") - bucket_configured_staged.append(config_bucket["bucketName"]) - bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source, - config_bucket["bucketName"]) - bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1]) - - if config_bucket["bucketName"] not in bucket_details_staged: - pre_snapshot_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - else: - pre_snapshot_process.bucket_remove(config_bucket["bucketName"]) - pre_snapshot_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - bucket_details_staged = pre_snapshot_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged)) - logger.debug("Extra bucket found :{}".format(extra_bucket)) - for bucket in extra_bucket: - logger.debug("Deleting bucket {}".format(bucket)) - pre_snapshot_process.bucket_remove(bucket) - else: - all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source) - filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source) - logger.info("Creating the buckets") - for items in all_bkt_list_with_size: - if items: - bkt_name, bkt_size = items.split(',') - bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_size) - if bkt_name not in bucket_details_staged: - pre_snapshot_process.bucket_create(bkt_name, bkt_size_mb) - else: - logger.info( - "Bucket {} already present in staged environment. 
Recreating bucket ".format( - bkt_name)) - pre_snapshot_process.bucket_remove(bkt_name) - pre_snapshot_process.bucket_create(bkt_name, bkt_size_mb) - - bucket_details_staged = pre_snapshot_process.bucket_list() - filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket)) - logger.info("Extra bucket found :{}".format(extra_bucket)) - for bucket in extra_bucket: - pre_snapshot_process.bucket_remove(bucket) +def pre_snapshot_cbbkpmgr( + staged_source, repository, source_config, input_parameters +): + + # this is for normal snapshot + # logger.info("Do nothing version Couchbase") + + pre_snapshot_process = CouchbaseOperation( + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + old_archive_name = input_parameters.archive_name + new_archive_name = pre_snapshot_process.check_and_update_archive_path( + check_file=True + ) + dsource_type = input_parameters.d_source_type + dsource_name = source_config.pretty_name + couchbase_host = input_parameters.couchbase_host + linking.check_for_concurrent( + pre_snapshot_process, dsource_type, dsource_name, couchbase_host + ) + if old_archive_name == new_archive_name: + logger.debug("Finding source and staging bucket list") bucket_details_staged = pre_snapshot_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) + filter_bucket_list = helper_lib.filter_bucket_name_from_output( + bucket_details_staged + ) csv_bucket_list = ",".join(filter_bucket_list) - pre_snapshot_process.cb_backup_full(csv_bucket_list) - logger.info("Re-ingesting from latest backup complete.") + else: + logger.debug("Running resync process....for ingesting new backup!") + linking.configure_cluster(pre_snapshot_process) + + logger.debug("Finding source and staging bucket list") + bucket_details_source = ( + pre_snapshot_process.source_bucket_list_offline() + ) + bucket_details_staged = helper_lib.filter_bucket_name_from_output( + pre_snapshot_process.bucket_list() + ) + + buckets_toprocess = linking.buckets_precreation( + pre_snapshot_process, bucket_details_source, bucket_details_staged + ) + csv_bucket_list = ",".join(buckets_toprocess) + + pre_snapshot_process.cb_backup_full(csv_bucket_list) + logger.info("Re-ingesting from latest backup complete.") + + linking.build_indexes(pre_snapshot_process) logger.info("Stopping Couchbase") pre_snapshot_process.stop_couchbase() + pre_snapshot_process.save_config("parent") -def post_snapshot_cbbkpmgr(staged_source, repository, source_config, dsource_type): +def post_snapshot_cbbkpmgr( + staged_source, repository, source_config, dsource_type +): logger.info("In Post snapshot...") post_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) rx_connection = staged_source.staged_connection post_snapshot_process.start_couchbase() snapshot = SnapshotDefinition(validate=False) - bucket_list = [] bucket_details = post_snapshot_process.bucket_list() - if len(staged_source.parameters.config_settings_prov) != 0: - bucket_list = [] - for config_setting in staged_source.parameters.config_settings_prov: - 
bucket_list.append(helper_lib.get_bucket_name_with_size(bucket_details, config_setting["bucketName"])) - else: - bucket_list = helper_lib.get_stg_all_bucket_list_with_ramquota_size(bucket_details) + # extract index + + ind = post_snapshot_process.get_indexes_definition() + logger.debug("indexes definition : {}".format(ind)) + snapshot.indexes = ind snapshot.db_path = staged_source.parameters.mount_path snapshot.couchbase_port = source_config.couchbase_src_port snapshot.couchbase_host = source_config.couchbase_src_host - snapshot.bucket_list = ":".join(bucket_list) + snapshot.bucket_list = json.dumps(bucket_details) snapshot.time_stamp = helper_lib.current_time() snapshot.snapshot_id = str(helper_lib.get_snapshot_id()) - logger.debug("snapshot schema: {}".format(snapshot)) + snapshot.couchbase_admin = post_snapshot_process.parameters.couchbase_admin + snapshot.couchbase_admin_password = ( + post_snapshot_process.parameters.couchbase_admin_password + ) + debug_snapshot = copy.deepcopy(snapshot) + debug_snapshot.couchbase_admin_password = "xxxxxxxx" + logger.debug("snapshot schema: {}".format(debug_snapshot)) logger.debug("Deleting the lock files") helper_lib.delete_file(rx_connection, config.SNAP_SYNC_FILE_NAME) helper_lib.delete_file(rx_connection, config.SYNC_FILE_NAME) - post_snapshot_process.stop_couchbase() - helper_lib.unmount_file_system(rx_connection, staged_source.parameters.mount_path) - logger.debug("Un mounting completed") return snapshot def start_staging_cbbkpmgr(staged_source, repository, source_config): start_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + + start_staging.delete_config() + # TODO error handling + start_staging.restore_config(what="current") start_staging.start_couchbase() def stop_staging_cbbkpmgr(staged_source, repository, source_config): stop_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) stop_staging.stop_couchbase() + stop_staging.save_config(what="current") + stop_staging.delete_config() def d_source_status_cbbkpmgr(staged_source, repository, source_config): - if helper_lib.check_dir_present(staged_source.staged_connection, staged_source.parameters.couchbase_bak_loc): - return Status.ACTIVE - return Status.INACTIVE - - -def unmount_file_system_in_error_case(staged_source, repository, source_config): + status_obj = CouchbaseOperation( + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + logger.debug( + "Checking status for D_SOURCE: {}".format(source_config.pretty_name) + ) + return status_obj.status() + + +def unmount_file_system_in_error_case( + staged_source, repository, source_config +): try: - logger.debug("Un-mounting file system as last operation was not successful") + logger.debug( + "Un-mounting file system as last operation was not successful" + ) obj = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + 
.set_repository(repository) + .set_source_config(source_config) + .build() + ) obj.stop_couchbase() - helper_lib.unmount_file_system(staged_source.staged_connection, staged_source.parameters.mount_path) + helper_lib.unmount_file_system( + staged_source.staged_connection, + staged_source.parameters.mount_path, + ) logger.debug("Un mounting completed") except Exception as err: - logger.debug("Un-mounting failed, reason: "+err.message) - + logger.debug("Un-mounting failed, reason: " + str(err)) diff --git a/src/operations/link_xdcr.py b/src/operations/link_xdcr.py index e3a3e67..abf671a 100644 --- a/src/operations/link_xdcr.py +++ b/src/operations/link_xdcr.py @@ -1,222 +1,197 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, functions defined for XDCR ingestion mechanism -####################################################################################################################### +############################################################################## +# In this module, functions defined for XDCR ingestion mechanism. +############################################################################## +import json import logging -import os -from generated.definitions import SnapshotDefinition -import db_commands.constants +import db_commands from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation from controller.resource_builder import Resource +from dlpx.virtualization.platform.exceptions import UserError from generated.definitions import SnapshotDefinition -from internal_exceptions.database_exceptions import DuplicateClusterError -from internal_exceptions.plugin_exceptions import MultipleSyncError, MultipleXDCRSyncError +from internal_exceptions.plugin_exceptions import MultipleSyncError from operations import config +from operations import linking logger = logging.getLogger(__name__) def resync_xdcr(staged_source, repository, source_config, input_parameters): + logger.debug("START resync_xdcr") + if input_parameters.xdcr_admin_password == "": + raise UserError("Source password is mandatory in XDCR dsource type!") dsource_type = input_parameters.d_source_type dsource_name = source_config.pretty_name - bucket_size = staged_source.parameters.bucket_size - rx_connection = staged_source.staged_connection resync_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - config_dir = resync_process.create_config_dir() - config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, dsource_name) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + couchbase_host = input_parameters.couchbase_host + + linking.check_for_concurrent( + resync_process, dsource_type, dsource_name, couchbase_host + ) + + linking.configure_cluster(resync_process) - if not verify_sync_lock_file_for_this_job(rx_connection, config.SYNC_FILE_NAME): - config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False - logger.debug("Sync file is already created by other dSource") - raise MultipleXDCRSyncError("Sync file is already created by other dSource") - else: - # creating sync file - msg = 
db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(dsource_name, - input_parameters.couchbase_host) - helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME) - - resync_process.restart_couchbase() - resync_process.node_init() - resync_process.cluster_init() - already_set_up_done, name_conflict = resync_process.check_duplicate_replication( - resync_process.parameters.stg_cluster_name) - if already_set_up_done: - logger.info("No need to XDCR setup again") - elif name_conflict: - raise DuplicateClusterError("Already cluster is present") - else: - logger.info("First time XDCR set up") - resync_process.xdcr_setup() # common steps for both XDCR & CB back up - logger.debug("Finding source and staging bucket list") + bucket_details_source = resync_process.source_bucket_list() bucket_details_staged = resync_process.bucket_list() - config_setting = staged_source.parameters.config_settings_prov - logger.debug("Bucket names passed for configuration: {}".format(config_setting)) - bucket_configured_staged = [] - if len(config_setting) > 0: - logger.debug("Getting bucket information from config") - for config_bucket in config_setting: - bucket_configured_staged.append(config_bucket["bucketName"]) - logger.debug("Filtering bucket name with size only from above output") - bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source, config_bucket["bucketName"]) - bkt_size_mb = helper_lib.get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1]) - - if config_bucket["bucketName"] not in bucket_details_staged: - resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - else: - logger.debug("Bucket {} already present in staged environment. Recreating bucket ".format( - config_bucket["bucketName"])) - resync_process.bucket_remove(config_bucket["bucketName"]) - resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb) - resync_process.xdcr_replicate(config_bucket["bucketName"], config_bucket["bucketName"]) - - logger.debug("Finding buckets present at staged server") - bucket_details_staged = resync_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged)) - - logger.debug("Extra bucket found to delete:{} ".format(extra_bucket)) - for bucket in extra_bucket: - resync_process.bucket_remove(bucket) - else: - logger.debug("Finding buckets present at staged server with size") - all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source) - logger.debug("Filtering bucket name with size only from above output") - filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source) - for items in all_bkt_list_with_size: - if items: - logger.debug("Running bucket operations for {}".format(items)) - bkt_name, bkt_size = items.split(',') - - bkt_size_mb = helper_lib.get_bucket_size_in_MB(bucket_size, bkt_size) - if bkt_name not in bucket_details_staged: - resync_process.bucket_create(bkt_name, bkt_size_mb) - else: - logger.debug( - "Bucket {} already present in staged environment. 
Recreating bucket ".format(bkt_name)) - resync_process.bucket_remove(bkt_name) - resync_process.bucket_create(bkt_name, bkt_size_mb) - resync_process.xdcr_replicate(bkt_name, bkt_name) - - bucket_details_staged = resync_process.bucket_list() - filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket)) - logger.info("Extra bucket found to delete:{}".format(extra_bucket)) - for bucket in extra_bucket: - resync_process.bucket_remove(bucket) + buckets_toprocess = linking.buckets_precreation( + resync_process, bucket_details_source, bucket_details_staged + ) + + # run this for all buckets + resync_process.setup_replication() logger.debug("Finding staging_uuid & cluster_name on staging") - staging_uuid, cluster_name_staging = resync_process.get_replication_uuid() - bucket_details_staged = resync_process.bucket_list() - logger.debug("Filtering bucket name from output") - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - for bkt in filter_bucket_list: + staging_uuid = resync_process.get_replication_uuid() + + if staging_uuid is None: + logger.debug("Can't find a replication UUID after setting it up") + raise UserError("Can't find a replication UUID after setting it up") + + for bkt in buckets_toprocess: resync_process.monitor_bucket(bkt, staging_uuid) + linking.build_indexes(resync_process) + + logger.info("Stopping Couchbase") + resync_process.stop_couchbase() + resync_process.save_config("parent") + -def pre_snapshot_xdcr(staged_source, repository, source_config, input_parameters): +def pre_snapshot_xdcr( + staged_source, repository, source_config, input_parameters +): logger.info("In Pre snapshot...") pre_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - config.SNAP_SYNC_FILE_NAME = pre_snapshot_process.create_config_dir() + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION - # Don't care of sync.lck file as it will never de deleted even in post snapshot. - if helper_lib.check_file_present(staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME): - config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + config.SNAP_SYNC_FILE_NAME = ( + pre_snapshot_process.create_config_dir() + + "/" + + db_commands.constants.LOCK_SNAPSYNC_OPERATION + ) + # Don't care of sync.lck file as it will never de deleted even in post + # snapshot. 
+ if helper_lib.check_file_present( + staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME + ): + config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = ( + False + ) raise MultipleSyncError() else: logger.debug("Creating lock file...") - msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(source_config.pretty_name, - input_parameters.couchbase_host) - helper_lib.write_file(staged_source.staged_connection, msg, config.SNAP_SYNC_FILE_NAME) + msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format( # noqa E501 + source_config.pretty_name, input_parameters.couchbase_host + ) + helper_lib.write_file( + staged_source.staged_connection, msg, config.SNAP_SYNC_FILE_NAME + ) logger.info("Stopping Couchbase") pre_snapshot_process.stop_couchbase() + pre_snapshot_process.save_config("parent") def post_snapshot_xdcr(staged_source, repository, source_config, dsource_type): logger.info("In Post snapshot...") post_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + # post_snapshot_process.save_config() post_snapshot_process.start_couchbase() snapshot = SnapshotDefinition(validate=False) - bucket_details = post_snapshot_process.bucket_list() - if len(staged_source.parameters.config_settings_prov) != 0: - bucket_list = [] - for config_setting in staged_source.parameters.config_settings_prov: - bucket_list.append(helper_lib.get_bucket_name_with_size(bucket_details, config_setting["bucketName"])) - else: - bucket_list = helper_lib.get_stg_all_bucket_list_with_ramquota_size(bucket_details) + ind = post_snapshot_process.get_indexes_definition() + logger.debug("indexes definition : {}".format(ind)) + + snapshot.indexes = ind + + bucket_details = post_snapshot_process.bucket_list() snapshot.db_path = staged_source.parameters.mount_path snapshot.couchbase_port = source_config.couchbase_src_port snapshot.couchbase_host = source_config.couchbase_src_host - snapshot.bucket_list = ":".join(bucket_list) + snapshot.bucket_list = json.dumps(bucket_details) snapshot.time_stamp = helper_lib.current_time() snapshot.snapshot_id = str(helper_lib.get_snapshot_id()) - logger.debug("snapshot schema: {}".format(snapshot)) - logger.debug("Deleting the snap sync lock file {}".format(config.SNAP_SYNC_FILE_NAME)) - helper_lib.delete_file(staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME) + snapshot.couchbase_admin = post_snapshot_process.parameters.couchbase_admin + snapshot.couchbase_admin_password = ( + post_snapshot_process.parameters.couchbase_admin_password + ) + # logger.debug("snapshot schema: {}".format(snapshot)) + logger.debug( + "Deleting the snap sync lock file {}".format( + config.SNAP_SYNC_FILE_NAME + ) + ) + helper_lib.delete_file( + staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME + ) return snapshot def start_staging_xdcr(staged_source, repository, source_config): start_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name)) dsource_type = 
staged_source.parameters.d_source_type rx_connection = staged_source.staged_connection - start_staging.start_couchbase() - - already_set_up_done, name_conflict = start_staging.check_duplicate_replication( - start_staging.parameters.stg_cluster_name) - if already_set_up_done: - logger.info("No need to XDCR setup again") - elif name_conflict: - raise DuplicateClusterError("Already cluster is present") - else: - logger.info("First time XDCR set up") - start_staging.xdcr_setup() - config_setting = staged_source.parameters.config_settings_prov + start_staging.stop_couchbase() + start_staging.delete_config() + # TODO error handling + start_staging.restore_config(what="current") + start_staging.start_couchbase() - if len(config_setting) > 0: - for config_bucket in config_setting: - logger.debug("Creating replication for {}".format(config_bucket["bucketName"])) - start_staging.xdcr_replicate(config_bucket["bucketName"], config_bucket["bucketName"]) - else: - bucket_details_source = start_staging.source_bucket_list() - all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source) - for items in all_bkt_list_with_size: - bkt_name, bkt_size = items.split(',') - logger.debug("Creating replication for {}".format(bkt_name)) - start_staging.xdcr_replicate(bkt_name, bkt_name) - - config_dir = start_staging.create_config_dir() - msg = "dSource Creation / Snapsync for dSource {} is in progress".format(source_config.pretty_name) - helper_lib.write_file(rx_connection, msg, - config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, - source_config.pretty_name)) + start_staging.setup_replication() + + config_dir = start_staging.create_config_dir() + msg = "dSource Creation / Snapsync for dSource {} is in progress".format( + source_config.pretty_name + ) + helper_lib.write_file( + rx_connection, + msg, + config_dir + + "/" + + helper_lib.get_sync_lock_file_name( + dsource_type, source_config.pretty_name + ), + ) logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name)) def stop_staging_xdcr(staged_source, repository, source_config): stop_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Disabling the D_SOURCE:{}".format(source_config.pretty_name)) dsource_type = staged_source.parameters.d_source_type @@ -227,30 +202,15 @@ def stop_staging_xdcr(staged_source, repository, source_config): logger.info("Deleting XDCR") stop_staging.xdcr_delete(cluster_name) config_dir = stop_staging.create_config_dir() - helper_lib.delete_file(rx_connection, - config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, - source_config.pretty_name)) + helper_lib.delete_file( + rx_connection, + config_dir + + "/" + + helper_lib.get_sync_lock_file_name( + dsource_type, source_config.pretty_name + ), + ) stop_staging.stop_couchbase() + stop_staging.save_config(what="current") + stop_staging.delete_config() logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name)) - - -def d_source_status_xdcr(staged_source, repository, source_config): - status_obj = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - logger.debug("Checking status for D_SOURCE: {}".format(source_config.pretty_name)) - return status_obj.status() - 
- -def verify_sync_lock_file_for_this_job(rx_connection, sync_filename): - if helper_lib.check_file_present(rx_connection, sync_filename): - logger.debug("Sync File Present: {}".format(sync_filename)) - return True - config_dir = os.path.dirname(sync_filename) - - possible_sync_filename = "/*" + db_commands.constants.LOCK_SYNC_OPERATION - possible_sync_filename = config_dir + possible_sync_filename - logger.debug("Checking for {}".format(possible_sync_filename)) - if helper_lib.check_file_present(rx_connection, possible_sync_filename): - return False - return True diff --git a/src/operations/linked.py b/src/operations/linked.py index a7246e1..0436da7 100644 --- a/src/operations/linked.py +++ b/src/operations/linked.py @@ -1,23 +1,29 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2024 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, all dSource related operations are implemented. -####################################################################################################################### +############################################################################# +# In this module, all dSource related operations are implemented +############################################################################# import logging -import sys -import config import db_commands from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -from controller.resource_builder import Resource from controller.helper_lib import delete_file +from controller.resource_builder import Resource from db_commands import constants -from internal_exceptions.base_exceptions import PluginException, DatabaseException, GenericUserError -from internal_exceptions.plugin_exceptions import MountPathError -from operations import link_cbbkpmgr, link_xdcr +from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.base_exceptions import DatabaseException +from internal_exceptions.base_exceptions import GenericUserError +from internal_exceptions.base_exceptions import PluginException +from internal_exceptions.plugin_exceptions import MultipleSnapSyncError +from internal_exceptions.plugin_exceptions import MountPathStaleError +from dlpx.virtualization.platform import StagedSource +from operations import config +from operations import link_cbbkpmgr +from operations import link_xdcr +from operations import linking logger = logging.getLogger(__name__) @@ -26,63 +32,113 @@ def resync(staged_source, repository, source_config, input_parameters): logger.debug("Started ReSync...") try: if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.resync_cbbkpmgr(staged_source, repository, source_config, input_parameters) + link_cbbkpmgr.resync_cbbkpmgr( + staged_source, repository, source_config, input_parameters + ) elif input_parameters.d_source_type == constants.XDCR: - link_xdcr.resync_xdcr(staged_source, repository, source_config, input_parameters) + link_xdcr.resync_xdcr( + staged_source, repository, source_config, input_parameters + ) + + logger.debug("Completed resynchronization") + except UserError as ex_obj: + logger.exception(ex_obj) + raise + except Exception as ex_obj: - logger.debug("Caught exception {}".format(ex_obj.message)) - _cleanup_in_exception_case(staged_source.staged_connection, True, False) + logger.debug(str(ex_obj)) + _cleanup_in_exception_case( + 
staged_source.staged_connection, + True, + False, + ) if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) - if isinstance(ex_obj, PluginException) or isinstance(ex_obj, DatabaseException) or isinstance(ex_obj, GenericUserError): - raise ex_obj.to_user_error(), None, sys.exc_info()[2] + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) + logger.exception(ex_obj) + if ( + isinstance(ex_obj, PluginException) + or isinstance(ex_obj, DatabaseException) + or isinstance(ex_obj, GenericUserError) + ): + raise ex_obj.to_user_error() raise - logger.debug("Completed resynchronization") def pre_snapshot(staged_source, repository, source_config, input_parameters): logger.info("In Pre snapshot...") try: if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parameters) + link_cbbkpmgr.pre_snapshot_cbbkpmgr( + staged_source, repository, source_config, input_parameters + ) elif input_parameters.d_source_type == constants.XDCR: - link_xdcr.pre_snapshot_xdcr(staged_source, repository, source_config, input_parameters) + link_xdcr.pre_snapshot_xdcr( + staged_source, repository, source_config, input_parameters + ) + logger.debug("Completed Pre-snapshot") + except UserError: + raise except Exception as ex_obj: - logger.debug("Caught exception: {}".format(ex_obj.message)) + logger.debug("Caught exception: {}".format(str(ex_obj))) _cleanup_in_exception_case(staged_source.staged_connection, True, True) if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) - if isinstance(ex_obj, PluginException) or isinstance(ex_obj, DatabaseException) or isinstance(ex_obj, GenericUserError): - raise ex_obj.to_user_error(), None, sys.exc_info()[2] + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) + logger.exception(ex_obj) + if ( + isinstance(ex_obj, PluginException) + or isinstance(ex_obj, DatabaseException) + or isinstance(ex_obj, GenericUserError) + ): + raise ex_obj.to_user_error() raise - logger.debug("Completed Pre-snapshot") def post_snapshot(staged_source, repository, source_config, dsource_type): logger.info("In Post snapshot...") try: if dsource_type == constants.CBBKPMGR: - return link_cbbkpmgr.post_snapshot_cbbkpmgr(staged_source, repository, source_config, dsource_type) + return link_cbbkpmgr.post_snapshot_cbbkpmgr( + staged_source, repository, source_config, dsource_type + ) elif dsource_type == constants.XDCR: - return link_xdcr.post_snapshot_xdcr(staged_source, repository, source_config, dsource_type) + return link_xdcr.post_snapshot_xdcr( + staged_source, repository, source_config, dsource_type + ) + logger.debug("Completed Post-snapshot") + except UserError: + raise except Exception as err: - logger.debug("Caught exception in post snapshot: {}".format(err.message)) + logger.debug("Caught exception in post snapshot: {}".format(str(err))) _cleanup_in_exception_case(staged_source.staged_connection, True, True) if dsource_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) raise - logger.debug("Completed Post-snapshot") def start_staging(staged_source, repository, source_config): 
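# Illustrative sketch (simplified; this UserError/PluginException pair are
# stand-ins for dlpx.virtualization.platform.exceptions.UserError and the
# plugin's internal_exceptions classes): the handlers above replace the
# Python 2 three-expression raise ("raise exc, None, sys.exc_info()[2]") with a
# plain Python 3 raise and let UserError propagate untouched.
import logging

logger = logging.getLogger(__name__)


class UserError(Exception):
    pass


class PluginException(Exception):
    def to_user_error(self):
        return UserError(str(self))


def run_with_error_mapping(operation):
    try:
        return operation()
    except UserError:
        raise                              # user-facing errors pass through
    except PluginException as ex_obj:
        logger.exception(ex_obj)
        raise ex_obj.to_user_error()       # Python 3 chains the context itself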
logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name)) try: if staged_source.parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.start_staging_cbbkpmgr(staged_source, repository, source_config) + link_cbbkpmgr.start_staging_cbbkpmgr( + staged_source, repository, source_config + ) elif staged_source.parameters.d_source_type == constants.XDCR: - link_xdcr.start_staging_xdcr(staged_source, repository, source_config) + link_xdcr.start_staging_xdcr( + staged_source, + repository, + source_config, + ) + logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name)) + except UserError: + raise except Exception as err: - logger.debug("Enable operation is failed!" + err.message) + logger.debug("Enable operation is failed!" + str(err)) raise @@ -90,49 +146,129 @@ def stop_staging(staged_source, repository, source_config): logger.debug("Disabling the D_SOURCE:{}".format(source_config.pretty_name)) try: if staged_source.parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.stop_staging_cbbkpmgr(staged_source, repository, source_config) + link_cbbkpmgr.stop_staging_cbbkpmgr( + staged_source, repository, source_config + ) elif staged_source.parameters.d_source_type == constants.XDCR: - link_xdcr.stop_staging_xdcr(staged_source, repository, source_config) + link_xdcr.stop_staging_xdcr( + staged_source, + repository, + source_config, + ) + logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name)) + except UserError: + raise except Exception as err: - logger.debug("Disable operation is failed!" + err.message) + logger.debug("Disable operation is failed!" + str(err)) raise - logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name)) def d_source_status(staged_source, repository, source_config): - if staged_source.parameters.d_source_type == constants.CBBKPMGR: - return link_cbbkpmgr.d_source_status_cbbkpmgr(staged_source, repository, source_config) - elif staged_source.parameters.d_source_type == constants.XDCR: - return link_xdcr.d_source_status_xdcr(staged_source, repository, source_config) + return linking.d_source_status(staged_source, repository, source_config) -#This function verifies that LOCK_SNAPSYNC_OPERATION or LOCK_SYNC_OPERATION is present in hidden folder or not -#If any file is present then it will raise exception -#This function does not cover the case for XDCR sync file presence. +# This function verifies that LOCK_SNAPSYNC_OPERATION or LOCK_SYNC_OPERATION +# is present in hidden folder or not +# If any file is present then it will raise exception +# This function does not cover the case for XDCR sync file presence. 
def check_mount_path(staged_source, repository): mount_path_check = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).build()) - snapsync_filename = mount_path_check.create_config_dir() + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION - sync_filename = mount_path_check.create_config_dir() + "/" + db_commands.constants.LOCK_SYNC_OPERATION - if helper_lib.check_file_present(staged_source.staged_connection, snapsync_filename) : - raise MountPathError("Another Snap-Sync process is in progress ").to_user_error(), None, sys.exc_info()[2] - if helper_lib.check_file_present(staged_source.staged_connection, sync_filename): - raise MountPathError("Another Sync process is in progress ").to_user_error(), None, sys.exc_info()[2] + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .build() + ) + snapsync_filename = ( + mount_path_check.create_config_dir() + + "/" + + db_commands.constants.LOCK_SNAPSYNC_OPERATION + ) + sync_filename = ( + mount_path_check.create_config_dir() + + "/" + + db_commands.constants.LOCK_SYNC_OPERATION + ) + if helper_lib.check_file_present( + staged_source.staged_connection, snapsync_filename + ): + raise MultipleSnapSyncError( + "Another Snap-Sync process is in progress ", snapsync_filename + ).to_user_error() + file_present = helper_lib.check_file_present( + staged_source.staged_connection, sync_filename + ) + if file_present: + raise MultipleSnapSyncError( + "Another Sync process is in progress ", sync_filename + ).to_user_error() return True # Below are specific functions for this module only + def _cleanup_in_exception_case(rx_connection, is_sync, is_snap_sync): logger.debug("In clean up") try: - if is_snap_sync and config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + is_snap_sync + and config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + ): delete_file(rx_connection, config.SNAP_SYNC_FILE_NAME) - if is_sync and config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + is_sync + and config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + ): delete_file(rx_connection, config.SYNC_FILE_NAME) - if not config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED or \ - not config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + not config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + or not config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED + ): logger.debug(constants.ALREADY_SYNC_FILE_PRESENT_ON_HOST) - except Exception as err : - logger.debug("Failed to clean up the lock files {}".format(err.message)) + except Exception as err: + logger.debug("Failed to clean up the lock files {}".format(str(err))) + raise + + +def source_size(source_obj: StagedSource, repository, source_config): + """ + Returns space occupied by the dataset on the mount point in bytes. + + :param source_obj: staged_source object corresponding to dsource + + :return: Storage occupied in the mount point in bytes + """ + connection = source_obj.staged_connection + mount_path = source_obj.parameters.mount_path + cluster_name = source_obj.parameters.stg_cluster_name + logger.info( + "Begin operation: Calculation of source" + f" sizing for dSource {cluster_name}." 
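# Illustrative sketch (simplified; the sample string is made up and assumes a
# du-style "size<TAB>path" result): source_size(), whose body continues below,
# takes the first whitespace-separated token of get_db_size()'s output and
# casts it to an int number of bytes.
def parse_db_size(db_size_output):
    return int(db_size_output.split()[0])   # first field = size in bytes


assert parse_db_size("8273645568\t/mnt/provision/dsource") == 8273645568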
+ ) + srcsize_obj = CouchbaseOperation( + Resource.ObjectBuilder.set_staged_source(source_obj) + .set_repository(repository) + .build() + ) + try: + if not helper_lib.check_stale_mountpoint( + connection=connection, path=mount_path + ) and helper_lib.check_server_is_used( + connection=connection, path=mount_path + ): + db_size_output = srcsize_obj.get_db_size(path=mount_path) + if db_size_output: + db_size = int(db_size_output.split()[0]) + logger.debug( + f"mount_point={mount_path} , " + f"db_size_calculated={db_size}" + ) + logger.info( + "End operation: Calculation of source" + f" sizing for dSource {cluster_name}." + ) + return db_size + else: + raise MountPathStaleError(message=mount_path) + except Exception as error: + logger.debug("Exception: {}".format(str(error))) raise diff --git a/src/operations/linking.py b/src/operations/linking.py new file mode 100644 index 0000000..00a83bf --- /dev/null +++ b/src/operations/linking.py @@ -0,0 +1,264 @@ +# +# Copyright (c) 2020-2023 by Delphix. All rights reserved. +# +############################################################################## +# In this module, functions defined common ingestion modes - backup and xdcr +############################################################################## + +import logging +import os +import time + +import db_commands +from controller import helper_lib +from controller.couchbase_operation import CouchbaseOperation +from controller.resource_builder import Resource +from dlpx.virtualization.platform import Status +from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.plugin_exceptions import MultipleXDCRSyncError +from operations import config + +logger = logging.getLogger(__name__) + + +# potentially to remove - as checks are done on the mount points +def check_for_concurrent( + couchbase_obj, + dsource_type, + dsource_name, + couchbase_host, +): + config_dir = couchbase_obj.create_config_dir() + + config.SYNC_FILE_NAME = ( + config_dir + + "/" + + helper_lib.get_sync_lock_file_name(dsource_type, dsource_name) + ) + + delphix_config_dir = couchbase_obj.get_config_directory() + logger.debug("Check if we have config dir in Delphix storage") + if not helper_lib.check_dir_present( + couchbase_obj.connection, + delphix_config_dir, + ): + logger.debug("make Delphix storage dir:{}".format(delphix_config_dir)) + couchbase_obj.make_directory(delphix_config_dir) + + if not verify_sync_lock_file_for_this_job( + couchbase_obj.connection, config.SYNC_FILE_NAME + ): + config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False + logger.debug("Sync file is already created by other dSource") + raise MultipleXDCRSyncError( + "Sync file is already created by other dSource", + ) + else: + # creating sync file + msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format( # noqa E501 + dsource_name, couchbase_host + ) + helper_lib.write_file( + couchbase_obj.connection, + msg, + config.SYNC_FILE_NAME, + ) + + +def verify_sync_lock_file_for_this_job(rx_connection, sync_filename): + if helper_lib.check_file_present(rx_connection, sync_filename): + logger.debug("Sync File Present: {}".format(sync_filename)) + return True + config_dir = os.path.dirname(sync_filename) + + possible_sync_filename = "/*" + db_commands.constants.LOCK_SYNC_OPERATION + possible_sync_filename = config_dir + possible_sync_filename + logger.debug("Checking for {}".format(possible_sync_filename)) + if helper_lib.check_file_present(rx_connection, possible_sync_filename): + return 
False + return True + + +def configure_cluster(couchbase_obj): + # configure Couchbase cluster + + logger.debug("Checking cluster config") + if couchbase_obj.check_config(): + logger.debug("cluster config found - restoring") + couchbase_obj.stop_couchbase() + couchbase_obj.restore_config() + couchbase_obj.start_couchbase() + else: + logger.debug("cluster config not found - preparing node") + # no config in delphix directory + # initial cluster setup + couchbase_obj.delete_xdcr_config() + couchbase_obj.stop_couchbase() + couchbase_obj.delete_data_folder() + couchbase_obj.delete_config_folder() + + # we can't use normal monitor as server is not configured yet + couchbase_obj.start_couchbase(no_wait=True) + + end_time = time.time() + 3660 + + server_status = Status.INACTIVE + + # break the loop either end_time is exceeding from 1 hour or + # server is successfully started + while time.time() < end_time and server_status != Status.ACTIVE: + helper_lib.sleepForSecond(1) # waiting for 1 second + # fetching status + server_status = couchbase_obj.staging_bootstrap_status() + logger.debug("server status {}".format(server_status)) + + # check if cluster not configured and raise an issue + if couchbase_obj.check_cluster_notconfigured(): + logger.debug("Node not configured - creating a new cluster") + couchbase_obj.node_init() + couchbase_obj.cluster_init() + logger.debug("Cluster configured") + else: + logger.debug("Node configured but no configuration in Delphix") + if couchbase_obj.check_cluster_configured(): + logger.debug( + "Configured with staging user/password and alive " + "so not a problem - continue" + ) + else: + logger.debug( + "Cluster configured but not with user/password given " + "in Delphix potentially another cluster" + ) + raise UserError( + "Cluster configured but not with user/password given " + "in Delphix potentially another cluster" + ) + + +def buckets_precreation( + couchbase_obj, + bucket_details_source, + bucket_details_staged, +): + # common steps for both XDCR & CB back up + # return a list of precreated buckets to process + logger.debug("buckets_precreation") + bucket_list = [] + config_setting = couchbase_obj.parameters.config_settings_prov + log_msg = "Bucket names passed for configuration: {}".format( + config_setting, + ) + logger.debug(log_msg) + bucket_configured_staged = [] + if len(config_setting) > 0: + # process for list of buckets + logger.debug("Getting bucket information from config") + buckets_dict = {b["name"]: b for b in bucket_details_source} + for config_bucket in config_setting: + bucket_configured_staged.append(config_bucket["bucketName"]) + log_msg = "Filtering bucket name with size only from above output" + logger.debug(log_msg) + bucket = buckets_dict[config_bucket["bucketName"]] + logger.debug("Running bucket operations for {}".format(bucket)) + bkt_name = config_bucket["bucketName"] + bkt_size = bucket["ram"] + bkt_type = bucket["bucketType"] + bkt_compression = bucket["compressionMode"] + + bkt_size_mb = helper_lib.get_bucket_size_in_MB( + couchbase_obj.parameters.bucket_size, bkt_size, bkt_name + ) + + if config_bucket["bucketName"] not in bucket_details_staged: + couchbase_obj.bucket_create( + config_bucket["bucketName"], + bkt_size_mb, + bkt_type, + bkt_compression, + ) + else: + logger.debug( + "Bucket {} already present in staged environment. 
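# Illustrative sketch (simplified; time.sleep and the "ACTIVE" string stand in
# for helper_lib.sleepForSecond and the dlpx Status.ACTIVE value):
# configure_cluster() above waits for the freshly started node with a
# poll-once-a-second loop bounded by time.time() + 3660, and vdb_reconfigure()
# later in virtual.py uses the same shape of loop for multi-node VDBs.
import time


def wait_until_active(poll, timeout_seconds=3660, interval=1):
    deadline = time.time() + timeout_seconds
    status = poll()
    while time.time() < deadline and status != "ACTIVE":
        time.sleep(interval)
        status = poll()
    return status == "ACTIVE"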
" + "Recreating bucket ".format(config_bucket["bucketName"]) + ) + couchbase_obj.bucket_remove(config_bucket["bucketName"]) + couchbase_obj.bucket_create( + config_bucket["bucketName"], + bkt_size_mb, + bkt_type, + bkt_compression, + ) + + bucket_list.append(config_bucket["bucketName"]) + + logger.debug("Finding buckets present at staged server") + bucket_details_staged = couchbase_obj.bucket_list() + filter_bucket_list = helper_lib.filter_bucket_name_from_output( + bucket_details_staged + ) + extra_bucket = set(filter_bucket_list) - set(bucket_configured_staged) + extra_bucket = list(extra_bucket) + + logger.debug("Extra bucket found to delete:{} ".format(extra_bucket)) + for bucket in extra_bucket: + couchbase_obj.bucket_remove(bucket) + else: + # process for all buckets + # filter_source_bucket = helper_lib.filter_bucket_name_from_json( + # bucket_details_source + # ) + for items in bucket_details_source: + if items: + logger.debug("Running bucket operations for {}".format(items)) + bkt_name = items["name"] + bkt_size = items["ram"] + bkt_type = items["bucketType"] + bkt_compression = items["compressionMode"] + + bkt_size_mb = helper_lib.get_bucket_size_in_MB( + couchbase_obj.parameters.bucket_size, bkt_size, bkt_name + ) + if bkt_name not in bucket_details_staged: + couchbase_obj.bucket_create( + bkt_name, bkt_size_mb, bkt_type, bkt_compression + ) + else: + logger.debug( + "Bucket {} already present in staged environment. " + "Recreating bucket ".format(bkt_name) + ) + couchbase_obj.bucket_remove(bkt_name) + couchbase_obj.bucket_create( + bkt_name, bkt_size_mb, bkt_type, bkt_compression + ) + bucket_list.append(bkt_name) + + return bucket_list + + +def build_indexes(couchbase_obj): + # create indexes based on the index definition + + logger.debug("index builder") + ind = couchbase_obj.get_indexes_definition() + logger.debug("indexes definition : {}".format(ind)) + for i in ind: + logger.debug(i) + couchbase_obj.build_index(i) + couchbase_obj.check_index_build() + + +def d_source_status(staged_source, repository, source_config): + status_obj = CouchbaseOperation( + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + logger.debug( + "Checking status for D_SOURCE: {}".format( + source_config.pretty_name, + ) + ) + return status_obj.status() diff --git a/src/operations/virtual.py b/src/operations/virtual.py index 32f6671..d925276 100644 --- a/src/operations/virtual.py +++ b/src/operations/virtual.py @@ -1,26 +1,30 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -# In this module, VDB related operations are implemented. 
+# In this module, VDB related operations are implemented """ -####################################################################################################################### +############################################################################## -import re - -# Auto generated libs -import sys - -from generated.definitions import SnapshotDefinition -from generated.definitions import SourceConfigDefinition +import json +import logging +import time -from internal_exceptions.database_exceptions import FailedToReadBucketDataFromSnapshot, CouchbaseServicesError from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -import logging from controller.resource_builder import Resource +from dlpx.virtualization.common import RemoteConnection +from dlpx.virtualization.common import RemoteEnvironment +from dlpx.virtualization.common import RemoteUser +from dlpx.virtualization.platform import Status +from generated.definitions import SnapshotDefinition +from generated.definitions import SourceConfigDefinition +from internal_exceptions.database_exceptions import CouchbaseServicesError +from internal_exceptions.database_exceptions import ( + FailedToReadBucketDataFromSnapshot, +) # Global logger for this File logger = logging.getLogger(__name__) @@ -28,45 +32,262 @@ def vdb_status(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) cb_status = provision_process.status() logger.debug("VDB Status is {}".format(cb_status)) + + if cb_status == Status.ACTIVE: + logger.debug("Checking mount point") + if helper_lib.check_stale_mountpoint( + provision_process.connection, virtual_source.parameters.mount_path + ): + logger.debug("error with mount point - report inactive") + return Status.INACTIVE + else: + return Status.ACTIVE + return cb_status def vdb_unconfigure(virtual_source, repository, source_config): # delete all buckets + provision_process = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + vdb_stop(virtual_source, repository, source_config) + provision_process.delete_config() + + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.delete_config() + addnode.stop_couchbase() def vdb_reconfigure(virtual_source, repository, source_config, snapshot): # delete all buckets # calll configure - vdb_start(virtual_source, repository, source_config) + + logger.debug("In vdb_reconfigure...") + provision_process = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + + provision_process.stop_couchbase() + + if ( + 
provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + multinode = True + server_count = len(provision_process.parameters.node_list) + 1 + else: + multinode = False + server_count = 1 + + nodeno = 1 + provision_process.restore_config(what="current", nodeno=nodeno) + provision_process.start_couchbase(no_wait=multinode) + + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + nodeno = nodeno + 1 + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug(nodeno) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.stop_couchbase() + addnode.restore_config(what="current", nodeno=nodeno) + addnode.start_couchbase(no_wait=multinode) + + logger.debug("reconfigure for multinode: {}".format(multinode)) + + if multinode: + active_servers = {} + logger.debug("wait for nodes") + logger.debug( + "server count: {} active servers: {}".format( + server_count, sum(active_servers.values()) + ) + ) + + end_time = time.time() + 3660 + + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started + while ( + time.time() < end_time + and sum(active_servers.values()) != server_count + ): + logger.debug( + "server count 2: {} active servers: {}".format( + server_count, sum(active_servers.values()) + ) + ) + nodeno = 1 + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = provision_process.status() # fetching status + logger.debug("server status {}".format(server_status)) + if server_status == Status.ACTIVE: + active_servers[nodeno] = 1 + + for node in provision_process.parameters.node_list: + nodeno = nodeno + 1 + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug(nodeno) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + server_status = addnode.status() # fetching status + logger.debug("server status {}".format(server_status)) + if server_status == Status.ACTIVE: + active_servers[nodeno] = 1 + return _source_config(virtual_source, repository, source_config, snapshot) def vdb_configure(virtual_source, snapshot, repository): - try: - provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_snapshot( - snapshot).build()) - - provision_process.restart_couchbase() - provision_process.node_init() - provision_process.cluster_init() - _do_provision(provision_process, snapshot) - _cleanup(provision_process, snapshot) - src_cfg_obj = _source_config(virtual_source, repository, None, snapshot) - - return src_cfg_obj - except FailedToReadBucketDataFromSnapshot as err: - raise FailedToReadBucketDataFromSnapshot("Provision is failed. 
" + err.message).to_user_error(), None, \ - sys.exc_info()[2] - except Exception as err: - logger.debug("Provision is failed {}".format(err.message)) - raise + # try: + + logger.debug("VDB CONFIG START") + + provision_process = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_snapshot(snapshot) + .build() + ) + + # TODO: + # fail if already has cluster ? + + # to make sure there is no config + provision_process.delete_config() + # provision_process.delete_config_folder() + + provision_process.restore_config(what="parent") + + # if bucket doesn't existing in target cluster + # couchbase will delete directory while starting + # so we have to rename it before start + + bucket_list_and_size = json.loads(snapshot.bucket_list) + + if not bucket_list_and_size: + raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.") + else: + logger.debug( + "snapshot bucket data is: {}".format(bucket_list_and_size) + ) + + provision_process.restart_couchbase(provision=True) + provision_process.rename_cluster() + + nodeno = 1 + + logger.debug( + "MAIN CONNECTION HOST: {}".format( + provision_process.connection.environment.host.name + ) + ) + + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + nodeno = nodeno + 1 + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug(nodeno) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_snapshot(snapshot) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + logger.debug( + "ADDITIONAL CONNECTION HOST: {}".format( + provision_process.connection.environment.host.name + ) + ) + addnode.addnode(nodeno, node) + # TODO + # FINISH HERE + # addnode.delete_config() + # addnode.stop_couchbase() + + src_cfg_obj = _source_config(virtual_source, repository, None, snapshot) + + return src_cfg_obj + + +def make_nonprimary_connection( + primary_connection, secondary_env_ref, secondary_user_ref +): + dummy_host = primary_connection.environment.host + user = RemoteUser(name="unused", reference=secondary_user_ref) + environment = RemoteEnvironment( + name="unused", reference=secondary_env_ref, host=dummy_host + ) + return RemoteConnection(environment=environment, user=user) def _do_provision(provision_process, snapshot): @@ -75,48 +296,86 @@ def _do_provision(provision_process, snapshot): if not bucket_list_and_size: raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.") else: - logger.debug("snapshot bucket data is: {}".format(bucket_list_and_size)) + logger.debug( + "snapshot bucket data is: {}".format(bucket_list_and_size) + ) + + bucket_list_and_size = json.loads(bucket_list_and_size) - for item in bucket_list_and_size.split(':'): - logger.debug("Creating bucket is: {}".format(item)) + try: + bucket_list = provision_process.bucket_list() + bucket_list = helper_lib.filter_bucket_name_from_output(bucket_list) + logger.debug(bucket_list) + except Exception as err: + logger.debug("Failed to get bucket list. 
Error is " + str(err)) + + for item in bucket_list_and_size: + logger.debug("Checking bucket: {}".format(item)) # try: - bucket_name = item.split(',')[0] - bkt_size_mb = int(item.split(',')[1].strip()) // 1024 // 1024 - provision_process.bucket_create(bucket_name, bkt_size_mb) - helper_lib.sleepForSecond(2) + bucket_name = item["name"] + bkt_size = item["ram"] + bkt_type = item["bucketType"] + bkt_compression = item["compressionMode"] + bkt_size_mb = helper_lib.get_bucket_size_in_MB( + 0, bkt_size, bucket_name + ) + if bucket_name not in bucket_list: + # a new bucket needs to be created + logger.debug("Creating bucket: {}".format(bucket_name)) + provision_process.bucket_create( + bucket_name, bkt_size_mb, bkt_type, bkt_compression + ) + helper_lib.sleepForSecond(2) + else: + logger.debug( + "Bucket {} exist - no need to rename directory".format( + bucket_name + ) + ) + + provision_process.stop_couchbase() + + for item in helper_lib.filter_bucket_name_from_output( + bucket_list_and_size + ): + logger.debug("Checking bucket: {}".format(item)) + bucket_name = item.split(",")[0] + logger.debug("restoring folders") + provision_process.move_bucket(bucket_name, "restore") + + provision_process.start_couchbase() # getting config directory path directory = provision_process.get_config_directory() # making directory and changing permission to 755. provision_process.make_directory(directory) - # This file path is being used to store the bucket information coming in snapshot - config_file_path = provision_process.get_config_file_path() - - content = "BUCKET_LIST=" + _find_bucket_name_from_snapshot(snapshot) - - # Adding bucket list in config file path .config file, inside .delphix folder - helper_lib.write_file(provision_process.connection, content, config_file_path) + # This file path is being used to store the bucket information + # coming in snapshot def _cleanup(provision_process, snapshot): logger.debug("Deleting extra buckets from target host") bucket_list = [] - # Get details of already exist buckets on the target server. We need to delete if some of these are not needed + # Get details of already exist buckets on the target server. + # We need to delete if some of these are not needed try: bucket_list = provision_process.bucket_list() logger.debug(bucket_list) - # Removing extra information captured like ramsize, ramused. Only need to get bucket name from output + # Removing extra information captured like ramsize, ramused. + # Only need to get bucket name from output bucket_list = helper_lib.filter_bucket_name_from_output(bucket_list) except Exception as err: - logger.debug("Failed to get bucket list. Error is " + err.message) + logger.debug("Failed to get bucket list. 
Error is " + str(err)) snapshot_bucket_list_and_size = snapshot.bucket_list snapshot_bucket = _find_bucket_name_from_snapshot(snapshot) - if (snapshot_bucket): - logger.debug("BUCKET_LIST to be provisioned: {}".format(snapshot_bucket)) - snapshot_bucket_list = snapshot_bucket.split(':') + if snapshot_bucket: + logger.debug( + "BUCKET_LIST to be provisioned: {}".format(snapshot_bucket) + ) + snapshot_bucket_list = snapshot_bucket.split(":") bucket_to_delete = [] bucket_to_update = [] for bkt in bucket_list: @@ -128,7 +387,9 @@ def _cleanup(provision_process, snapshot): logger.debug("Bucket list to delete: {} ".format(bucket_to_delete)) _bucket_common_task(provision_process, bucket_to_delete) logger.debug("Bucket list to update: {} ".format(bucket_to_update)) - _bucket_modify_task(provision_process, bucket_to_update, snapshot_bucket_list_and_size) + _bucket_modify_task( + provision_process, bucket_to_update, snapshot_bucket_list_and_size + ) else: logger.debug("This block is not expected to run") @@ -144,17 +405,25 @@ def _bucket_common_task(provision_process, bucket_list): helper_lib.sleepForSecond(2) -def _bucket_modify_task(provision_process, bucket_list, snapshot_bucket_list_and_size): +def _bucket_modify_task( + provision_process, bucket_list, snapshot_bucket_list_and_size +): for bkt in bucket_list: bkt = bkt.strip() logger.debug("Modification of bucket {} started".format(bkt)) - ramquotasize = _find_bucket_size_byname(bkt, snapshot_bucket_list_and_size) - logger.debug("Update bucket {} with ramsize {}MB".format(bkt, ramquotasize)) + ramquotasize = _find_bucket_size_byname( + bkt, snapshot_bucket_list_and_size + ) + logger.debug( + "Update bucket {} with ramsize {}MB".format(bkt, ramquotasize) + ) provision_process.bucket_edit_ramquota(bkt, _ramsize=ramquotasize) helper_lib.sleepForSecond(2) -def _source_config(virtual_source, repository=None, source_config=None, snapshot=None): +def _source_config( + virtual_source, repository=None, source_config=None, snapshot=None +): port = virtual_source.parameters.couchbase_port mount_path = virtual_source.parameters.mount_path host = virtual_source.connection.environment.host.name @@ -163,80 +432,185 @@ def _source_config(virtual_source, repository=None, source_config=None, snapshot couchbase_src_host=host, couchbase_src_port=port, pretty_name=pretty_name, - db_path=mount_path + db_path=mount_path, ) def vdb_start(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Starting couchbase server") try: provision_process.start_couchbase() - except Exception: - raise CouchbaseServicesError(" Start").to_user_error(), None, sys.exc_info()[2] + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.start_couchbase() + 
except Exception as ex_obj: + logger.exception(ex_obj) + raise CouchbaseServicesError(" Start").to_user_error() def vdb_stop(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Stopping couchbase server") provision_process.stop_couchbase() + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.stop_couchbase() + def vdb_pre_snapshot(virtual_source, repository, source_config): logger.debug("In Pre snapshot...") + provision_process = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + + nodeno = 1 + + provision_process.save_config(what="current", nodeno=nodeno) + + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): + for node in provision_process.parameters.node_list: + nodeno = nodeno + 1 + logger.debug("+++++++++++++++++++++++++++") + logger.debug(node) + logger.debug(nodeno) + logger.debug("+++++++++++++++++++++++++++") + addnode = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.save_config(what="current", nodeno=nodeno) def post_snapshot(virtual_source, repository, source_config): try: logger.debug("Taking Post Snapshot...") provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) - config_file = provision_process.get_config_file_path() + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + + ind = [] + + # ind = provision_process.get_indexes_definition() + # logger.debug("indexes definition : {}".format(ind)) - stdout, stderr, exit_code = helper_lib.read_file(virtual_source.connection, config_file) - bucket_list = re.sub('BUCKET_LIST=', '', stdout) - logger.debug("BUCKET_LIST={}".format(bucket_list)) + bucket_details = json.dumps(provision_process.bucket_list()) + logger.debug("BUCKET_LIST={}".format(bucket_details)) db_path = virtual_source.parameters.mount_path time_stamp = helper_lib.current_time() couchbase_port = virtual_source.parameters.couchbase_port couchbase_host = virtual_source.connection.environment.host.name snapshot_id = str(helper_lib.get_snapshot_id()) - snapshot = SnapshotDefinition(db_path=db_path, couchbase_port=couchbase_port, couchbase_host=couchbase_host, - bucket_list=bucket_list, time_stamp=time_stamp, 
snapshot_id=snapshot_id) - logger.info("snapshot schema: {}".format(snapshot)) + snapshot = SnapshotDefinition( + db_path=db_path, + couchbase_port=couchbase_port, + couchbase_host=couchbase_host, + bucket_list=bucket_details, + time_stamp=time_stamp, + snapshot_id=snapshot_id, + indexes=ind, + ) + + snapshot.couchbase_admin = provision_process.parameters.couchbase_admin + snapshot.couchbase_admin_password = ( + provision_process.parameters.couchbase_admin_password + ) + return snapshot except Exception as err: - logger.debug("Snap shot is failed with error {}".format(err.message)) + logger.debug("Snap shot is failed with error {}".format(str(err))) raise # This function returns the bucket name from snapshot. def _find_bucket_name_from_snapshot(snapshot): - bucket_list_and_size = snapshot.bucket_list + bucket_list_and_size = json.loads(snapshot.bucket_list) logger.debug("SnapShot bucket data is: {}".format(bucket_list_and_size)) - # bucket_list_and_size contains the ramsize e.g. "Bucket1,122:Bucket2,3432" - # Filtering the size from above information. - bucket_list_and_size += ':' - # Parsing logic because there could be bucket name having some digit - # bucket details in snapshot : Bucket_name1,RamSize1:Bucket_name2,RamSize2: - bucket_name = re.sub(',[0-9]*:', ':', bucket_list_and_size) - bucket_name = bucket_name.strip(':') + bucket_name = helper_lib.filter_bucket_name_from_output( + bucket_list_and_size + ) return bucket_name def _find_bucket_size_byname(bucket_name, bucket_metadata): data_found = 0 - for bkt in bucket_metadata.split(':'): - if bkt.split(',')[0] == bucket_name: + for bkt in bucket_metadata.split(":"): + if bkt.split(",")[0] == bucket_name: logger.debug("Bucket {} found in list".format(bucket_name)) data_found = 1 - bkt_size_mb = int(bkt.split(',')[1].strip()) // 1024 // 1024 + bkt_size_mb = int(bkt.split(",")[1].strip()) // 1024 // 1024 return bkt_size_mb if data_found == 0: # raise exception. Ideally this condition should never occur - raise Exception("Failed to find the bucket_name from bucket_metadata list") + raise Exception( + "Failed to find the bucket_name from bucket_metadata list" + ) + + +def _build_indexes(provision_process, snapshot): + logger.debug("index builder") + + for i in snapshot.indexes: + logger.debug(i) + provision_process.build_index(i) diff --git a/src/plugin_runner.py b/src/plugin_runner.py index 5ce1b36..f88d876 100644 --- a/src/plugin_runner.py +++ b/src/plugin_runner.py @@ -1,17 +1,67 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2024 by Delphix. All rights reserved. 
# # -from dlpx.virtualization.platform import Mount, MountSpecification, Plugin, Status -from operations import discovery, linked, virtual +import logging +import os + +from controller.couchbase_operation import CouchbaseOperation +from controller.helper_lib import check_server_is_used +from controller.helper_lib import check_stale_mountpoint +from controller.helper_lib import clean_stale_mountpoint +from controller.resource_builder import Resource +from dlpx.virtualization.common import RemoteConnection +from dlpx.virtualization.common import RemoteEnvironment +from dlpx.virtualization.common import RemoteHost +from dlpx.virtualization.common import RemoteUser +from dlpx.virtualization.platform import Mount +from dlpx.virtualization.platform import MountSpecification +from dlpx.virtualization.platform import OwnershipSpecification +from dlpx.virtualization.platform import Plugin +from operations import discovery +from operations import linked +from operations import virtual from utils import setup_logger -from db_commands.constants import EVICTION_POLICY - plugin = Plugin() setup_logger._setup_logger() +logger = logging.getLogger(__name__) + + +@plugin.upgrade.linked_source("2023.10.27.01") +def update_bucket_size(old_linked_source): + logger.debug(f"Started upgrade update_bucket_size:{old_linked_source}") + old_linked_source = dict(old_linked_source) + if isinstance(old_linked_source["bucketSize"], int): + if old_linked_source["bucketSize"] == 0: + old_linked_source["bucketSize"] = [] + else: + d = [{"bname": "*", "bsize": old_linked_source["bucketSize"]}] + old_linked_source["bucketSize"] = d + logger.debug(f"Completed update_bucket_size: {old_linked_source}") + return old_linked_source + + +@plugin.upgrade.linked_source("2023.10.27.02") +def update_archive_name(old_linked_source): + logger.debug(f"Started upgrade update_archive_name:{old_linked_source}") + old_linked_source = dict(old_linked_source) + if "archiveName" not in old_linked_source.keys(): + if old_linked_source["couchbaseBakLoc"] == "": + old_linked_source["archiveName"] = "" + else: + old_linked_source["archiveName"] = os.path.basename( + old_linked_source["couchbaseBakLoc"] + ) + old_linked_source["couchbaseBakLoc"] = os.path.dirname( + old_linked_source["couchbaseBakLoc"] + ) + logger.debug(f"Completed update_archive_name: {old_linked_source}") + return old_linked_source + + # # Below is an example of the repository discovery operation. 
# @@ -39,30 +89,65 @@ def source_config_discovery(source_connection, repository): @plugin.linked.post_snapshot() -def linked_post_snapshot(staged_source, repository, source_config, snapshot_parameters): - return linked.post_snapshot(staged_source, repository, source_config,staged_source.parameters.d_source_type) +def linked_post_snapshot( + staged_source, repository, source_config, optional_snapshot_parameters +): + return linked.post_snapshot( + staged_source, + repository, + source_config, + staged_source.parameters.d_source_type, + ) @plugin.linked.mount_specification() def linked_mount_specification(staged_source, repository): mount_path = staged_source.parameters.mount_path + + if check_stale_mountpoint(staged_source.staged_connection, mount_path): + cleanup_process = CouchbaseOperation( + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .build() + ) + cleanup_process.stop_couchbase() + clean_stale_mountpoint(staged_source.staged_connection, mount_path) + + check_server_is_used(staged_source.staged_connection, mount_path) + environment = staged_source.staged_connection.environment linked.check_mount_path(staged_source, repository) + logger.debug("Mounting path {}".format(mount_path)) mounts = [Mount(environment, mount_path)] - return MountSpecification(mounts) + logger.debug( + "Setting ownership to uid {} and gid {}".format( + repository.uid, + repository.gid, + ) + ) + ownership_spec = OwnershipSpecification(repository.uid, repository.gid) + return MountSpecification(mounts, ownership_spec) @plugin.linked.pre_snapshot() -def linked_pre_snapshot(staged_source, repository, source_config, snapshot_parameters): - if int(snapshot_parameters.resync) == 1: - linked.resync(staged_source, repository, source_config, staged_source.parameters) - linked.pre_snapshot(staged_source, repository, source_config, staged_source.parameters) +def linked_pre_snapshot( + staged_source, repository, source_config, optional_snapshot_parameters +): + if optional_snapshot_parameters and optional_snapshot_parameters.resync: + linked.resync( + staged_source, repository, source_config, staged_source.parameters + ) + else: + linked.pre_snapshot( + staged_source, repository, source_config, staged_source.parameters + ) @plugin.linked.status() def linked_status(staged_source, repository, source_config): return linked.d_source_status(staged_source, repository, source_config) + @plugin.linked.stop_staging() def stop_staging(staged_source, repository, source_config): linked.stop_staging(staged_source, repository, source_config) @@ -80,7 +165,12 @@ def configure(virtual_source, snapshot, repository): @plugin.virtual.reconfigure() def reconfigure(virtual_source, repository, source_config, snapshot): - return virtual.vdb_reconfigure(virtual_source, repository, source_config, snapshot) + return virtual.vdb_reconfigure( + virtual_source, + repository, + source_config, + snapshot, + ) @plugin.virtual.pre_snapshot() @@ -106,15 +196,106 @@ def stop(virtual_source, repository, source_config): @plugin.virtual.mount_specification() def virtual_mount_specification(virtual_source, repository): mount_path = virtual_source.parameters.mount_path + + if check_stale_mountpoint(virtual_source.connection, mount_path): + cleanup_process = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .build() + ) + cleanup_process.stop_couchbase() + clean_stale_mountpoint(virtual_source.connection, mount_path) + + 
check_server_is_used(virtual_source.connection, mount_path) + mounts = [Mount(virtual_source.connection.environment, mount_path)] - return MountSpecification(mounts) + logger.debug("Mounting path {}".format(mount_path)) + logger.debug( + "Setting ownership to uid {} and gid {}".format( + repository.uid, + repository.gid, + ) + ) + ownership_spec = OwnershipSpecification(repository.uid, repository.gid) + + logger.debug( + "in mounting: {}".format( + str(virtual_source.parameters.node_list), + ) + ) + + if ( + virtual_source.parameters.node_list is not None + and len(virtual_source.parameters.node_list) > 0 + ): + # more nodes + for m in virtual_source.parameters.node_list: + logger.debug("in loop: {}".format(str(m))) + node_host = RemoteHost( + name="foo", + reference=m["environment"].replace("_ENVIRONMENT", ""), + binary_path="", + scratch_path="", + ) + e = RemoteEnvironment("foo", m["environment"], node_host) + mount = Mount(e, mount_path) + mounts.append(mount) + + user = RemoteUser(name="unused", reference=m["environmentUser"]) + environment = RemoteEnvironment( + name="unused", reference=m["environment"], host=node_host + ) + clean_node_conn = RemoteConnection( + environment=environment, + user=user, + ) + + if check_stale_mountpoint(clean_node_conn, mount_path): + clean_node = CouchbaseOperation( + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .build(), + clean_node_conn, + ) + clean_node.stop_couchbase() + clean_stale_mountpoint(clean_node_conn, mount_path) + + check_server_is_used(clean_node_conn, mount_path) + + return MountSpecification(mounts, ownership_spec) @plugin.virtual.status() def virtual_status(virtual_source, repository, source_config): + logger.debug("in status") return virtual.vdb_status(virtual_source, repository, source_config) @plugin.virtual.unconfigure() def unconfigure(virtual_source, repository, source_config): - virtual.vdb_stop(virtual_source, repository, source_config) + logger.debug("UNCONFIGURE") + virtual.vdb_unconfigure(virtual_source, repository, source_config) + + +@plugin.upgrade.virtual_source("2021.07.19") +def add_node_to_virtual(old_virtual_source): + new_virt = dict(old_virtual_source) + new_virt["node_list"] = [] + return new_virt + + +@plugin.upgrade.virtual_source("2021.10.06") +def add_node_to_virtual1(old_virtual_source): + logger.debug("Doing upgrade to node_addr") + new_virt = dict(old_virtual_source) + logger.debug(new_virt) + for i in new_virt["node_list"]: + i["node_addr"] = "" + logger.debug("After changes") + logger.debug(new_virt) + return new_virt + + +@plugin.linked.source_size() +def linked_source_size(staged_source, repository, source_config): + return linked.source_size(staged_source, repository, source_config) diff --git a/src/utils/setup_logger.py b/src/utils/setup_logger.py index 6a223ac..90720f2 100644 --- a/src/utils/setup_logger.py +++ b/src/utils/setup_logger.py @@ -1,27 +1,35 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # import logging + from dlpx.virtualization import libs class Logger: - """ + """ """ - """ _logger = None def __get_mode(self, mode): return eval("logging." 
+ mode) - def __init__(self,name, mode="DEBUG", formatter='[%(asctime)s] [%(levelname)-10s] [%(filename)-15s:%(lineno)2d] %(message)s'): + def __init__( + self, + name, + mode="DEBUG", + formatter="[%(asctime)s] [%(levelname)-10s] " + "[%(filename)-15s:%(lineno)2d] %(message)s", + ): if Logger._logger is None: vsdkHandler = libs.PlatformHandler() vsdkHandler.setLevel(self.__get_mode(mode)) - vsdkFormatter = logging.Formatter(formatter, - datefmt="%Y-%m-%d %H:%M:%S") + vsdkFormatter = logging.Formatter( + formatter, + datefmt="%Y-%m-%d %H:%M:%S", + ) vsdkHandler.setFormatter(vsdkFormatter) logger = logging.getLogger(name) logger.addHandler(vsdkHandler) @@ -31,12 +39,18 @@ def __init__(self,name, mode="DEBUG", formatter='[%(asctime)s] [%(levelname)-10s def get_logger(self): return Logger._logger + def _setup_logger(): - log_message_format = '[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s' - log_message_date_format = '%Y-%m-%d %H:%M:%S' + log_message_format = ( + "[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s" + ) + log_message_date_format = "%Y-%m-%d %H:%M:%S" # Create a custom formatter. This will help in diagnose the problem. - formatter = logging.Formatter(log_message_format, datefmt=log_message_date_format) + formatter = logging.Formatter( + log_message_format, + datefmt=log_message_date_format, + ) platform_handler = libs.PlatformHandler() platform_handler.setFormatter(formatter) @@ -45,4 +59,4 @@ def _setup_logger(): logger.addHandler(platform_handler) # By default the root logger's level is logging.WARNING. - logger.setLevel(logging.DEBUG) \ No newline at end of file + logger.setLevel(logging.DEBUG) diff --git a/src/utils/utilities.py b/src/utils/utilities.py index 621b685..eb353fa 100644 --- a/src/utils/utilities.py +++ b/src/utils/utilities.py @@ -1,61 +1,199 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # import logging +import random from dlpx.virtualization import libs from dlpx.virtualization.libs import exceptions -from db_commands import commands - # logger object logger = logging.getLogger(__name__) -def execute_bash(source_connection, command_name, callback_func=None, environment_vars=None): +def execute_bash( + source_connection, command_name, callback_func=None, environment_vars=None +): """ :param callback_func: :param source_connection: Connection object for the source environment :param command_name: Command to be search from dictionary of bash command - :param environment_vars: Expecting environment variables which are required to execute the command + :param environment_vars: Expecting environment variables which are required + to execute the command :return: list of output of command, error string, exit code """ if source_connection is None: raise exceptions.PluginScriptError("Connection object cannot be empty") - result = libs.run_bash(source_connection, command=command_name, variables=environment_vars, use_login_shell=True) + result = libs.run_bash( + source_connection, + command=command_name, + variables=environment_vars, + use_login_shell=True, + ) - # strip the each part of result to remove spaces from beginning and last of output + # strip the each part of result to remove spaces from beginning + # and last of output output = result.stdout.strip() error = result.stderr.strip() exit_code = result.exit_code - # Verify the exit code of each executed command. 0 means command ran successfully and for other code it is failed. 
- # For failed cases we need to find the scenario in which programs will die and otherwise execution will continue. - _handle_exit_code(exit_code, error, output, callback_func) + # Verify the exit code of each executed command. 0 means command ran + # successfully and for other code it is failed. + # For failed cases we need to find the scenario in which programs will + # die and otherwise execution will continue. + # _handle_exit_code(exit_code, error, output, callback_func) return [output, error, exit_code] -def _handle_exit_code(exit_code, std_err=None, std_output=None, callback_func=None): - if exit_code == 0: - return +def execute_expect( + source_connection, command_name, callback_func=None, environment_vars=None +): + """ + :param callback_func: + :param source_connection: Connection object for the source environment + :param command_name: Command to be search from dictionary of bash command + :param environment_vars: Expecting environment variables which are + required to execute the command + :return: list of output of command, error string, exit code + """ - else: - # Call back function which contains logic to skip the error and continue to throw + if source_connection is None: + raise exceptions.PluginScriptError("Connection object cannot be empty") + + file_random_id = random.randint(1000000000, 9999999999) + + if "SHELL_DATA" in environment_vars: + environment_vars["CB_CMD"] = environment_vars["CB_CMD"].replace( + ".sh", f"_{file_random_id}.sh" + ) + result = libs.run_bash( + source_connection, + command='echo -e "$SHELL_DATA" > $CB_CMD', + use_login_shell=True, + variables=environment_vars, + ) + output = result.stdout.strip() + error = result.stderr.strip() + exit_code = result.exit_code + + logger.debug(f"dump_output==={output}") + logger.debug(f"dump_error==={error}") + logger.debug(f"dump_exit_code==={exit_code}") + result = libs.run_bash( + source_connection, + command="chmod +x $CB_CMD", + use_login_shell=True, + variables=environment_vars, + ) + output = result.stdout.strip() + error = result.stderr.strip() + exit_code = result.exit_code + + logger.debug(f"executable_output==={output}") + logger.debug(f"executable_error==={error}") + logger.debug(f"executable_exit_code==={exit_code}") + + file_path = f"/tmp/expect_script_{file_random_id}.exp" + + result = libs.run_bash( + source_connection, + command=f"echo -e '{command_name}' > {file_path}", + use_login_shell=True, + ) + output = result.stdout.strip() + error = result.stderr.strip() + exit_code = result.exit_code + + logger.debug(f"script_dump_output==={output}") + logger.debug(f"script_dump_error==={error}") + logger.debug(f"script_dump_exit_code==={exit_code}") + + result = libs.run_bash( + source_connection, + command=f"/usr/bin/expect -f {file_path}", + variables=environment_vars, + use_login_shell=True, + ) + + # strip the each part of result to remove spaces from beginning and + # last of output + output = result.stdout.strip() + error = result.stderr.strip() + exit_code = result.exit_code + + logger.debug(f"expect_output==={output}") + logger.debug(f"expect_error==={error}") + logger.debug(f"expect_exit_code==={exit_code}") + + libs.run_bash( + source_connection, command=f"rm -rf {file_path}", use_login_shell=True + ) + if "SHELL_DATA" in environment_vars: + libs.run_bash( + source_connection, command="rm -rf $CB_CMD", use_login_shell=True + ) + + if "DLPX_EXPECT_EXIT_CODE" in output: + exit_code = int( + output.split("DLPX_EXPECT_EXIT_CODE:")[1].split("\n")[0] + ) + if "\n" in output: + msg = ( + 
output.split("DLPX_EXPECT_EXIT_CODE:")[1] + .split("\n", 1)[1] + .strip() + ) + else: + msg = "" + if exit_code != 0: + error = msg + else: + output = msg + + if "cbq>" in output and output.rsplit("\n", 1)[1].strip() == "cbq>": + output = output.rsplit("\n", 1)[0] + + logger.debug(f"final_output==={output}") + logger.debug(f"final_error==={error}") + logger.debug(f"final_exit_code==={exit_code}") + # Verify the exit code of each executed command. 0 means command ran + # successfully and for other code it is failed. + # For failed cases we need to find the scenario in which programs + # will die and otherwise execution will continue. + # _handle_exit_code(exit_code, error, output, callback_func) + return [output, error, exit_code] + + +def _handle_exit_code( + exit_code, std_err=None, std_output=None, callback_func=None +): + if exit_code != 0: + # Call back function which contains logic to skip the error and + # continue to throw if callback_func: - logger.debug("Executing call back. Seems some exception is observed. Validating last error...") + logger.debug( + "Executing call back. Seems some exception is observed. " + "Validating last error..." + ) try: result_of_match = callback_func(std_output) - logger.debug("Call back result is : {}".format(result_of_match)) + logger.debug( + "Call back result is : {}".format(result_of_match) + ) if result_of_match: return True except Exception as err: - logger.debug("Failed to execute call back function with error: {}".format(err.message)) - - error_details = std_output - if error_details is None or error_details == "": - error_details = std_err - raise Exception(error_details) + logger.debug( + "Failed to execute call back function with " + "error: {}".format(str(err)) + ) + error_details = std_output + if error_details is None or error_details == "": + error_details = std_err + raise Exception(error_details) + else: + return False diff --git a/test/.coveragerc b/test/.coveragerc deleted file mode 100644 index 28c2620..0000000 --- a/test/.coveragerc +++ /dev/null @@ -1,18 +0,0 @@ -[run] -omit = - */generated/* - */lib/python2.7/* - *__init__.py* - *.png - *.css - *.js - *.json - - -[report] -exclude_lines = - raise NotImplementedError - return NotImplemented - if __name__ == .__main__. - - diff --git a/test/__init__.py b/test/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/test/conftest.py b/test/conftest.py deleted file mode 100755 index 068d4b1..0000000 --- a/test/conftest.py +++ /dev/null @@ -1,239 +0,0 @@ -# -# Copyright (c) 2020 by Delphix. All rights reserved. 
-# - -####################################################################################################################### - -import pytest -from dlpx.virtualization.common._common_classes import RemoteUser, RemoteHost, RemoteEnvironment, RemoteConnection -from dlpx.virtualization.platform import StagedSource, Mount, VirtualSource - -import sys - -sys.path.append('src') -from src.controller.couchbase_operation import CouchbaseOperation -from src.controller.resource_builder import Resource -from src.generated.definitions.linked_source_definition import LinkedSourceDefinition -from src.generated.definitions.repository_definition import RepositoryDefinition -from src.generated.definitions.source_config_definition import SourceConfigDefinition -from src.generated.definitions.snapshot_parameters_definition import SnapshotParametersDefinition -from src.generated.definitions.virtual_source_definition import VirtualSourceDefinition -from src.generated.definitions.snapshot_definition import SnapshotDefinition - -from test import constants - - -############# Fixtures for connection related classes defined in vsdk ################################################# -@pytest.fixture(scope="session", autouse=True) -def user(): - return RemoteUser(constants.username, constants.UserReference) - - -@pytest.fixture(scope="session", autouse=True) -def host(): - return RemoteHost(constants.hostname, constants.HostReference, constants.BINARY_PATH, constants.ScratchPath) - - -@pytest.fixture(scope="session", autouse=True) -def environment(host): - return RemoteEnvironment(constants.Environment, constants.EnvironmentReference, host) - - -@pytest.fixture(scope="session", autouse=True) -def source_connection(environment, user): - return RemoteConnection(environment, user) - - -####################################################################################################################### - - -############# Fixtures for objects received in plugin_runner.py ####################################################### - -@pytest.fixture(scope="session", autouse=True) -def staged_connection(source_connection): - return source_connection - - -@pytest.fixture(scope="session", autouse=True) -def source_config(): - return SourceConfigDefinition(couchbase_src_port=constants.source_port, - couchbase_src_host=constants.source_hostname, - pretty_name=constants.pretty_name, - db_path=constants.db_path) - - -@pytest.fixture(scope="session", autouse=True) -def snapshot_parameters(): - return SnapshotParametersDefinition(resync=True) - - -@pytest.fixture(scope="session", autouse=True) -def virtual_source_parameters(): - return VirtualSourceDefinition(cluster_ftsram_size=constants.cluster_fts_ramsize, - eventing_service=True, - cluster_index_ram_size=constants.cluster_index_ramsize, - cluster_eventing_ram_size=constants.cluster_eventing_ramsize, - couchbase_port=constants.port, - tgt_cluster_name=constants.target_cluster_name, - cluster_ram_size=constants.cluster_ramsize, - mount_path=constants.mount_path, - fts_service=True, - bucket_eviction_policy=constants.evictionpolicy, - couchbase_admin='Administrator', - analytics_service=True, - cluster_analytics_ram_size=constants.cluster_analytics_ramsize, - couchbase_admin_password='') - - -@pytest.fixture(scope="session", autouse=True) -def snapshot(): - return SnapshotDefinition(db_path=constants.db_path, - couchbase_port=constants.port, - couchbase_host=constants.hostname, - bucket_list=constants.bucket_name, - time_stamp=constants.timestamp, - snapshot_id=constants.snapshot_id) 
- - -@pytest.fixture(scope="session", autouse=True) -def staged_parameters_xdcr(): - return LinkedSourceDefinition( - cluster_ftsram_size=constants.cluster_fts_ramsize, - couchbase_port=constants.port, - xdcr_admin='Administrator', - cluster_index_ram_size=constants.cluster_index_ramsize, - bucket_eviction_policy='valueOnly', - eventing_service=True, couchbase_bak_repo='', - couchbase_bak_loc='', config_settings_prov=None, - stg_cluster_name='', couchbase_host='', - couchbase_admin_password='', fts_service=True, - couchbase_admin='Administrator', mount_path='', - xdcr_admin_password='', - cluster_eventing_ram_size=constants.cluster_eventing_ramsize, - cluster_ram_size=constants.ramsize, - d_source_type='XDCR', analytics_service=True, - cluster_analytics_ram_size=constants.cluster_analytics_ramsize, - bucket_size=0, - validate=True) - - -@pytest.fixture(scope="session", autouse=True) -def repository(): - return RepositoryDefinition(cb_shell_path=constants.shell_path, - version=constants.version, - cb_install_path=constants.INSTALL_PATH, - pretty_name=constants.pretty_name, - validate=True) - - -@pytest.fixture(scope="session", autouse=True) -def mount(environment): - return Mount(environment, constants.mount_path, None) - - -@pytest.fixture(scope="session", autouse=True) -def staged_source(source_connection, staged_connection, staged_parameters_xdcr, mount): - staged_source = StagedSource(constants.UID, source_connection, staged_parameters_xdcr, mount, staged_connection) - return staged_source - - -@pytest.fixture(scope="session", autouse=True) -def virtual_source(source_connection, staged_connection, virtual_source_parameters, mount): - staged_source = VirtualSource(constants.GID, source_connection, virtual_source_parameters, mount) - return staged_source - - -######################################################################################################################## - - -############# Fixture to create object of main class: CouchbaseOperation ######################################### -@pytest.fixture(scope="session", autouse=True) -def main_class(staged_source, repository): - obj = CouchbaseOperation(Resource.ObjectBuilder.set_staged_source(staged_source) - .set_repository(repository).set_dsource(True).build()) - return obj - - -@pytest.fixture(scope="session", autouse=True) -def get_couchbase_object(): - def couchbase_object_factory(**kwargs): - params = kwargs.keys() - obj_builder = Resource.ObjectBuilder - for key in params: - if key == "staged_source": - print " Setting staged_source" - obj_builder.set_staged_source(kwargs[key]) - elif key == "dsource": - print " Setting dsource" - obj_builder.set_dsource(kwargs[key]) - elif key == "virtual_source": - print " Setting staged_source" - obj_builder.set_virtual_source(kwargs[key]) - elif key == "repository": - print " Setting repository" - obj_builder.set_repository(kwargs[key]) - elif key == "source_config": - print " Setting source_config" - obj_builder.set_source_config(kwargs[key]) - elif key == "connection": - print " Setting connection" - obj_builder.set_connection(kwargs[key]) - elif key == "snapshot_parameters": - print " Setting snapshot_parameters" - obj_builder.set_snapshot_parameters(kwargs[key]) - elif key == "snapshot": - print " Setting snapshot" - obj_builder.set_snapshot(kwargs[key]) - else: - raise Exception("Invalid key passed") - obj_builder.build() - cb_obj = CouchbaseOperation(obj_builder) - return cb_obj - - return couchbase_object_factory - - 
-####################################################################################################################### - - -############# Fixture to mock the output of each command which is executing through run_bash ############# -class MockBashResponse(object): - def __init__(self, command_output, error_string, code): - self._stdout = command_output - self._stderr = error_string - self._exit_code = code - - @property - def stdout(self): - return self._stdout - - @property - def stderr(self): - return self._stderr - - @property - def exit_code(self): - return self._exit_code - - def __call__(self, *args, **kwargs): - return self - - -@pytest.fixture(scope="session", autouse=True) -def mock_run_bash(): - def mock_run_bash_inner(connection=None, command=None, **kwargs): - - cmd = kwargs['cmd'] - result_type = kwargs['test_type'] if 'test_type' in kwargs.keys() else constants.PASS - INDEX = kwargs['data_index'] if 'data_index' in kwargs.keys() else 0 - if cmd is not None and result_type is not None: - data = constants.CMD_TEST_DATA[cmd][result_type] - return MockBashResponse(data[INDEX][constants.OUTPUT], data[INDEX][constants.ERROR], - data[INDEX][constants.EXIT]) - if cmd is None: - raise Exception("cmd cannot be None") - if cmd not in constants.CMD_TEST_DATA.keys(): - raise Exception("Invalid command {} passed".format(cmd)) - - return mock_run_bash_inner -######################################################################################################################## diff --git a/test/constants.py b/test/constants.py deleted file mode 100644 index 193aa79..0000000 --- a/test/constants.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright (c) 2020 by Delphix. All rights reserved. -# -####################################################################################################################### - -INSTALL_PATH = "/opt/couchbase/couchbase-server" -BINARY_PATH = "/opt/couchbase/bin" -shell_path = "/opt/couchbase/couchbase-cli" -hostname = "hostname" -port = 8091 -username = "user" -cluster_name = "cluster" -cluster_ramsize = 100 -cluster_index_ramsize = 200 -cluster_fts_ramsize = 300 -cluster_eventing_ramsize = 400 -cluster_analytics_ramsize = 500 -additional_services = "query,index,data" -source_hostname = "source_hostname" -source_port = 8091 -source_username = "source_username" -source_bucket_name = "source_bucket_name" -target_bucket_name = "target_bucket_name" -uuid = "123456789" -directory_path = "/mnt/provision/test/directory_path" -mount_path = "/mnt/provision/mount_path" -bucket_name = "sample" -flush_value = "0" -ramsize = 100 -evictionpolicy = "valueOnly" -base_path = "base_path" -index = "index" -index_name = "index_name" -backup_location = "backup_location" -backup_repo = "backup_repo" -csv_bucket_list = "csv_bucket_list" -filename = "filename" -file_path = "test" -data = "data" -dir_path = "/tmp/.delphix" -DLPX_BIN_JQ = "/tmp" -version = "1.1" -pretty_name = "couchbase" -db_path = "/var/data/lib" -target_cluster_name = "target_cluster_name" -couchbase_admin_password="password" -timestamp="" -snapshot_id="12345" - -# Delphix Related -UserReference = "user-reference" -HostReference = "host-reference" -ScratchPath = "scratch_path" -Environment = "environment" -EnvironmentReference = "environment-reference" -UID = 1 -GID = 2 - -# unit test related -PASS = 1 -FAIL = 2 -OUTPUT = 0 -ERROR = 1 -EXIT = 2 - -CMD_TEST_DATA = {'start_couchbase_cmd': - [INSTALL_PATH + ' \-- -noinput -detached .', - [("", "", 0)], - [("command not found", "", 1)]], - 'bucket_create_cmd': - 
['/opt/couchbase/couchbase-cli bucket-create --cluster 127.0.0.1:8091 --username user --password $password --bucket sample --bucket-type couchbase --bucket-ramsize 100 --bucket-replica 0 --bucket-eviction-policy valueOnly --compression-mode passive --conflict-resolution sequence --wait', - [("SUCCESS: Bucket created", "", 0)], - [("ramQuotaMB - RAM quota cannot be less than 100 MB", "", 1)]], - 'bucket_delete_cmd': - ['/opt/couchbase/couchbase-cli bucket-delete --cluster hostname:8091 --username user --password $password --bucket=sample', - [("SUCCESS: Bucket deleted", "", 0)], - [("Bucket not found", "", 1)]], - 'node_init_cmd': - ['/opt/couchbase/couchbase-cli node-init --cluster 127.0.0.1:8091 --username user --password $password --node-init-data-path /mnt/provision/mount_path/data --node-init-index-path /mnt/provision/mount_path/data --node-init-analytics-path /mnt/provision/mount_path/data --node-init-hostname 127.0.0.1', - [("SUCCESS: Node initialized", "", 0)], - [("Changing data of nodes that are part of provisioned cluster is not supported", "", 1)]], - 'cluster_init_cmd': - ['/opt/couchbase/couchbase-cli cluster-init --cluster hostname:8091 --cluster-username user --cluster-password $password --cluster-ramsize 100 --cluster-name cluster --cluster-index-ramsize 200 --cluster-fts-ramsize 300 --cluster-eventing-ramsize 400 --cluster-analytics-ramsize 500 --services data,index,query,index,data', - [("SUCCESS: Cluster initialized", "", 0)], - [("ERROR: Cluster is already initialized, use setting-cluster to change settings", "", 1)]], - 'xdcr_replicate_cmd': - ['/opt/couchbase/couchbase-cli xdcr-replicate --cluster source_hostname:8091 --username source_username --password $source_password --create --xdcr-from-bucket source_bucket_name --xdcr-to-bucket target_bucket_name --xdcr-cluster-name cluster', - [("XDCR replication create succeeded", "", 0)], - [("Already XDCR set up have been between source and staging server", "", 1)]], - } diff --git a/test/plugin_report.css b/test/plugin_report.css deleted file mode 100755 index 32bf635..0000000 --- a/test/plugin_report.css +++ /dev/null @@ -1,58 +0,0 @@ - - -body { - background-color: LightYellow; -} - - -h1 { - text-indent:-9999px; -} - - -h1:before { - text-indent:0; - content:'Couchbase Plugin: Unit Test Result'; - float:left; -} - -#environment td{ -border: 1px solid #000000;; -} - -h1 { - color: Green; - text-align: center; - -} -table, th, td { - border: 1px solid black; -} - -p { - font-family: Arial; - font-size: 18px; -} - - -table { - border: 1px solid black; - table-layout: fixed; - border-collapse: collapse; - font-size: 14px -} - -#results-table-head { - background-color: lightsteelblue; - font-size: 16px; - color: black; - font-family: Arial; -} - -#results-table td{ - border: 1px solid black; - table-layout: fixed; - width=60% - color: black; - font-size: 14px -} \ No newline at end of file diff --git a/test/pytest.ini b/test/pytest.ini deleted file mode 100755 index d7d6daf..0000000 --- a/test/pytest.ini +++ /dev/null @@ -1,15 +0,0 @@ -[pytest] -norecursedirs = *site-packages* *.egg-info .git -addopts = - --cov=. 
- --cov-report html:test/CodeCoverage - --cov-report term - --html=test/Report.html --self-contained-html - --cov-config=test/.coveragerc - --css=test/plugin_report.css - -p no:warnings - -console_output_style=classic - - - diff --git a/test/requirements.txt b/test/requirements.txt deleted file mode 100755 index dd16ca0..0000000 --- a/test/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -dvp==2.1.0 -pytest==4.6.10 -pytest-cov -pytest-html diff --git a/test/test_database.py b/test/test_database.py deleted file mode 100644 index 55543e4..0000000 --- a/test/test_database.py +++ /dev/null @@ -1,130 +0,0 @@ -# -# Copyright (c) 2020 by Delphix. All rights reserved. -# -####################################################################################################################### - -import pytest -from dlpx.virtualization.platform import Status - -from src.utils import utilities -import test.constants as CS -from src.db_commands.commands import CommandFactory - - -def printlog(casenumber): - if casenumber == 1: - print "1. Command validated successfully" - elif casenumber == 2: - print "2. Positive test case validated" - elif casenumber == 3: - print "3. Negative test case validated" - - - -def test_start_couchbase(main_class, monkeypatch, mock_run_bash): - # positive test case - kwargs = {'cmd': 'start_couchbase_cmd', 'test_type': CS.PASS} - assert CommandFactory.start_couchbase(CS.INSTALL_PATH) == CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - def get_status(): - return Status.ACTIVE - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - monkeypatch.setattr(main_class, 'status', get_status) - - assert main_class.start_couchbase() is None - printlog(2) - #Negative test case - kwargs['test_type'] = CS.FAIL - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - with pytest.raises(Exception) as err: - main_class.start_couchbase() - err.match(r'.*Internal error occurred*') - printlog(3) - - -def test_bucket_create(main_class, monkeypatch, mock_run_bash): - # positive test case - - kwargs = {'cmd': 'bucket_create_cmd', 'test_type': CS.PASS} - assert CommandFactory.bucket_create(CS.shell_path, CS.hostname, CS.port, CS.username, CS.bucket_name, CS.ramsize, - CS.evictionpolicy) == \ - CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.bucket_create(CS.bucket_name, CS.ramsize) is None - printlog(2) - # Negative test case - kwargs['test_type'] = CS.FAIL - kwargs['data_index'] = 0 - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - with pytest.raises(Exception) as err: - main_class.bucket_create(CS.bucket_name, CS.ramsize) - err.match(r'.*Provided bucket size is not suffice to proceed*') - printlog(3) - - -def test_bucket_delete(main_class, monkeypatch, mock_run_bash): - # positive test case - kwargs = {'cmd': 'bucket_delete_cmd', 'test_type': CS.PASS} - assert CommandFactory.bucket_delete(CS.shell_path, CS.hostname, CS.port, CS.username, CS.bucket_name) == \ - CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.bucket_delete(CS.bucket_name)[2] == 0 - printlog(2) - # Negative test case - kwargs['test_type'] = CS.FAIL - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', 
mock_run_bash(None, None, **kwargs)) - with pytest.raises(Exception) as err: - main_class.bucket_delete(CS.bucket_name) - err.match(r'.*Internal error occurred*') - printlog(3) - - -def test_node_init_cmd(main_class, monkeypatch, mock_run_bash): - # positive test case - kwargs = {'cmd': 'node_init_cmd', 'test_type': CS.PASS} - assert CommandFactory.node_init(CS.shell_path, CS.port, CS.username, CS.mount_path) == \ - CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.node_init() is None - printlog(2) - # Negative test case - kwargs['test_type'] = CS.FAIL - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - main_class.node_init() - printlog(3) - - -def test_cluster_init_cmd(main_class, monkeypatch, mock_run_bash): - # positive test case - kwargs = {'cmd': 'cluster_init_cmd', 'test_type': CS.PASS} - assert CommandFactory.cluster_init(CS.shell_path, CS.hostname, CS.port, CS.username, CS.ramsize, CS.cluster_name, - CS.cluster_index_ramsize, CS.cluster_fts_ramsize, CS.cluster_eventing_ramsize, - CS.cluster_analytics_ramsize, CS.additional_services) == \ - CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.cluster_init()[2] == 0 - printlog(2) - # Negative test case - kwargs['test_type'] = CS.FAIL - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.cluster_init()[2] == 1 - printlog(3) - - -def test_xdcr_replicate_cmd(main_class, monkeypatch, mock_run_bash): - # positive test case - kwargs = {'cmd': 'xdcr_replicate_cmd', 'test_type': CS.PASS} - assert CommandFactory.xdcr_replicate(CS.shell_path, CS.source_hostname, CS.source_port, CS.source_username,CS.source_bucket_name, CS.target_bucket_name, CS.cluster_name, CS.hostname, CS.port, CS.username) == CS.CMD_TEST_DATA[kwargs['cmd']][0] - printlog(1) - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.xdcr_replicate(CS.bucket_name, CS.bucket_name) is None - printlog(2) - # Negative test case - kwargs['test_type'] = CS.FAIL - monkeypatch.setattr('test.test_database.utilities.libs.run_bash', mock_run_bash(None, None, **kwargs)) - assert main_class.xdcr_replicate(CS.bucket_name, CS.bucket_name) is None - printlog(3) diff --git a/test/test_plugin.py b/test/test_plugin.py deleted file mode 100644 index a52eee7..0000000 --- a/test/test_plugin.py +++ /dev/null @@ -1,188 +0,0 @@ -# -# Copyright (c) 2020 by Delphix. All rights reserved. 
-# -####################################################################################################################### - -import pytest -from src.utils import utilities -import test.constants as CS -from src.db_commands.commands import CommandFactory -from src.controller.helper_lib import get_snapshot_id,get_all_bucket_list_with_size, get_bucket_name_with_size,\ - get_bucket_size_in_MB - - - -def test_couchbase_factory(get_couchbase_object,virtual_source , staged_source, repository, - snapshot_parameters,source_connection, source_config, snapshot): - print ("creating object for the case: repository_discovery") - - kwargs = {'connection': source_connection} - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: source_config_discovery") - kwargs = {'connection': source_connection, 'repository': repository} - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: D_source Snapshot") - snapshot_parameters.resync=False - kwargs = {'source_config': source_config, 'repository': repository, 'staged_source': staged_source, - 'snapshot_parameters': snapshot_parameters } - - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: D_source status, D_source disable, D_source enable") - kwargs = {'source_config': source_config, 'repository': repository, 'staged_source': staged_source } - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: vdb configure") - kwargs = {'repository': repository, 'snapshot': snapshot, 'virtual_source': virtual_source} - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: vdb reconfigure") - kwargs = {'repository': repository, 'snapshot': snapshot, 'virtual_source': virtual_source, 'source_config': source_config} - obj = get_couchbase_object(**kwargs) - - print("creating object for the case: vdb pre_snapshot") - kwargs = {'repository': repository, 'source_config': source_config, 'virtual_source': virtual_source} - obj = get_couchbase_object(**kwargs) - - kwargs = {'repository': repository, 'source_config': source_config, 'dummy': virtual_source} - - with pytest.raises(Exception) as err: - obj = get_couchbase_object(**kwargs) - err.match(r'.*Invalid key passed*') - - -def test_snapshot_id_generation(): - print "Test snapshot id generator." 
- counter = 1 - total_snap_ids = [] - id = get_snapshot_id() - while counter <= 200 and id not in total_snap_ids: - print " Checking id : {}".format(id) - total_snap_ids.append(id) - id = get_snapshot_id() - counter = counter + 1 - assert counter == 201 - -def test_get_all_bucket_list_with_size(): - print "Validating the function get_all_bucket_list_with_size" - bucket_output="""beer-sample - bucketType: membase - numReplicas: 1 - ramQuota: 104857600 - ramUsed: 17992995844 -gamesim-sample - bucketType: membase - numReplicas: 1 - ramQuota: 94857600 - ramUsed: 14847224 -travel-sample - bucketType: membase - numReplicas: 1 - ramQuota: 114857600 - ramUsed: 104857600""" - output = get_all_bucket_list_with_size(bucket_output.split("\n")) - - for each_bucket in output: - bkt, ramUsed = each_bucket.split(",") - print bkt, ramUsed - if bkt == "beer-sample": - if ramUsed == "9896147714": - print " beer-sample data is correct" - else: - raise Exception("Invalid bucket size got for beer-sample") - elif bkt == "gamesim-sample": - if ramUsed == "104857600": - print " gamesim-sample data is correct" - else: - raise Exception("Invalid bucket size got for gamesim-sample") - elif bkt == "travel-sample": - if ramUsed == "104857600": - print " travel-sample data is correct" - else: - raise Exception("Invalid bucket size got for travel-sample") - elif bkt != " ": - raise Exception("Invalid bucket name identified") - - -def test_get_bucket_name_with_size(): - print "Finding specific bucket with size from bucket output" - bucket_output = """ - beer-sample - bucketType: membase - numReplicas: 1 - ramQuota: 104857600 - ramUsed: 17992995844 - gamesim-sample - bucketType: membase - numReplicas: 1 - ramQuota: 94857600 - ramUsed: 14847224 - travel-sample - bucketType: membase - numReplicas: 1 - ramQuota: 114857600 - ramUsed: 104857600 - """ - output = get_bucket_name_with_size(bucket_output.split("\n"), "beer-sample") - print output - if output.split(',')[1] == "9896147714": - print " Verified for beer-sample" - output = get_bucket_name_with_size(bucket_output.split("\n"), "gamesim-sample") - if output.split(',')[1] == "104857600": - print " Verified for gamesim-sample" - output = get_bucket_name_with_size(bucket_output.split("\n"), "travel-sample") - if output.split(',')[1] == "104857600": - print " Verified for travel-sample" - #TODO - #Add test case for bucket which is not present - #Add test case for bucket name with special chars - - -def test_get_bucket_size_in_MB(): - print ("Testing conversion of bucket size into MegaByte") - case="" - output = get_bucket_size_in_MB(0, 10000000) - print output - if output!=9: - case+="1, " - output = get_bucket_size_in_MB(1, 1) - if output!=1: - case += "2, " - print output - output = get_bucket_size_in_MB(0, 1) - if output != 0: - case += "3, " - print output - output = get_bucket_size_in_MB(0, 0) - if output != 0: - case += "4, " - print output - output = get_bucket_size_in_MB(100000, 1) - if output != 100000: - case += "5, " - print output - if case!="": - pytest.fail(" Failed for the case : {}".format(case)) - - - - - - - - - - - - - - - - - - - - - diff --git a/test/virtualEnvSetup.sh b/test/virtualEnvSetup.sh deleted file mode 100755 index 180f324..0000000 --- a/test/virtualEnvSetup.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -#this script creates a virtual environment and install the below packages: -#dvp==2.1.0 -#pytest==4.6.10 -#pytest-cov -#pytest-html -DEFAULT_ENV_NAME="env" - -printMessage(){ - echo 
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" - echo "$1" - echo "" -} - -if [[ "$1" == "-h" ]]; then - printMessage "" - echo "Usage: `basename $0` [Provide the virtual environment to create. For example: ./virtualEnvSetup.sh 'CouchbaseSetup']" - echo " Default environment name is: $DEFAULT_ENV_NAME" - echo " List of python libraries which will be installed in this set up: dvp, pytest, pytest-cov, pytest-html" - printMessage "" - exit 0 -fi - -env_name=$1 -if [[ "$env_name" == "" ]] ; then - env_name=${DEFAULT_ENV_NAME} - printMessage "Setting default virtual environment name as: $env_name" -fi - -ROOT_DIR=$PWD/test -VIRTUAL_ENV_PATH="${ROOT_DIR}"/${env_name} -if [[ -d ${VIRTUAL_ENV_PATH} ]] ; then - printMessage "${VIRTUAL_ENV_PATH} exist, please provide different name" - exit 0 -fi - -printMessage "Virtual environment name is : $env_name, Path: $ROOT_DIR" - -virtualenv -p python2.7 ${VIRTUAL_ENV_PATH} -if [[ $? -ne 0 ]]; then - rm -rf ${VIRTUAL_ENV_PATH} - printMessage "Virtual environment creation failed, removed ${VIRTUAL_ENV_PATH}" - exit 1 -fi - -. "${VIRTUAL_ENV_PATH}"/bin/activate -if [[ $? -ne 0 ]]; then - rm -rf ${VIRTUAL_ENV_PATH} - printMessage "Virtual environment activation failed, removed ${VIRTUAL_ENV_PATH}" - exit 1 -fi - - -pip install -r "${ROOT_DIR}"/requirements.txt -if [[ $? -ne 0 ]]; then - deactivate - rm -rf ${VIRTUAL_ENV_PATH} - printMessage "Package installation failed, removed ${VIRTUAL_ENV_PATH}" - exit 1 -fi - -printMessage "Set up completed in virtual environment ${VIRTUAL_ENV_FOLDER}" -deactivate -printMessage "Deactivated the environment. Execute: . test/${env_name}/bin/activate to activate again" -printMessage "To delete this setup, execute: rm -rf test/$env_name" diff --git a/.github/workflows/pre-commit.yml b/workflow_bkp_precommit similarity index 100% rename from .github/workflows/pre-commit.yml rename to workflow_bkp_precommit