diff --git a/docs/scheduledtasks.md b/docs/scheduledtasks.md index bf81259ff..cec231756 100644 --- a/docs/scheduledtasks.md +++ b/docs/scheduledtasks.md @@ -16,8 +16,8 @@ Example configuration: ``` -To disblale the automatic scrubbing job add `"ovs.generic.execute_scrub": null` to the JSON object. -In case you want to change the schedule for the ALBA backend verifictaion process which checks the state of each object in the backend, add `"alba.verify_namespaces": {"minute": "0", "hour": "0", "month_of_year": "*/X"}` where X is the amount of months between each run. +To disable the automatic scrubbing job add `"ovs.generic.execute_scrub": null` to the JSON object. +In case you want to change the schedule for the ALBA backend verification process which checks the state of each object in the backend, add `"alba.verify_namespaces": {"minute": "0", "hour": "0", "month_of_year": "*/X"}` where X is the amount of months between each run. In case the configuration cannot be parsed at all (e.g. invalid JSON), the code will fallback to the hardcoded schedule. If the crontab arguments are invalid (e.g. they contain an unsupported key) the task will be disabled. diff --git a/docs/snapshots.md b/docs/snapshots.md new file mode 100644 index 000000000..7e93ba12f --- /dev/null +++ b/docs/snapshots.md @@ -0,0 +1,91 @@ +### Snapshot management +The Framework will, by default, create snapshots of every vDisk every hour +(can be adjusted. See docs/scheduledtasks.md). + +To keep the snapshots manageable overtime, the Framework schedules a clean-up every day to enforce a retention policy. 
+This automatic task will: +- Create an overview of all the snapshots for every volume +- Skip the first 24 hours (allows the user to create as many snapshots as he wants daily) +- Enforce the retention policy + +The default retention policy is: +- a single snapshot per day is kept for the first 7 days + - Prioritizes consistent snapshots over older ones for the first day in the policy + (which is 2 days back, starting from now) +- A single snapshot is kept for the 2nd, 3rd and 4th week to have a single snapshot of the week for the first month +- All older snapshots are discarded + +#### Configuring the retention policy +A retention policy can be configured so the scheduled task will enforce a different one from the default. + +It can be customized on: +- Global level, enforces the policy to all vDisks within the cluster +- VPool level, overrides the global level, enforce to all vDisks within the vPool +- VDisk level, overrides the global and vPool level, enforce to this vDisk only + +The notation of the policy is a list containing policies. A policy consists minimally of `nr_of_snapshots`, which +is the number of snapshots to have over the given `nr_of_days`, and `nr_of_days` which is the number of days to span +the `nr_of_snapshots` over. This notation allows for some fine grained control while also being easy to configure. +Since we are working with days, *monthly and weekly policies will not follow the calendar days!* + +There are two additional options available: `consistency_first` +which indicates that: +- this policy has to search for the oldest consistent snapshot instead of the oldest one +- When no consistent snapshot was found, find the oldest snapshot + +If a policy interval spans multiple days, the `consistency_first_on` can be configured to narrow the days down +to apply the `consistency_first` rules. +This option takes in a list of day numbers. 
+ + +If we were to write out the default retention policy, it would look like: +``` +[# one per day for the week and opt for a consistent snapshot for the first day + {'nr_of_snapshots': 7, 'nr_of_days': 7, 'consistency_first': True, 'consistency_first_on': [1]}, + # One per week for the rest of the month + {'nr_of_snapshots': 3, 'nr_of_days': 21}] +``` + +Configuring it on different levels can be done using the API: +- Global level: POST to: `'/storagerouters//global_snapshot_retention_policy'` +- vPool level: POST to: `/vpools//snapshot_retention_policy` +- vDisk level: POST to: `/vdisks//snapshot_retention_policy` + +##### Examples: +The examples simplify a week as 7 days and months as 4 * 7 days. + +I wish to keep hourly snapshots from the first week +``` +[{'nr_of_days': 7, # A week spans 7 days + 'nr_of_snapshots': 168}] # Keep 24 snapshots for every day for 7 days: 7 * 24 +``` +I wish to keep hourly snapshots from the first week and one for every week for the whole year +``` +[ # First policy + {'nr_of_days': 7, # A week spans 7 days + 'nr_of_snapshots': 7 * 24}, # Keep 24 snapshots for every day for 7 days: 7 * 24 + # Second policy + {'nr_of_days': 7 * (52 - 1), # The first week is already covered by the previous policy, so 52 - 1 weeks remaining + 'nr_of_snapshots': 1 * (52 - 1)} +] +``` + +A production use case could be: +``` +[ # First policy - keep the first 24 snapshots + {'nr_of_days': 1, + 'nr_of_snapshots': 24 }, + # Second policy - Keep 4 snapshots a day for the remaining week (6 leftover days) + {'nr_of_days': 6, + 'nr_of_snapshots': 4 * 6}, + # Third policy - keep 1 snapshot per day for the 3 weeks to come + {'nr_of_days': 3 * 7, + 'nr_of_snapshots': 3 * 7}, + # Fourth policy - keep 1 snapshot per week for the next 5 months + {'nr_of_days': 4 * 7 * 5, # Use the week notation to avoid issues (4 * 7 days = month) + 'nr_of_snapshots': 4 * 5}, # 1 per week: 4 weeks per month * 5 months + # Fifth policy - first 6 months are configured by now - Keep a snapshot every 6 months until 2 years have passed 
+ {'nr_of_days': (4 * 7) * (6 * 3), + 'nr_of_snapshots': 3} + ] + ``` \ No newline at end of file diff --git a/ovs/constants/vdisk.py b/ovs/constants/vdisk.py index 1e8b3c00d..5232cf9d5 100644 --- a/ovs/constants/vdisk.py +++ b/ovs/constants/vdisk.py @@ -17,6 +17,7 @@ """ VDisk Constants module. Contains constants related to vdisks """ +import os # General LOCK_NAMESPACE = 'ovs_locks' @@ -24,3 +25,11 @@ # Scrub related SCRUB_VDISK_LOCK = '{0}_{{0}}'.format(LOCK_NAMESPACE) # Second format is the vdisk guid SCRUB_VDISK_EXCEPTION_MESSAGE = 'VDisk is being scrubbed. Unable to remove snapshots at this time' + +# Snapshot related +# Note: the scheduled task will always skip the first 24 hours before enforcing the policy +SNAPSHOT_POLICY_DEFAULT = [# one per day for rest of the week and opt for a consistent snapshot for the first day + {'nr_of_snapshots': 7, 'nr_of_days': 7, 'consistency_first': True, 'consistency_first_on': [1]}, + # One per week for the rest of the month + {'nr_of_snapshots': 3, 'nr_of_days': 21}] +SNAPSHOT_POLICY_LOCATION = os.path.join(os.path.sep, 'ovs', 'cluster', 'snapshot_retention_policy') diff --git a/ovs/dal/hybrids/vdisk.py b/ovs/dal/hybrids/vdisk.py index 7c2a4b205..66a63603f 100644 --- a/ovs/dal/hybrids/vdisk.py +++ b/ovs/dal/hybrids/vdisk.py @@ -51,7 +51,8 @@ class VDisk(DataObject): Property('pagecache_ratio', float, default=1.0, doc='Ratio of the volume\'s metadata pages that needs to be cached'), Property('metadata', dict, default=dict(), doc='Contains fixed metadata about the volume (e.g. lba_size, ...)'), Property('cache_quota', dict, mandatory=False, doc='Maximum caching space(s) this volume can consume (in Bytes) per cache type. 
If not None, the caching(s) for this volume has been set manually'), - Property('scrubbing_information', dict, mandatory=False, doc='Scrubbing metadata set by scrubber with an expiration date')] + Property('scrubbing_information', dict, mandatory=False, doc='Scrubbing metadata set by scrubber with an expiration date'), + Property('snapshot_retention_policy', list, mandatory=False, doc='Snapshot retention policy configuration')] __relations = [Relation('vpool', VPool, 'vdisks'), Relation('parent_vdisk', None, 'child_vdisks', mandatory=False)] __dynamics = [Dynamic('dtl_status', str, 60), diff --git a/ovs/dal/hybrids/vpool.py b/ovs/dal/hybrids/vpool.py index d147f531f..85cb507b3 100644 --- a/ovs/dal/hybrids/vpool.py +++ b/ovs/dal/hybrids/vpool.py @@ -46,7 +46,8 @@ class VPool(DataObject): Property('metadata', dict, mandatory=False, doc='Metadata for the backends, as used by the Storage Drivers.'), Property('rdma_enabled', bool, default=False, doc='Has the vpool been configured to use RDMA for DTL transport, which is only possible if all storagerouters are RDMA capable'), Property('status', STATUSES.keys(), doc='Status of the vPool'), - Property('metadata_store_bits', int, mandatory=False, doc='StorageDrivers deployed for this vPool will make use of this amount of metadata store bits')] + Property('metadata_store_bits', int, mandatory=False, doc='StorageDrivers deployed for this vPool will make use of this amount of metadata store bits'), + Property('snapshot_retention_policy', list, mandatory=False, doc='Snapshot retention policy configuration')] __relations = [] __dynamics = [Dynamic('configuration', dict, 3600), Dynamic('statistics', dict, 4), diff --git a/ovs/lib/generic.py b/ovs/lib/generic.py index f86f42831..1070e2b98 100644 --- a/ovs/lib/generic.py +++ b/ovs/lib/generic.py @@ -23,13 +23,10 @@ from celery import group from celery.utils import uuid from celery.result import GroupResult -from datetime import datetime, timedelta +from datetime import timedelta 
from threading import Thread -from time import mktime -from ovs.constants.vdisk import SCRUB_VDISK_EXCEPTION_MESSAGE from ovs.dal.hybrids.servicetype import ServiceType from ovs.dal.hybrids.storagedriver import StorageDriver -from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.lists.servicelist import ServiceList from ovs.dal.lists.storagedriverlist import StorageDriverList from ovs.dal.lists.storagerouterlist import StorageRouterList @@ -41,6 +38,7 @@ from ovs.lib.helpers.toolbox import Toolbox, Schedule from ovs.lib.vdisk import VDiskController from ovs.log.log_handler import LogHandler +from ovs.lib.helpers.generic.snapshots import SnapshotManager class GenericController(object): @@ -90,160 +88,48 @@ def delete_snapshots(timestamp=None): :return: The GroupResult :rtype: GroupResult """ + if os.environ.get('RUNNING_UNITTESTS') == 'False': + assert timestamp is None, 'Providing a timestamp is only possible during unittests' + # The result cannot be fetched in this task group_id = uuid() return group(GenericController.delete_snapshots_storagedriver.s(storagedriver.guid, timestamp, group_id) for storagedriver in StorageDriverList.get_storagedrivers()).apply_async(task_id=group_id) @staticmethod - @ovs_task(name='ovs.generic.delete_snapshots_storagedriver', ensure_single_info={'mode': 'DEDUPED'}) + @ovs_task(name='ovs.generic.delete_snapshots_storagedriver', ensure_single_info={'mode': 'DEDUPED', 'ignore_arguments': ['timestamp', 'group_id']}) def delete_snapshots_storagedriver(storagedriver_guid, timestamp=None, group_id=None): + # type: (str, float, str) -> Dict[str, List[str]] """ - Delete snapshots per storagedriver & scrubbing policy + Delete snapshots & scrubbing policy - Implemented delete snapshot policy: + Implemented default delete snapshot policy: < 1d | 1d bucket | 1 | best of bucket | 1d < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m > 1m | delete - + The configured policy can differ from this one. 
:param storagedriver_guid: Guid of the StorageDriver to remove snapshots on :type storagedriver_guid: str - :param timestamp: Timestamp to determine whether snapshots should be kept or not, if none provided, current time will be used + :param timestamp: Timestamp to determine whether snapshots should be kept or not, + if none provided, the current timestamp - 1 day is used. Used in unittesting only! + The scheduled task will not remove snapshots of the current day this way! :type timestamp: float :param group_id: ID of the group task. Used to identify which snapshot deletes were called during the scheduled task :type group_id: str - :return: None + :return: Dict with vdisk guid as key, deleted snapshot ids as value + :rtype: dict """ - if group_id: - log_id = 'Group job {} - '.format(group_id) - else: - log_id = '' - - def format_log(message): - return '{}{}'.format(log_id, message) - - GenericController._logger.info(format_log('Delete snapshots started for StorageDriver {0}'.format(storagedriver_guid))) - - storagedriver = StorageDriver(storagedriver_guid) - exceptions = [] - - day = timedelta(1) - week = day * 7 + if os.environ.get('RUNNING_UNITTESTS') == 'False': + assert timestamp is None, 'Providing a timestamp is only possible during unittests' - def make_timestamp(offset): - """ - Create an integer based timestamp - :param offset: Offset in days - :return: Timestamp - """ - return int(mktime((base - offset).timetuple())) - - # Calculate bucket structure if timestamp is None: - timestamp = time.time() - base = datetime.fromtimestamp(timestamp).date() - day - buckets = [] - # Buckets first 7 days: [0-1[, [1-2[, [2-3[, [3-4[, [4-5[, [5-6[, [6-7[ - for i in xrange(0, 7): - buckets.append({'start': make_timestamp(day * i), - 'end': make_timestamp(day * (i + 1)), - 'type': '1d', - 'snapshots': []}) - # Week buckets next 3 weeks: [7-14[, [14-21[, [21-28[ - for i in xrange(1, 4): - buckets.append({'start': make_timestamp(week * i), - 'end': make_timestamp(week * (i 
+ 1)), - 'type': '1w', - 'snapshots': []}) - buckets.append({'start': make_timestamp(week * 4), - 'end': 0, - 'type': 'rest', - 'snapshots': []}) - - # Get a list of all snapshots that are used as parents for clones - parent_snapshots = set([vd.parentsnapshot for vd in VDiskList.get_with_parent_snaphots()]) - - # Place all snapshots in bucket_chains - bucket_chains = [] - for vdisk_guid in storagedriver.vdisks_guids: - try: - vdisk = VDisk(vdisk_guid) - vdisk.invalidate_dynamics('being_scrubbed') - if vdisk.being_scrubbed: - continue + timestamp = time.time() - timedelta(1).total_seconds() - if vdisk.info['object_type'] in ['BASE']: - bucket_chain = copy.deepcopy(buckets) - for snapshot in vdisk.snapshots: - if snapshot.get('is_sticky') is True: - continue - if snapshot['guid'] in parent_snapshots: - GenericController._logger.info(format_log('Not deleting snapshot {0} because it has clones'.format(snapshot['guid']))) - continue - timestamp = int(snapshot['timestamp']) - for bucket in bucket_chain: - if bucket['start'] >= timestamp > bucket['end']: - bucket['snapshots'].append({'timestamp': timestamp, - 'snapshot_id': snapshot['guid'], - 'vdisk_guid': vdisk.guid, - 'is_consistent': snapshot['is_consistent']}) - bucket_chains.append(bucket_chain) - except Exception as ex: - exceptions.append(ex) - - # Clean out the snapshot bucket_chains, we delete the snapshots we want to keep - # And we'll remove all snapshots that remain in the buckets - for bucket_chain in bucket_chains: - first = True - for bucket in bucket_chain: - if first is True: - best = None - for snapshot in bucket['snapshots']: - if best is None: - best = snapshot - # Consistent is better than inconsistent - elif snapshot['is_consistent'] and not best['is_consistent']: - best = snapshot - # Newer (larger timestamp) is better than older snapshots - elif snapshot['is_consistent'] == best['is_consistent'] and \ - snapshot['timestamp'] > best['timestamp']: - best = snapshot - bucket['snapshots'] = [s for s 
in bucket['snapshots'] if - s['timestamp'] != best['timestamp']] - first = False - elif bucket['end'] > 0: - oldest = None - for snapshot in bucket['snapshots']: - if oldest is None: - oldest = snapshot - # Older (smaller timestamp) is the one we want to keep - elif snapshot['timestamp'] < oldest['timestamp']: - oldest = snapshot - bucket['snapshots'] = [s for s in bucket['snapshots'] if - s['timestamp'] != oldest['timestamp']] - - # Delete obsolete snapshots - for bucket_chain in bucket_chains: - # Each bucket chain represents one vdisk's snapshots - try: - for bucket in bucket_chain: - for snapshot in bucket['snapshots']: - VDiskController.delete_snapshot(vdisk_guid=snapshot['vdisk_guid'], - snapshot_id=snapshot['snapshot_id']) - except RuntimeError as ex: - vdisk_guid = next((snapshot['vdisk_guid'] for bucket in bucket_chain for snapshot in bucket['snapshots']), '') - vdisk_id_log = '' - if vdisk_guid: - vdisk_id_log = ' for VDisk with guid {}'.format(vdisk_guid) - if SCRUB_VDISK_EXCEPTION_MESSAGE in ex.message: - GenericController._logger.warning(format_log('Being scrubbed exception occurred while deleting snapshots{}'.format(vdisk_id_log))) - else: - GenericController._logger.exception(format_log('Exception occurred while deleting snapshots{}'.format(vdisk_id_log))) - exceptions.append(ex) - if exceptions: - raise RuntimeError('Exceptions occurred while deleting snapshots: \n- {}'.format('\n- '.join((str(ex) for ex in exceptions)))) - GenericController._logger.info(format_log('Delete snapshots finished for StorageDriver {0}')) + GenericController._logger.info('Delete snapshots started') + storagedriver = StorageDriver(storagedriver_guid) + snapshot_manager = SnapshotManager(storagedriver, group_id) + return snapshot_manager.delete_snapshots(timestamp) @staticmethod @ovs_task(name='ovs.generic.execute_scrub', schedule=Schedule(minute='0', hour='3'), ensure_single_info={'mode': 'DEDUPED'}) diff --git a/ovs/lib/helpers/generic/snapshots.py 
b/ovs/lib/helpers/generic/snapshots.py new file mode 100644 index 000000000..75adca5a7 --- /dev/null +++ b/ovs/lib/helpers/generic/snapshots.py @@ -0,0 +1,449 @@ +# Copyright (C) 2019 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. + +import time +from datetime import datetime, timedelta +from ovs.constants.vdisk import SNAPSHOT_POLICY_DEFAULT, SNAPSHOT_POLICY_LOCATION, SCRUB_VDISK_EXCEPTION_MESSAGE +from ovs.dal.hybrids.storagedriver import StorageDriver +from ovs.dal.hybrids.vdisk import VDisk +from ovs.dal.hybrids.vpool import VPool +from ovs.dal.lists.vpoollist import VPoolList +from ovs.dal.lists.vdisklist import VDiskList +from ovs_extensions.generic.toolbox import ExtensionsToolbox +from ovs.extensions.generic.configuration import Configuration +from ovs.lib.vdisk import VDiskController +from ovs.log.log_handler import LogHandler + +_logger = LogHandler.get('lib', name='generic tasks') +DAY = timedelta(1) + + +class RetentionPolicy(object): + + PARAMS = {'nr_of_snapshots': (int, {'min': 1}, True), + 'nr_of_days': (int, {'min': 1}, True), + 'consistency_first': (bool, None, True), + 'consistency_first_on': (list, int, True)} + + def __init__(self, nr_of_snapshots, nr_of_days, consistency_first=False, consistency_first_on=None): + # type: (int, int, bool, List[int]) -> None + """ + Initialize a retention policy + :param nr_of_snapshots: Number of snapshots to keep over the configured number of days + :type nr_of_snapshots: int + :param 
nr_of_days: Number of days to account the number of snapshots for + :type nr_of_days: int + :param consistency_first: Consistency of the snapshot is prioritized above the age + :type consistency_first: bool + :param consistency_first_on: Apply the consistency first on the snapshot numbers given + :type consistency_first_on: List[int] + """ + if consistency_first_on is None: + consistency_first_on = [] + + ExtensionsToolbox.verify_required_params(actual_params=locals(), required_params=self.PARAMS) + self.nr_of_snapshots = nr_of_snapshots + self.nr_of_days = nr_of_days + self.consistency_first = consistency_first + self.consistency_first_on = consistency_first_on + + @classmethod + def from_configuration(cls, configuration): + # type: (List[Dict[str, int]]) -> List[RetentionPolicy] + """ + A configuration should look like this: + [{'nr_of_snapshots': 24, 'nr_of_days': 1}, + {'nr_of_snapshots': 6, 'nr_of_days': 6}, + {'nr_of_snapshots': 3, 'nr_of_days': 21}] + The passed number of snapshots is an absolute number of snapshots and is evenly distributed across the number of days passed in the interval. 
+ This way, this config will result in storing + one snapshot per hour the first day + one snapshot per day the rest of the week + one snapshot per week the rest of the month + :param configuration: Configuration to use + :type configuration: List[Dict[str, int]] + :return: List[RetentionPolicy] + """ + return [cls(**c) for c in configuration] + + def __eq__(self, other): + # type: (RetentionPolicy) -> bool + """ + Equality operator + :param other: Other instance + :type other: RetentionPolicy + :return: True if equal else False + :rtype: bool + """ + if not isinstance(other, RetentionPolicy): + return NotImplemented # NotImplemented is a singleton, not callable + return vars(self) == vars(other) + + +class Snapshot(object): + + def __init__(self, guid, timestamp, label, is_consistent, is_automatic, is_sticky, in_backend, stored, vdisk_guid, *args, **kwargs): + # type: (str, int, str, bool, bool, bool, bool, int, str, *any, **any) -> None + """ + Initialize a snapshot object + :param guid: ID of the snapshot + :type guid: str + :param timestamp: Timestamp of the snapshot + :type timestamp: int + :param label: Snapshot label + :type label: str + :param is_consistent: Indicator that the snapshot is consistent + :type is_consistent: bool + :param is_automatic: Indicator that the snapshot is created automatically + :type is_automatic: bool + :param is_sticky: Indicator that the snapshot is a sticky one + :type is_sticky: bool + """ + self.guid = guid + self.timestamp = int(timestamp) + self.label = label + self.is_automatic = is_automatic + self.consistent = is_consistent + self.is_sticky = is_sticky + self.in_backend = in_backend + self.stored = stored + self.vdisk_guid = vdisk_guid + + def __str__(self): + """ + String representation + """ + prop_strings = ['{}: {}'.format(prop, val) for prop, val in vars(self).iteritems()] + prop_strings.append('humanized timestamp: {}'.format(datetime.fromtimestamp(self.timestamp).strftime('%Y-%m-%d %H:%M'))) + return 'Snapshot for vDisk {0} 
({1})'.format(self.vdisk_guid, ', '.join(prop_strings)) + + +class Bucket(object): + """ + Represents a bucket that holds items within a time frame + """ + def __init__(self, start, end, retention_policy=None): + # type: (int, int, RetentionPolicy) -> None + """ + Initialize a bucket + :param start: Start timestamp + :type start: int + :param end: End timestamp. 0 indicates that it has no end + :type end: int + :param retention_policy: Optional: associated retention policy. Used to determine the obsolete snapshots within the bucket + :type retention_policy: RetentionPolicy + """ + self.start = start + self.end = end + self.snapshots = [] + self.retention_policy = retention_policy + + def is_snapshot_in_interval(self, snapshot): + # type: (Snapshot) -> bool + """ + Determine if a snapshot fits within the current time interval + :param snapshot: Snapshot to check + :type snapshot: Snapshot + :return: True if the snapshot fits else False + :rtype: bool + """ + return self.start >= snapshot.timestamp > self.end + + def try_add_snapshot(self, snapshot): + # type: (Snapshot) -> bool + """ + Try to add the snapshot to the bucket + :param snapshot: Snapshot to try + :return: True if the snapshot could be added else False + :rtype: bool + """ + if self.is_snapshot_in_interval(snapshot): + self.snapshots.append(snapshot) + return True + return False + + def get_obsolete_snapshots(self, consistency_first=False, bucket_count=0): + # type: (bool, int) -> List[Snapshot] + """ + Retrieve all snapshots which are no longer within this interval + :param consistency_first: Consistency of the snapshot is prioritized above the age + :type consistency_first: bool + :param bucket_count: Number of the bucket in the chain. 
Used to determine if the current snapshot must be consistent + :type bucket_count: int + :return: List with Snapshots + :rtype: List[Snapshot] + """ + _ = consistency_first + + if self.end: + snapshot_to_keep = None + if self.retention_policy.consistency_first: + # Using + 1 as snapshot provided in the consistency_first_on are > 0 + if self.retention_policy.consistency_first_on and bucket_count + 1 in self.retention_policy.consistency_first_on: + best = None + for snapshot in self.snapshots: + if best is None: + best = snapshot + # Consistent is better than inconsistent + elif snapshot.consistent and not best.consistent: + best = snapshot + # Newer (larger timestamp) is better than older snapshots + elif snapshot.consistent == best.consistent and snapshot.timestamp > best.timestamp: + best = snapshot + snapshot_to_keep = best + if not snapshot_to_keep: + # First the oldest snapshot and remove all younger ones + oldest = None + for snapshot in self.snapshots: + if oldest is None: + oldest = snapshot + # Older (smaller timestamp) is the one we want to keep + elif snapshot.timestamp < oldest.timestamp: + oldest = snapshot + snapshot_to_keep = oldest + _logger.debug('Elected {} as the snapshot to keep within {}.'.format(snapshot_to_keep, self)) + obsolete_snapshots = [s for s in self.snapshots if s != snapshot_to_keep] + else: + # No end date for the interval, every snapshot is obsolete + obsolete_snapshots = self.snapshots + _logger.debug('Marking {} as obsolete within {} ({} in total)'.format(', '.join([str(s) for s in obsolete_snapshots]), self, len(obsolete_snapshots))) + return obsolete_snapshots + + def __str__(self): + """ + Stringified representation + """ + humanized_start = datetime.fromtimestamp(self.start).strftime('%Y-%m-%d %H:%M') + humanized_end = datetime.fromtimestamp(self.end).strftime('%Y-%m-%d %H:%M') if self.end else self.end + return 'Bucket (start: {0}, end: {1}) with [{2}]'.format(humanized_start, humanized_end, ','.join(str(s) for s in 
self.snapshots)) + + +class SnapshotManager(object): + """ + Manages snapshots of all vdisks on the storagedriver + """ + + def __init__(self, storagedriver, group_id=None): + # type: (StorageDriver, str) -> None + """ + :param storagedriver: StorageDriver to remove snapshots on + :type storagedriver: StorageDriver + :param group_id: ID of the group task. Used to identify which snapshot deletes were called during the scheduled task + :type group_id: str + """ + + self.storagedriver = storagedriver + self.group_id = group_id + self.log_id = 'Group job {} - '.format(group_id) if group_id else '' + self.global_policy = self.get_retention_policy() + self.vpool_policy = self.get_retention_policy_vpool(storagedriver.vpool) + + def get_policy_to_enforce(self, vdisk): + # type: (VDisk) -> List[RetentionPolicy] + """ + Retrieve the policy to enforce for a VDisk + :param vdisk: VDisk to retrieve policy for + :type vdisk: VDisk + :return: Policy to enforce + :rtype: List[RetentionPolicy] + """ + return self.get_retention_policy_vdisk(vdisk) or self.vpool_policy or self.global_policy + + @staticmethod + def get_retention_policy(): + # type: () -> List[RetentionPolicy] + """ + Retrieve the globally configured retention policy + """ + return RetentionPolicy.from_configuration(Configuration.get(SNAPSHOT_POLICY_LOCATION, default=SNAPSHOT_POLICY_DEFAULT)) + + @classmethod + def get_retention_policies_for_vpools(cls): + # type: () -> Dict[VPool, List[RetentionPolicy]] + """ + Map VPool with its retention policy (if any) + :return: Dict with VPool as keys and list of RetentionPolicy as value + :rtype: Dict[VPool, List[RetentionPolicy]] + """ + vpool_policies = {} + for vpool in VPoolList.get_vpools(): + policies_config = cls.get_retention_policy_vpool(vpool) + if policies_config: + vpool_policies[vpool] = policies_config + return vpool_policies + + @staticmethod + def get_retention_policy_vpool(vpool): + # type: (VPool) -> Union[List[RetentionPolicy], None] + """ + Retrieve the 
retention policy for the VPool (if any) + """ + snapshot_retention_policy = vpool.snapshot_retention_policy + if snapshot_retention_policy: + return RetentionPolicy.from_configuration(vpool.snapshot_retention_policy) + return None + + @staticmethod + def get_retention_policy_vdisk(vdisk): + # type: (VDisk) -> Union[List[RetentionPolicy], None] + """ + Retrieve the retention policy for the VDisk (if any) + """ + snapshot_retention_policy = vdisk.snapshot_retention_policy + if snapshot_retention_policy: + return RetentionPolicy.from_configuration(vdisk.snapshot_retention_policy) + return None + + @staticmethod + def make_timestamp(base, offset): + # type: (datetime, timedelta) -> int + """ + Create an integer based timestamp based on a datetime and a timedelta + :param base: Base timestamp + :type base: datetime + :param offset: Offset in days + :type offset: timedelta + :return: Timestamp + """ + return int(time.mktime((base - offset).timetuple())) + + @classmethod + def _get_snapshot_buckets(cls, start_time, policies): + # type: (datetime, List[RetentionPolicy]) -> List[Bucket] + """ + Retrieve the bucket distribution based on the policies + There is no overlapping period possible. 
+ Eg [{'nr_of_days': 1, 'nr_of_snapshots': 1}, {'nr_of_days': 2, 'nr_of_snapshots': 1}] spans three days, not two + There is always an additional bucket to keep track of older snapshots + :param start_time: Datetime to start counting from + :type start_time: datetime + :param policies: Retention policies to enforce + :type policies: RetentionPolicies + :return: + """ + buckets = [] + processed_retention_days = 0 + + for policy in policies: # type: RetentionPolicy + offset = processed_retention_days * DAY + number_of_days = policy.nr_of_days + number_of_snapshots = policy.nr_of_snapshots + snapshot_timedelta = number_of_days * DAY / number_of_snapshots + for i in xrange(0, number_of_snapshots): + buckets.append(Bucket(start=cls.make_timestamp(start_time, offset + snapshot_timedelta * i), + end=cls.make_timestamp(start_time, offset + snapshot_timedelta * (i + 1)), + retention_policy=policy)) + processed_retention_days += number_of_days + # Always add a bucket which falls out of the configured retention + buckets.append(Bucket(start=cls.make_timestamp(start_time, processed_retention_days * DAY), end=0)) + return buckets + + @staticmethod + def is_vdisk_running(vdisk): + # type: (VDisk) -> bool + """ + Determine if the VDisk is running + :return: True if the vdisk is running + :rtype: bool + """ + return vdisk.info['object_type'] in ['BASE'] + + def group_format_log(self, message): + # type: (str) -> str + """ + Adds group information to the log message + :param message: Message to log + :type message: str + :return: The formatted message + :rtype: str + """ + return '{}{}'.format(self.log_id, message) + + def delete_snapshots(self, timestamp): + # type: (float) -> Dict[str, List[str]] + """ + Delete snapshots & scrubbing policy + + Implemented default delete snapshot policy: + < 1d | 1d bucket | 1 | best of bucket | 1d + < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w + < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m + > 1m | delete + + :param timestamp: Timestamp 
to determine whether snapshots should be kept or not + :type timestamp: float + :return: Dict with vdisk guid as key, deleted snapshot ids as value + :rtype: dict + :raises: RuntimeError if any exception occurred during the snapshot deletion + """ + start_time = datetime.fromtimestamp(timestamp) + + # Get a list of all snapshots that are used as parents for clones + parent_snapshots = set([vd.parentsnapshot for vd in VDiskList.get_with_parent_snaphots()]) + + # Distribute all snapshots into buckets. These buckets specify an interval and are ordered young to old + bucket_chains = [] + exceptions = [] + for vdisk_guid in self.storagedriver.vdisks_guids: + try: + vdisk = VDisk(vdisk_guid) + if not self.is_vdisk_running(vdisk): + continue + vdisk.invalidate_dynamics('being_scrubbed') + if vdisk.being_scrubbed: + continue + + bucket_chain = self._get_snapshot_buckets(start_time, self.get_policy_to_enforce(vdisk)) + for vdisk_snapshot in vdisk.snapshots: + snapshot = Snapshot(vdisk_guid=vdisk.guid, **vdisk_snapshot) + if snapshot.is_sticky: + continue + if snapshot.guid in parent_snapshots: + _logger.info(self.group_format_log('Not deleting snapshot {0} because it has clones'.format(snapshot.vdisk_guid))) + continue + for bucket in bucket_chain: + bucket.try_add_snapshot(snapshot) + bucket_chains.append((vdisk, bucket_chain)) + except Exception as ex: + exceptions.append(ex) + + # Delete obsolete snapshots + removed_snapshot_map = {} + for index, vdisk_bucket_chain in enumerate(bucket_chains): + vdisk, bucket_chain = vdisk_bucket_chain + # @todo this consistency first behaviour changed with the new implementation + # There are now buckets based on hourly intervals which means the consistency of the first day is not guaranteed (unless the config is specified that way) + # consistency_first = index == 0 + try: + for bucket in bucket_chain: + obsolete_snapshots = bucket.get_obsolete_snapshots(False, index) + for snapshot in obsolete_snapshots: + deleted_snapshots = 
removed_snapshot_map.get(snapshot.vdisk_guid, []) + VDiskController.delete_snapshot(vdisk_guid=snapshot.vdisk_guid, snapshot_id=snapshot.guid) + deleted_snapshots.append(snapshot.guid) + removed_snapshot_map[snapshot.vdisk_guid] = deleted_snapshots + except RuntimeError as ex: + vdisk_log = ' for VDisk with guid {}'.format(vdisk.guid) + if SCRUB_VDISK_EXCEPTION_MESSAGE in ex.message: + _logger.warning(self.group_format_log('Being scrubbed exception occurred while deleting snapshots{}'.format(vdisk_log))) + else: + _logger.exception(self.group_format_log('Exception occurred while deleting snapshots{}'.format(vdisk_log))) + exceptions.append(ex) + + if exceptions: + raise RuntimeError('Exceptions occurred while deleting snapshots: \n- {}'.format('\n- '.join((str(ex) for ex in exceptions)))) + + return removed_snapshot_map diff --git a/ovs/lib/tests/generic_tests/test_snapshot.py b/ovs/lib/tests/generic_tests/test_snapshot.py index c5da3d186..a0b80eff2 100644 --- a/ovs/lib/tests/generic_tests/test_snapshot.py +++ b/ovs/lib/tests/generic_tests/test_snapshot.py @@ -17,22 +17,31 @@ """ Generic test module """ -import os import time import datetime import unittest -from ovs.constants.vdisk import SCRUB_VDISK_EXCEPTION_MESSAGE +from ovs.constants.vdisk import SNAPSHOT_POLICY_LOCATION, SCRUB_VDISK_EXCEPTION_MESSAGE +from ovs.dal.hybrids.storagedriver import StorageDriver +from ovs.dal.hybrids.vdisk import VDisk +from ovs.dal.lists.vpoollist import VPoolList from ovs.dal.tests.helpers import DalHelper +from ovs.extensions.generic.configuration import Configuration +from ovs.extensions.storageserver.tests.mockups import StorageRouterClient from ovs.lib.generic import GenericController from ovs.lib.vdisk import VDiskController -from ovs.extensions.storageserver.tests.mockups import StorageRouterClient +from ovs.lib.helpers.generic.snapshots import SnapshotManager, RetentionPolicy + +MINUTE = 60 +HOUR = MINUTE * 60 +DAY = datetime.timedelta(1) class 
SnapshotTestCase(unittest.TestCase): """ - This test class will validate the various scenarios of the Generic logic + Test the scheduling of snapshot creation and the enforced retention policy Actual snapshot logic is tested in the vdisk_tests.test_snapshot """ + def setUp(self): """ (Re)Sets the stores on every test @@ -94,16 +103,14 @@ def test_clone_snapshot(self): 'storagerouters': [1], 'storagedrivers': [(1, 1, 1)]} # (, , ) ) - vdisk_1 = structure['vdisks'][1] storagedriver_1 = structure['storagedrivers'][1] + vdisk_1 = structure['vdisks'][1] [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0 base = datetime.datetime.now().date() - base_timestamp = self._make_timestamp(base, datetime.timedelta(1)) - minute = 60 - hour = minute * 60 + base_timestamp = self._make_timestamp(base, DAY) for h in [6, 12, 18]: - timestamp = base_timestamp + (hour * h) + timestamp = base_timestamp + (HOUR * h) VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, metadata={'label': 'snapshot_{0}:30'.format(str(h)), 'is_consistent': True, @@ -117,24 +124,26 @@ def test_clone_snapshot(self): clone_vdisk.save() for day in range(10): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) + base_timestamp = self._make_timestamp(base, DAY * day) for h in [6, 12, 18]: - timestamp = base_timestamp + (hour * h) + timestamp = base_timestamp + (HOUR * h) VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid, metadata={'label': 'snapshot_{0}:30'.format(str(h)), 'is_consistent': True, 'timestamp': str(timestamp)}) - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * 2) + base_timestamp = self._make_timestamp(base, DAY * 2) GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) + timestamp=base_timestamp + (MINUTE * 30)) self.assertIn(base_snapshot_guid, vdisk_1.snapshot_ids, 'Snapshot was deleted while there are still clones of it') - 
def test_snapshot_automatic_consistent(self): + @staticmethod + def _build_vdisk_structure(): + # type: () -> Tuple[Dict[any, any], VDisk, StorageDriver] """ - is_automatic: True, is_consistent: True --> Automatically created consistent snapshots should be deleted + Build the DAL structure and retrieve the vdisk + :return: VDisk object + :rtype: Tuple[Dict[any, any], VDisk, StorageDriver] """ - minute = 60 - hour = minute * 60 structure = DalHelper.build_dal_structure( {'vpools': [1], 'vdisks': [(1, 1, 1, 1)], # (, , , ) @@ -142,309 +151,157 @@ def test_snapshot_automatic_consistent(self): 'storagerouters': [1], 'storagedrivers': [(1, 1, 1)]} # (, , ) ) - base = datetime.datetime.now().date() - vdisk_1 = structure['vdisks'][1] - storagedriver_1 = structure['storagedrivers'][1] + storagedriver = structure['storagedrivers'][1] + storagedriver.invalidate_dynamics(['vdisks_guids']) + return structure, structure['vdisks'][1], storagedriver - label = 'c' - # Extra time to add to the hourly timestamps - additional_time = minute * 30 - # Hours to create a snapshot on - sticky_hours = [] - consistent_hours = [2] - inconsistent_hours = [] + def _create_validate_snapshots(self, vdisk_structure, start_time, sticky_hours, consistent_hours, inconsistent_hours, + snapshot_time_offset=0, automatic_snapshots=True, number_of_days=35): + # type: (VDisk, datetime.date, List[int], List[int], List[int], int, bool, int) -> None + """ + Create and validate snapshot creation and deletion sequence + This is suitable to enforce the default policy which is: + < 1d | 1d bucket | 1 | best of bucket | 1d + < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w + < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m + > 1m | delete + :param vdisk_structure: Structure to use + :type vdisk_structure: Dict[any, any] + :param start_time: Time when snapshots started to be made + :type start_time: datetime.datetime + :param sticky_hours: Hours that the sticky snapshots were made on + :type sticky_hours: 
List[int] + :param consistent_hours: Hours that the consistent snapshots were made on + :type consistent_hours: List[int] + :param inconsistent_hours: Hours that the inconsistent snapshots were made on + :type inconsistent_hours: List[int] + :param snapshot_time_offset: Offset time to create snapshot. Defaults to creating snapshot on the hour mark + :type snapshot_time_offset: int + :param automatic_snapshots: Indicate that the snapshots are made automatically (because of the scheduling) + :type automatic_snapshots: bool + """ + structure, vdisk, storagedriver = vdisk_structure # Snapshot details is_sticky = len(sticky_hours) > 0 is_consistent = len(consistent_hours) > 0 - is_automatic = True + label = 'c' if is_consistent else 'i' - for day in xrange(35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) + for day in xrange(number_of_days): + base_timestamp = self._make_timestamp(start_time, DAY * day) self._print_message('') self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) self._print_message('- Deleting snapshots') - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) + # The absolute timestamp is used when providing one. 
Going back a day to skip a day similar to the scheduled task + delete_snapshot_timestamp = base_timestamp + (MINUTE * 30) - DAY.total_seconds() + GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver.guid, + timestamp=delete_snapshot_timestamp) - self._validate(vdisk=vdisk_1, + self._validate(vdisk=vdisk, current_day=day, - base_date=base, + start_time=start_time, sticky_hours=sticky_hours, consistent_hours=consistent_hours, inconsistent_hours=inconsistent_hours) self._print_message('- Creating snapshots') for x in consistent_hours + inconsistent_hours: - timestamp = base_timestamp + (hour * x) + additional_time - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, + timestamp = base_timestamp + (HOUR * x) + snapshot_time_offset + VDiskController.create_snapshot(vdisk_guid=vdisk.guid, metadata={'label': 'ss_{0}_{1}:00'.format(label, x), 'is_sticky': is_sticky, 'timestamp': str(timestamp), - 'is_automatic': is_automatic, + 'is_automatic': automatic_snapshots, 'is_consistent': is_consistent}) + def test_snapshot_automatic_consistent(self): + """ + is_automatic: True, is_consistent: True --> Automatically created consistent snapshots should be deleted + """ + self._create_validate_snapshots(vdisk_structure=self._build_vdisk_structure(), + start_time=datetime.datetime.now().date(), + sticky_hours=[], + consistent_hours=[2], + inconsistent_hours=[], + snapshot_time_offset=MINUTE * 30, # Extra time to add to the hourly timestamps + automatic_snapshots=True) + def test_snapshot_automatic_not_consistent(self): """ is_automatic: True, is_consistent: False --> Automatically created non-consistent snapshots should be deleted """ - minute = 60 - hour = minute * 60 - structure = DalHelper.build_dal_structure( - {'vpools': [1], - 'vdisks': [(1, 1, 1, 1)], # (, , , ) - 'mds_services': [(1, 1)], - 'storagerouters': [1], - 'storagedrivers': [(1, 1, 1)]} # (, , ) - ) - base = datetime.datetime.now().date() - vdisk_1 = structure['vdisks'][1] - 
storagedriver_1 = structure['storagedrivers'][1] - - label = 'i' - # Extra time to add to the hourly timestamps - additional_time = 0 - # Hours to create a snapshot on - sticky_hours = [] - consistent_hours = [] - inconsistent_hours = [2] - # Snapshot details - is_sticky = len(sticky_hours) > 0 - is_consistent = len(consistent_hours) > 0 - is_automatic = True - - for day in xrange(35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) - self._print_message('') - self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) - - self._print_message('- Deleting snapshots') - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) - - self._validate(vdisk=vdisk_1, - current_day=day, - base_date=base, - sticky_hours=sticky_hours, - consistent_hours=consistent_hours, - inconsistent_hours=inconsistent_hours) - - self._print_message('- Creating snapshots') - for x in consistent_hours + inconsistent_hours: - timestamp = base_timestamp + (hour * x) + additional_time - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_{0}_{1}:00'.format(label, x), - 'is_sticky': is_sticky, - 'timestamp': str(timestamp), - 'is_automatic': is_automatic, - 'is_consistent': is_consistent}) + self._create_validate_snapshots(vdisk_structure=self._build_vdisk_structure(), + start_time=datetime.datetime.now().date(), + sticky_hours=[], + consistent_hours=[], + inconsistent_hours=[2], + snapshot_time_offset=0, # Extra time to add to the hourly timestamps + automatic_snapshots=True) def test_snapshot_non_automatic_consistent(self): """ is_automatic: False, is_consistent: True --> Manually created consistent snapshots should be deleted """ - minute = 60 - hour = minute * 60 - structure = DalHelper.build_dal_structure( - {'vpools': [1], - 'vdisks': [(1, 1, 1, 1)], # (, , , ) - 'mds_services': [(1, 1)], - 
'storagerouters': [1], - 'storagedrivers': [(1, 1, 1)]} # (, , ) - ) - base = datetime.datetime.now().date() - vdisk_1 = structure['vdisks'][1] - storagedriver_1 = structure['storagedrivers'][1] - - label = 'c' - # Extra time to add to the hourly timestamps - additional_time = minute * 30 - # Hours to create a snapshot on - sticky_hours = [] - consistent_hours = [2] - inconsistent_hours = [] - # Snapshot details - is_sticky = len(sticky_hours) > 0 - is_consistent = len(consistent_hours) > 0 - is_automatic = False - - for day in xrange(35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) - self._print_message('') - self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) - - self._print_message('- Deleting snapshots') - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) - - self._validate(vdisk=vdisk_1, - current_day=day, - base_date=base, - sticky_hours=sticky_hours, - consistent_hours=consistent_hours, - inconsistent_hours=inconsistent_hours) - - self._print_message('- Creating snapshots') - for x in consistent_hours + inconsistent_hours: - timestamp = base_timestamp + (hour * x) + additional_time - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_{0}_{1}:00'.format(label, x), - 'is_sticky': is_sticky, - 'timestamp': str(timestamp), - 'is_automatic': is_automatic, - 'is_consistent': is_consistent}) + self._create_validate_snapshots(vdisk_structure=self._build_vdisk_structure(), + start_time=datetime.datetime.now().date(), + sticky_hours=[], + consistent_hours=[2], + inconsistent_hours=[], + snapshot_time_offset=MINUTE * 30, # Extra time to add to the hourly timestamps + automatic_snapshots=False) def test_snapshot_not_automatic_not_consistent(self): """ is_automatic: False, is_consistent: False --> Manually created non-consistent snapshots should be deleted 
""" - minute = 60 - hour = minute * 60 - structure = DalHelper.build_dal_structure( - {'vpools': [1], - 'vdisks': [(1, 1, 1, 1)], # (, , , ) - 'mds_services': [(1, 1)], - 'storagerouters': [1], - 'storagedrivers': [(1, 1, 1)]} # (, , ) - ) - base = datetime.datetime.now().date() - vdisk_1 = structure['vdisks'][1] - storagedriver_1 = structure['storagedrivers'][1] - - label = 'i' - # Extra time to add to the hourly timestamps - additional_time = 0 - # Hours to create a snapshot on - sticky_hours = [] - consistent_hours = [] - inconsistent_hours = [2] - # Snapshot details - is_sticky = len(sticky_hours) > 0 - is_consistent = len(consistent_hours) > 0 - is_automatic = False - - for day in xrange(35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) - self._print_message('') - self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) - - self._print_message('- Deleting snapshots') - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) - - self._validate(vdisk=vdisk_1, - current_day=day, - base_date=base, - sticky_hours=sticky_hours, - consistent_hours=consistent_hours, - inconsistent_hours=inconsistent_hours) - - self._print_message('- Creating snapshots') - for x in consistent_hours + inconsistent_hours: - timestamp = base_timestamp + (hour * x) + additional_time - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_{0}_{1}:00'.format(label, x), - 'is_sticky': is_sticky, - 'timestamp': str(timestamp), - 'is_automatic': is_automatic, - 'is_consistent': is_consistent}) + self._create_validate_snapshots(vdisk_structure=self._build_vdisk_structure(), + start_time=datetime.datetime.now().date(), + sticky_hours=[], + consistent_hours=[], + inconsistent_hours=[2], + snapshot_time_offset=0, # Extra time to add to the hourly timestamps + automatic_snapshots=False) def 
test_snapshot_sticky(self): """ is_sticky: True --> Sticky snapshots of any kind should never be deleted (Only possible to delete manually) """ - minute = 60 - hour = minute * 60 - structure = DalHelper.build_dal_structure( - {'vpools': [1], - 'vdisks': [(1, 1, 1, 1)], # (, , , ) - 'mds_services': [(1, 1)], - 'storagerouters': [1], - 'storagedrivers': [(1, 1, 1)]} # (, , ) - ) - base = datetime.datetime.now().date() - vdisk_1 = structure['vdisks'][1] - storagedriver_1 = structure['storagedrivers'][1] - - label = 'c' - # Extra time to add to the hourly timestamps - additional_time = minute * 30 - # Hours to create a snapshot on - sticky_hours = [2] - consistent_hours = [2] - inconsistent_hours = [] - # Snapshot details - is_sticky = len(sticky_hours) > 0 - is_consistent = len(consistent_hours) > 0 - is_automatic = False - - for day in xrange(35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) - self._print_message('') - self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) - - self._print_message('- Deleting snapshots') - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) - - self._validate(vdisk=vdisk_1, - current_day=day, - base_date=base, - sticky_hours=sticky_hours, - consistent_hours=consistent_hours, - inconsistent_hours=inconsistent_hours) - - self._print_message('- Creating snapshots') - for x in consistent_hours + inconsistent_hours: - timestamp = base_timestamp + (hour * x) + additional_time - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_{0}_{1}:00'.format(label, x), - 'is_sticky': is_sticky, - 'timestamp': str(timestamp), - 'is_automatic': is_automatic, - 'is_consistent': is_consistent}) - - def test_happypath(self): + self._create_validate_snapshots(vdisk_structure=self._build_vdisk_structure(), + start_time=datetime.datetime.now().date(), 
+ sticky_hours=[2], + consistent_hours=[2], + inconsistent_hours=[], + snapshot_time_offset=MINUTE * 30, # Extra time to add to the hourly timestamps + automatic_snapshots=False) + + def test_happy_path(self): """ Validates the happy path; Hourly snapshots are taken with a few manual consistent every now and then. The delete policy is executed every day """ - structure = DalHelper.build_dal_structure( - {'vpools': [1], - 'vdisks': [(1, 1, 1, 1)], # (, , , ) - 'mds_services': [(1, 1)], - 'storagerouters': [1], - 'storagedrivers': [(1, 1, 1)]} # (, , ) - ) - vdisk_1 = structure['vdisks'][1] - storagedriver_1 = structure['storagedrivers'][1] + structure, vdisk_1, storagedriver_1 = self._build_vdisk_structure() [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0 # Run the testing scenario - travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' - if travis is True: - self._print_message('Running in Travis, reducing output.') base = datetime.datetime.now().date() - minute = 60 - hour = minute * 60 consistent_hours = [6, 12, 18] inconsistent_hours = xrange(2, 23) for day in xrange(0, 35): - base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) + base_timestamp = self._make_timestamp(base, DAY * day) self._print_message('') self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))) # At the start of the day, delete snapshot policy runs at 00:30 self._print_message('- Deleting snapshots') GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid, - timestamp=base_timestamp + (minute * 30)) + timestamp=base_timestamp + (MINUTE * 30) - DAY.total_seconds()) # Validate snapshots self._print_message('- Validating snapshots') self._validate(vdisk=vdisk_1, current_day=day, - base_date=base, + start_time=base, sticky_hours=[], consistent_hours=consistent_hours, inconsistent_hours=inconsistent_hours) @@ -454,17 +311,18 
@@ def test_happypath(self): # - Create consistent snapshot at 6:30, 12:30, 18:30 self._print_message('- Creating snapshots') for h in inconsistent_hours: - timestamp = base_timestamp + (hour * h) - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_i_{0}:00'.format(str(h)), - 'is_consistent': False, - 'timestamp': str(timestamp)}) + timestamp = base_timestamp + (HOUR * h) + snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, + metadata={'label': 'ss_i_{0}:00'.format(str(h)), + 'is_consistent': False, + 'timestamp': str(timestamp)}) + self._print_message('- Created inconsistent snapshot {} for vDisk {} on hour {}'.format(snapshot_id, vdisk_1.guid, h)) if h in consistent_hours: - ts = (timestamp + (minute * 30)) - VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, - metadata={'label': 'ss_c_{0}:30'.format(str(h)), - 'is_consistent': True, - 'timestamp': str(ts)}) + ts = (timestamp + (MINUTE * 30)) + snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, metadata={'label': 'ss_c_{0}:30'.format(str(h)), + 'is_consistent': True, + 'timestamp': str(ts)}) + self._print_message('- Created consistent snapshot {} for vDisk {} on hour {}'.format(snapshot_id, vdisk_1.guid, h)) def test_exception_handling(self): """ @@ -483,7 +341,7 @@ def raise_an_exception(*args, **kwargs): vdisk_1, vdisk_2 = structure['vdisks'].values() storagedriver_1 = structure['storagedrivers'][1] - + storagedriver_1.invalidate_dynamics(['vdisks_guids']) vdisks = [vdisk_1, vdisk_2] for vdisk in vdisks: @@ -497,7 +355,7 @@ def raise_an_exception(*args, **kwargs): if vdisk == vdisk_1: StorageRouterClient.delete_snapshot_callbacks[vdisk.volume_id] = {snapshot_id: raise_an_exception} with self.assertRaises(RuntimeError): - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid) + GenericController.delete_snapshots_storagedriver(storagedriver_1.guid) self.assertEqual(1, len(vdisk_2.snapshot_ids), 'One 
snapshot should be removed for vdisk 2') self.assertEqual(2, len(vdisk_1.snapshot_ids), 'No snapshots should be removed for vdisk 1') @@ -517,7 +375,7 @@ def raise_an_exception(*args, **kwargs): ) vdisk_1 = structure['vdisks'][1] storagedriver_1 = structure['storagedrivers'][1] - + storagedriver_1.invalidate_dynamics(['vdisks_guids']) [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0 for i in xrange(0, 2): @@ -528,8 +386,8 @@ def raise_an_exception(*args, **kwargs): snapshot_id = VDiskController.create_snapshot(vdisk_1.guid, metadata) StorageRouterClient.delete_snapshot_callbacks[vdisk_1.volume_id] = {snapshot_id: raise_an_exception} - GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid) - self.assertEqual(2, len(vdisk_1.snapshot_ids), 'No snapshots should be removed for vdisk 1') + GenericController.delete_snapshots_storagedriver(storagedriver_1.guid) + self.assertEqual(2, len(vdisk_1.snapshot_ids), "No snapshots should be removed for vdisk 1") ################## # HELPER METHODS # @@ -538,114 +396,209 @@ def _print_message(self, message): if self.debug is True: print message - def _validate(self, vdisk, current_day, base_date, sticky_hours, consistent_hours, inconsistent_hours): + def _visualise_snapshots(self, vdisk, current_day, start_time): + # type: (VDisk, int, datetime.date) -> None + """ + Visualize the snapshots of the VDisk + :param vdisk: VDisk object + :type vdisk: VDisk + :param current_day: Number of the current day + :type current_day: int + :param start_time: Time when snapshots started to be made + :type start_time: datetime.datetime + :return: + """ + snapshots = {} + for snapshot in vdisk.snapshots: + snapshots[int(snapshot['timestamp'])] = snapshot + for day in xrange(0, current_day + 1): + timestamp = self._make_timestamp(start_time, DAY * day) + visual = '\t\t- {0} '.format(datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')) + for t in xrange(timestamp, 
timestamp + HOUR * 24, MINUTE * 30): + if t in snapshots: + visual += 'S' if snapshots[t]['is_sticky'] else 'C' if snapshots[t]['is_consistent'] else 'R' + else: + visual += '-' + self._print_message(visual) + + def _validate(self, vdisk, current_day, start_time, sticky_hours, consistent_hours, inconsistent_hours): + # type: (VDisk, int, datetime.date, List[int], List[int], List[int]) -> None """ This validates assumes the same policy as currently implemented in the policy code itself. In case the policy strategy ever changes, this unittest should be adapted as well or rewritten to load the implemented policy + :param vdisk: VDisk to validate + :type vdisk: VDisk + :param current_day: Number of the current day + :type current_day: int + :param start_time: Time when snapshots started to be made + :type start_time: datetime.date + :param sticky_hours: Hours that the sticky snapshots were made on + :type sticky_hours: List[int] + :param consistent_hours: Hours that the consistent snapshots were made on + :type consistent_hours: List[int] + :param inconsistent_hours: Hours that the inconsistent snapshots were made on + :type inconsistent_hours: List[int] """ - # Implemented policy: # < 1d | 1d bucket | 1 | best of bucket | 1d # < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w # < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m # > 1m | delete - minute = 60 - hour = minute * 60 - - self._print_message(' - {0}'.format(vdisk.name)) + self._print_message('\t- VDisk {0}'.format(vdisk.name)) # Visualisation - if self.debug is True: - snapshots = {} - for snapshot in vdisk.snapshots: - snapshots[int(snapshot['timestamp'])] = snapshot - for day in xrange(0, current_day + 1): - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) - visual = ' - {0} '.format(datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')) - for t in xrange(timestamp, timestamp + hour * 24, minute * 30): - if t in snapshots: - visual += 'S' if snapshots[t]['is_sticky'] else 
'C' if snapshots[t]['is_consistent'] else 'R' - else: - visual += '-' - self._print_message(visual) - - sticky = [int(s['timestamp']) for s in vdisk.snapshots if s['is_sticky'] is True] - consistent = [int(s['timestamp']) for s in vdisk.snapshots if s['is_consistent'] is True] - inconsistent = [int(s['timestamp']) for s in vdisk.snapshots if s['is_consistent'] is False] - self._print_message(' - {0} consistent, {1} inconsistent, {2} sticky'.format(len(consistent), len(inconsistent), len(sticky))) + if self.debug: + self._visualise_snapshots(vdisk, current_day, start_time) + + sticky = [int(s['timestamp']) for s in vdisk.snapshots if s['is_sticky']] + consistent = [int(s['timestamp']) for s in vdisk.snapshots if s['is_consistent']] + inconsistent = [int(s['timestamp']) for s in vdisk.snapshots if not s['is_consistent']] + self._print_message('\t\t- {0} consistent, {1} inconsistent, {2} sticky'.format(len(consistent), len(inconsistent), len(sticky))) # Check for correct amount of snapshots - amount_sticky = len(sticky_hours) * current_day + amount_sticky = len(sticky_hours) * current_day # Stickies do not get removed automatically amount_consistent = 0 amount_inconsistent = 0 - pointer = 0 - if pointer < current_day: + processed_days = 0 + # First 24h period which are skipped so all taken snapshots are kept + if processed_days < current_day: amount_consistent += len(consistent_hours) amount_inconsistent += len(inconsistent_hours) - pointer += 1 - while pointer < current_day and pointer <= 7: + processed_days += 1 + + # One consistent snapshot per day + while processed_days < current_day and processed_days <= 7: if len(consistent_hours) > 0: - amount_consistent += 1 # One consistent snapshot per day + amount_consistent += 1 else: amount_inconsistent += 1 - pointer += 1 - while pointer < current_day and pointer <= 28: + processed_days += 1 + # One consistent snapshot per week + while processed_days < current_day and processed_days <= 28: if len(consistent_hours) > 0: - 
amount_consistent += 1 # One consistent snapshot per week + amount_consistent += 1 else: amount_inconsistent += 1 - pointer += 7 + processed_days += 7 self.assertEqual(first=len(sticky), second=amount_sticky, msg='Wrong amount of sticky snapshots: {0} vs expected {1}'.format(len(sticky), amount_sticky)) - if len(sticky) == 0: - self.assertEqual(first=len(consistent), - second=amount_consistent, - msg='Wrong amount of consistent snapshots: {0} vs expected {1}'.format(len(consistent), amount_consistent)) - self.assertEqual(first=len(inconsistent), - second=amount_inconsistent, - msg='Wrong amount of inconsistent snapshots: {0} vs expected {1}'.format(len(inconsistent), amount_inconsistent)) + if not sticky: + self.assertEqual(first=amount_consistent, second=len(consistent), + msg='Wrong amount of consistent snapshots') + self.assertEqual(first=amount_inconsistent, second=len(inconsistent), + msg='Wrong amount of inconsistent snapshots') # Check of the correctness of the snapshot timestamp - if len(consistent_hours) > 0: + if consistent_hours: sn_type = 'consistent' container = consistent - time_diff = (hour * consistent_hours[-1]) + (minute * 30) + time_diff = (HOUR * consistent_hours[-1]) + (MINUTE * 30) else: sn_type = 'inconsistent' container = inconsistent - time_diff = (hour * inconsistent_hours[-1]) + time_diff = (HOUR * inconsistent_hours[-1]) for day in xrange(0, current_day): for h in sticky_hours: - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) + (hour * h) + (minute * 30) - self.assertIn(member=timestamp, - container=sticky, + timestamp = self._make_timestamp(start_time, DAY * day) + (HOUR * h) + (MINUTE * 30) + self.assertIn(member=timestamp, container=sticky, msg='Expected sticky snapshot for {0} at {1}'.format(vdisk.name, self._from_timestamp(timestamp))) if day == (current_day - 1): for h in inconsistent_hours: - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) + (hour * h) - 
self.assertIn(member=timestamp, - container=inconsistent, + timestamp = self._make_timestamp(start_time, DAY * day) + (HOUR * h) + self.assertIn(member=timestamp, container=inconsistent, msg='Expected hourly inconsistent snapshot for {0} at {1}'.format(vdisk.name, self._from_timestamp(timestamp))) for h in consistent_hours: - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) + (hour * h) + (minute * 30) - self.assertIn(member=timestamp, - container=consistent, + timestamp = self._make_timestamp(start_time, DAY * day) + (HOUR * h) + (MINUTE * 30) + self.assertIn(member=timestamp, container=consistent, msg='Expected random consistent snapshot for {0} at {1}'.format(vdisk.name, self._from_timestamp(timestamp))) elif day > (current_day - 7): - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) + time_diff - self.assertIn(member=timestamp, - container=container, + timestamp = self._make_timestamp(start_time, DAY * day) + time_diff + self.assertIn(member=timestamp, container=container, msg='Expected daily {0} snapshot for {1} at {2}'.format(sn_type, vdisk.name, self._from_timestamp(timestamp))) elif day % 7 == 0 and day > 28: - timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day) + time_diff - self.assertIn(member=timestamp, - container=container, + timestamp = self._make_timestamp(start_time, DAY * day) + time_diff + self.assertIn(member=timestamp, container=container, msg='Expected weekly {0} snapshot for {1} at {2}'.format(sn_type, vdisk.name, self._from_timestamp(timestamp))) + def test_retention_policy_configuration_levels(self): + """ + Test the different retention policy settings + :return: + """ + global_config = [{'nr_of_days': 30, 'nr_of_snapshots': 30}] + vpool_config = [{'nr_of_days': 7, 'nr_of_snapshots': 7}] + vdisk_config = [{'nr_of_days': 1, 'nr_of_snapshots': 1}] + + Configuration.set(SNAPSHOT_POLICY_LOCATION, global_config) + structure, vdisk_1, storagedriver_1 = 
self._build_vdisk_structure() + vpool_1 = storagedriver_1.vpool + + # Global configuration + snapshot_manager = SnapshotManager(storagedriver_1) + + policy_check = RetentionPolicy.from_configuration(global_config)[0] + policy = snapshot_manager.get_policy_to_enforce(vdisk_1)[0] + self.assertEqual(policy_check, policy) + + # VPool configuration + vpool_1.snapshot_retention_policy = vpool_config + vpool_1.save() + + snapshot_manager = SnapshotManager(storagedriver_1) + + policy_check = RetentionPolicy.from_configuration(vpool_config)[0] + policy = snapshot_manager.get_policy_to_enforce(vdisk_1)[0] + self.assertEqual(policy_check, policy) + + # VDisk Configuration + snapshot_manager = SnapshotManager(storagedriver_1) + + vdisk_1.snapshot_retention_policy = vdisk_config + vdisk_1.save() + + policy_check = RetentionPolicy.from_configuration(vdisk_config)[0] + policy = snapshot_manager.get_policy_to_enforce(vdisk_1)[0] + self.assertEqual(policy_check, policy) + + def test_retention_policy_overlap(self): + """ + Test the application of the retention policy settings with overlapping timespans + """ + global_config = [{'nr_of_days': 1, 'nr_of_snapshots': 1}, + {'nr_of_days': 2, 'nr_of_snapshots': 1, 'consistency_first': True}] + # The first theory (invalid one, but good to write down nonetheless: + # Day 1 will have 1 consistent and 1 inconsistent snapshot. Inconsistent is older + # Day 2 will have 2 inconsistent snapshots + # The goal is to have both buckets still retain their snapshots + # The bucket logic will distribute all snapshots to buckets that can fit them. 
Both buckets have the same start day + # Bucket 1 (1 day) will have the consistent and inconsistent one + # Bucket 2 (2 days) will have all the snapshots + # Bucket 1 will choose the oldest, discarding the consistent one when removing + # Bucket 2 will choose the consistent one above all else + # In the end, bucket 2 won't have a snapshot + + # After reviewing the code: no overlap is possible as it increments the days that are processed. + # It doesn't review every period by itself, which would be a nightmare on it's own + Configuration.set(SNAPSHOT_POLICY_LOCATION, global_config) + structure, vdisk_1, storagedriver_1 = self._build_vdisk_structure() + start_time = datetime.datetime.now().date() + snapshots = [] + for day, snapshot_consistencies in {1: [True, False], + 2: [False, False]}.iteritems(): + day_timestamp = self._make_timestamp(start_time, day * DAY * -1) + for index, consistency in enumerate(snapshot_consistencies): + snapshot_timestamp = day_timestamp + (HOUR * index) + snapshots.append(VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, + metadata={'label': 'snapshot_{0}:30'.format(str(index)), + 'is_consistent': consistency, + 'timestamp': str(snapshot_timestamp)})) + GenericController.delete_snapshots_storagedriver(storagedriver_1.guid) + self.assertEqual(2, len(vdisk_1._snapshot_ids())) + @staticmethod def _make_timestamp(base, offset): return int(time.mktime((base + offset).timetuple())) diff --git a/webapps/api/backend/decorators.py b/webapps/api/backend/decorators.py index bfc44807d..a7fb2dfd1 100644 --- a/webapps/api/backend/decorators.py +++ b/webapps/api/backend/decorators.py @@ -136,7 +136,7 @@ def validate_get_version(request): :type Request: Union[WSGIRequest, Request] :return: The parsed and non parsed request :rtype: Tuple[int, str] - :exception: HttpNotAcceptableException when the version is not within the supported versions of the api + :raises: HttpNotAcceptableException when the version is not within the supported versions of the 
api """ version_match = regex.match(request.META['HTTP_ACCEPT']) if version_match is not None: @@ -257,7 +257,7 @@ def load_dataobject_instance(passed_kwargs): :type passed_kwargs: Dict[str, any] :return: The loaded instance (if any) :rtype: Union[DataObject, None] - :exception HttpNotFoundException if the requested object could not be found + :raises HttpNotFoundException if the requested object could not be found """ instance = None if 'pk' in passed_kwargs and object_type is not None: diff --git a/webapps/api/backend/views/storagerouters.py b/webapps/api/backend/views/storagerouters.py index 00be16e4d..bc5eb292e 100644 --- a/webapps/api/backend/views/storagerouters.py +++ b/webapps/api/backend/views/storagerouters.py @@ -25,11 +25,13 @@ from api.backend.decorators import required_roles, return_list, return_object, return_task, return_simple, load, log from api.backend.exceptions import HttpNotAcceptableException from api.backend.serializers.serializers import FullSerializer +from ovs.constants.vdisk import SNAPSHOT_POLICY_LOCATION, SNAPSHOT_POLICY_DEFAULT from ovs.dal.datalist import DataList from ovs.dal.hybrids.domain import Domain from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.j_storagerouterdomain import StorageRouterDomain from ovs.dal.lists.storagerouterlist import StorageRouterList +from ovs.extensions.generic.configuration import Configuration from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.lib.disk import DiskController from ovs.lib.mdsservice import MDSServiceController @@ -38,6 +40,7 @@ from ovs.lib.storagerouter import StorageRouterController from ovs.lib.update import UpdateController from ovs.lib.vdisk import VDiskController +from ovs.lib.helpers.generic.snapshots import RetentionPolicy class StorageRouterViewSet(viewsets.ViewSet): @@ -651,3 +654,41 @@ def get_update_information(self): - prerequisites that have not been met """ return UpdateController.get_update_information_all.delay() + + 
@action()
+    @log()
+    @required_roles(['read', 'write', 'manage'])
+    @return_simple()
+    @load(StorageRouter)
+    def snapshot_retention_policy(self, storagerouter, policy):
+        """
+        Set the snapshot retention policy on the global (cluster-wide) level
+        :param storagerouter: The given Storagerouter. Not used but necessary because of the lack of generic routes
+        :type storagerouter: StorageRouter
+        :param policy: Retention policy to set
+        :type policy: List[Dict[str, int]]
+        :return: None
+        :rtype: None
+        """
+        _ = storagerouter
+        try:
+            RetentionPolicy.from_configuration(policy)
+        except:
+            raise ValueError('Policy is not properly formatted')
+        Configuration.set(SNAPSHOT_POLICY_LOCATION, policy)
+
+    @link()
+    @log()
+    @required_roles(['read', 'manage'])
+    @return_simple()
+    @load(StorageRouter)
+    def global_snapshot_retention_policy(self, storagerouter):
+        """
+        Get the snapshot retention policy on the global (cluster-wide) level
+        :param storagerouter: The given Storagerouter. Not used but necessary because of the lack of generic routes
+        :type storagerouter: StorageRouter
+        :return: The snapshot policy
+        :rtype: List[Dict[str, int]]
+        """
+        _ = storagerouter
+        return Configuration.get(SNAPSHOT_POLICY_LOCATION, default=SNAPSHOT_POLICY_DEFAULT)
diff --git a/webapps/api/backend/views/vdisks.py b/webapps/api/backend/views/vdisks.py
index 68ac512a7..387d72914 100644
--- a/webapps/api/backend/views/vdisks.py
+++ b/webapps/api/backend/views/vdisks.py
@@ -21,7 +21,7 @@ from rest_framework import viewsets
 from rest_framework.decorators import action, link
 from rest_framework.permissions import IsAuthenticated
-from api.backend.decorators import load, log, required_roles, return_list, return_object, return_task
+from api.backend.decorators import load, log, required_roles, return_list, return_object, return_task, return_simple
 from api.backend.exceptions import HttpNotAcceptableException
 from ovs.dal.datalist import DataList
 from ovs.dal.hybrids.diskpartition import DiskPartition
@@ -32,6 +32,7 @@ from
ovs.dal.lists.vdisklist import VDiskList
 from ovs.lib.generic import GenericController
 from ovs.lib.vdisk import VDiskController
+from ovs.lib.helpers.generic.snapshots import RetentionPolicy


 class VDiskViewSet(viewsets.ViewSet):
@@ -519,3 +520,25 @@ def scrub(self, vdisk, storagerouter_guid=None):
         :rtype: celery.result.AsyncResult
         """
         return GenericController.execute_scrub.delay(vdisk_guids=[vdisk.guid], storagerouter_guid=storagerouter_guid, manual=True)
+
+    @action()
+    @log()
+    @required_roles(['read', 'write', 'manage'])
+    @return_simple()
+    @load(VDisk)
+    def snapshot_retention_policy(self, vdisk, policy):
+        """
+        Set the snapshot retention policy on the vDisk level
+        :param vdisk: The vDisk to set the retention policy on
+        :type vdisk: VDisk
+        :param policy: Retention policy to set
+        :type policy: List[Dict[str, int]]
+        :return: None
+        :rtype: None
+        """
+        try:
+            RetentionPolicy.from_configuration(policy)
+        except:
+            raise ValueError('Policy is not properly formatted')
+        vdisk.snapshot_retention_policy = policy
+        vdisk.save()
diff --git a/webapps/api/backend/views/vpools.py b/webapps/api/backend/views/vpools.py
index 84ec527aa..e94db124f 100644
--- a/webapps/api/backend/views/vpools.py
+++ b/webapps/api/backend/views/vpools.py
@@ -31,6 +31,7 @@ from ovs.lib.generic import GenericController
 from ovs.lib.storagerouter import StorageRouterController
 from ovs.lib.vdisk import VDiskController
+from ovs.lib.helpers.generic.snapshots import RetentionPolicy


 class VPoolViewSet(viewsets.ViewSet):
@@ -228,3 +229,25 @@ def scrub_all_vdisks(self, vpool):
         :rtype: celery.result.AsyncResult
         """
         return GenericController.execute_scrub.delay(vpool_guids=[vpool.guid], manual=True)
+
+    @action()
+    @log()
+    @required_roles(['read', 'write', 'manage'])
+    @return_simple()
+    @load(VPool)
+    def snapshot_retention_policy(self, vpool, policy):
+        """
+        Set the snapshot retention policy on the vPool level
+        :param vpool: The vPool to set the retention policy on
+        :type vpool: 
ovs.dal.hybrids.vpool.VPool + :param policy: Retention policy to set + :type policy: List[Dict[str, int]] + :return: None + :rtype: None + """ + try: + RetentionPolicy.from_configuration(policy) + except: + raise ValueError('Policy is not properly formatted') + vpool.snapshot_retention_policy = policy + vpool.save()